Merge remote-tracking branch 'upstream/main' into dev-1.24

commit 0289b52eca

Dockerfile
@@ -4,29 +4,42 @@
 # change is that the Hugo version is now an overridable argument rather than a fixed
 # environment variable.

-FROM golang:1.15-alpine
+FROM golang:1.16-alpine

 LABEL maintainer="Luc Perkins <lperkins@linuxfoundation.org>"

 RUN apk add --no-cache \
     curl \
-    git \
-    openssh-client \
-    rsync \
+    gcc \
+    g++ \
+    musl-dev \
     build-base \
-    libc6-compat \
-    npm && \
-    npm install -D autoprefixer postcss-cli
+    libc6-compat

 ARG HUGO_VERSION

+RUN mkdir $HOME/src && \
+    cd $HOME/src && \
+    curl -L https://github.com/gohugoio/hugo/archive/refs/tags/v${HUGO_VERSION}.tar.gz | tar -xz && \
+    cd "hugo-${HUGO_VERSION}" && \
+    go install --tags extended
+
+FROM golang:1.16-alpine
+
+RUN apk add --no-cache \
+    git \
+    openssh-client \
+    rsync \
+    npm && \
+    npm install -D autoprefixer postcss-cli
+
 RUN mkdir -p /usr/local/src && \
     cd /usr/local/src && \
-    curl -L https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_Linux-64bit.tar.gz | tar -xz && \
-    mv hugo /usr/local/bin/hugo && \
     addgroup -Sg 1000 hugo && \
     adduser -Sg hugo -u 1000 -h /src hugo

+COPY --from=0 /go/bin/hugo /usr/local/bin/hugo
+
 WORKDIR /src

 USER hugo:hugo
@@ -31,6 +31,7 @@ aliases:
   - savitharaghunathan
   - sftim
   - tengqm
+  - zacharysarah
 sig-docs-en-reviews: # PR reviews for English content
   - bradtopol
   - celestehorgan
@@ -44,6 +45,7 @@ aliases:
   - sftim
   - shannonxtreme
   - tengqm
+  - zacharysarah
 sig-docs-es-owners: # Admins for Spanish content
   - raelga
   - electrocucaracha
@@ -167,6 +167,14 @@ For more information about contributing to the Kubernetes documentation, see:
 - [Documentation Style Guide](https://kubernetes.io/docs/contribute/style/style-guide/)
 - [Localizing Kubernetes Documentation](https://kubernetes.io/docs/contribute/localization/)
+
+### New contributor ambassadors
+
+If you need help at any point when contributing, the [New Contributor Ambassadors](https://kubernetes.io/docs/contribute/advanced/#serve-as-a-new-contributor-ambassador) are a good point of contact. These are SIG Docs approvers whose responsibilities include mentoring new contributors and helping them through their first few pull requests. The best place to contact the New Contributor Ambassadors is on the [Kubernetes Slack](https://slack.k8s.io/). Current New Contributor Ambassadors for SIG Docs:
+
+| Name                       | Slack                      | GitHub                     |
+| -------------------------- | -------------------------- | -------------------------- |
+| Arsh Sharma                | @arsh                      | @RinkiyaKeDad              |

 ## Localization `README.md`'s

 | Language | Language |
@@ -1,8 +1,11 @@
 ---
-title: " Faster than a speeding Latte "
+title: "Faster than a speeding Latte"
 date: 2015-04-06
 slug: faster-than-speeding-latte
 url: /blog/2015/04/Faster-Than-Speeding-Latte
+evergreen: true
 ---

 Check out Brendan Burns racing Kubernetes.
-[![Check out Brendan Burns racing Kubernetes](https://img.youtube.com/vi/7vZ9dRKRMyc/0.jpg)](https://www.youtube.com/watch?v=?7vZ9dRKRMyc)
+
+{{< youtube id="7vZ9dRKRMyc" title="Latte vs. Kubernetes setup - which is faster?">}}
@@ -1,17 +1,20 @@
 ---
 title: Runc and CVE-2019-5736
 date: 2019-02-11
+evergreen: false # mentions PodSecurityPolicy
 ---

+Authors: Kubernetes Product Security Committee
+
 This morning [a container escape vulnerability in runc was announced](https://www.openwall.com/lists/oss-security/2019/02/11/2). We wanted to provide some guidance to Kubernetes users to ensure everyone is safe and secure.

-## What Is Runc?
+## What is runc?

 Very briefly, runc is the low-level tool which does the heavy lifting of spawning a Linux container. Other tools like Docker, Containerd, and CRI-O sit on top of runc to deal with things like data formatting and serialization, but runc is at the heart of all of these systems.

 Kubernetes in turn sits on top of those tools, and so while no part of Kubernetes itself is vulnerable, most Kubernetes installations are using runc under the hood.

-### What Is The Vulnerability?
+### What is the vulnerability?

 While full details are still embargoed to give people time to patch, the rough version is that when running a process as root (UID 0) inside a container, that process can exploit a bug in runc to gain root privileges on the host running the container. This then allows them unlimited access to the server as well as any other containers on that server.

@@ -19,13 +22,14 @@ If the process inside the container is either trusted (something you know is not

 The most common source of risk is attacker-controlled container images, such as unvetted images from public repositories.

-### What Should I Do?
+### What should I do?

 As with all security issues, the two main options are to mitigate the vulnerability or upgrade your version of runc to one that includes the fix.

 As the exploit requires UID 0 within the container, a direct mitigation is to ensure all your containers are running as a non-0 user. This can be set within the container image, or via your pod specification:

 ```yaml
+---
 apiVersion: v1
 kind: Pod
 metadata:
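For reference, the hunk above truncates the example manifest. A minimal sketch of the kind of pod spec this mitigation describes, with illustrative names and image, would be:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: run-as-non-root         # illustrative name
spec:
  securityContext:
    runAsUser: 1000             # any non-zero UID avoids the UID 0 requirement of the exploit
  containers:
  - name: app                   # illustrative container and image
    image: registry.example.com/app:latest
```

A pod-level `runAsUser` applies to every container in the pod unless a container's own `securityContext` overrides it.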
@@ -39,6 +43,7 @@ spec:
 This can also be enforced globally using a PodSecurityPolicy:

 ```yaml
+---
 apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
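This hunk also truncates its manifest. A sketch of a PodSecurityPolicy that enforces non-root containers cluster-wide, using the `policy/v1beta1` API shown above (the name and the permissive rules for the other required fields are illustrative):

```yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: must-run-as-non-root    # illustrative name
spec:
  privileged: false
  runAsUser:
    rule: MustRunAsNonRoot      # rejects pods that would run as UID 0
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:
  - '*'
```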
@@ -89,7 +94,7 @@ We don't have specific confirmation that Docker for Mac and Docker for Windows a

 If you are unable to upgrade Docker, the Rancher team has provided backports of the fix for many older versions at [github.com/rancher/runc-cve](https://github.com/rancher/runc-cve).

-## Getting More Information
+## Getting more information

 If you have any further questions about how this vulnerability impacts Kubernetes, please join us at [discuss.kubernetes.io](https://discuss.kubernetes.io/).

@@ -30,15 +30,15 @@ This led to design principles that allow the Gateway API to improve upon Ingress

 The Gateway API introduces a few new resource types:

-- **[GatewayClasses](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.GatewayClass)** are cluster-scoped resources that act as templates to explicitly define behavior for Gateways derived from them. This is similar in concept to StorageClasses, but for networking data-planes.
-- **[Gateways](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.Gateway)** are the deployed instances of GatewayClasses. They are the logical representation of the data-plane which performs routing, which may be in-cluster proxies, hardware LBs, or cloud LBs.
-- **Routes** are not a single resource, but represent many different protocol-specific Route resources. The [HTTPRoute](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.HTTPRoute) has matching, filtering, and routing rules that get applied to Gateways that can process HTTP and HTTPS traffic. Similarly, there are [TCPRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.TCPRoute), [UDPRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.UDPRoute), and [TLSRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.TLSRoute) which also have protocol-specific semantics. This model also allows the Gateway API to incrementally expand its protocol support in the future.
+- **[GatewayClasses](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.GatewayClass)** are cluster-scoped resources that act as templates to explicitly define behavior for Gateways derived from them. This is similar in concept to StorageClasses, but for networking data-planes.
+- **[Gateways](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.Gateway)** are the deployed instances of GatewayClasses. They are the logical representation of the data-plane which performs routing, which may be in-cluster proxies, hardware LBs, or cloud LBs.
+- **Routes** are not a single resource, but represent many different protocol-specific Route resources. The [HTTPRoute](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.HTTPRoute) has matching, filtering, and routing rules that get applied to Gateways that can process HTTP and HTTPS traffic. Similarly, there are [TCPRoutes](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.TCPRoute), [UDPRoutes](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.UDPRoute), and [TLSRoutes](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.TLSRoute) which also have protocol-specific semantics. This model also allows the Gateway API to incrementally expand its protocol support in the future.
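To make the resource model concrete, here is a minimal sketch of a `v1alpha1` GatewayClass; the controller string is an illustrative placeholder rather than a real implementation:

```yaml
apiVersion: networking.x-k8s.io/v1alpha1
kind: GatewayClass
metadata:
  name: example-lb              # illustrative name
spec:
  controller: example.net/gateway-controller   # identifies the controller that implements this class
```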

 ![The resources of the Gateway API](gateway-api-resources.png)

 ### Gateway Controller Implementations

-The good news is that although Gateway is in [Alpha](https://github.com/kubernetes-sigs/gateway-api/releases), there are already several [Gateway controller implementations](https://gateway-api.sigs.k8s.io/references/implementations/) that you can run. Since it’s a standardized spec, the following example could be run on any of them and should function the exact same way. Check out [getting started](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see how to install and use one of these Gateway controllers.
+The good news is that although Gateway is in [Alpha](https://github.com/kubernetes-sigs/gateway-api/releases), there are already several [Gateway controller implementations](https://gateway-api.sigs.k8s.io/implementations/) that you can run. Since it’s a standardized spec, the following example could be run on any of them and should function the exact same way. Check out [getting started](https://gateway-api.sigs.k8s.io/v1alpha1/guides/getting-started/) to see how to install and use one of these Gateway controllers.

 ## Getting Hands-on with the Gateway API

@@ -134,7 +134,7 @@ spec:

 So we have two HTTPRoutes matching and routing traffic to different Services. You might be wondering, where are these Services accessible? Through which networks or IPs are they exposed?

-How Routes are exposed to clients is governed by [Route binding](https://gateway-api.sigs.k8s.io/concepts/api-overview/#route-binding), which describes how Routes and Gateways create a bidirectional relationship between each other. When Routes are bound to a Gateway it means their collective routing rules are configured on the underlying load balancers or proxies and the Routes are accessible through the Gateway. Thus, a Gateway is a logical representation of a networking data plane that can be configured through Routes.
+How Routes are exposed to clients is governed by [Route binding](https://gateway-api.sigs.k8s.io/concepts/api-overview/#route-resources), which describes how Routes and Gateways create a bidirectional relationship between each other. When Routes are bound to a Gateway it means their collective routing rules are configured on the underlying load balancers or proxies and the Routes are accessible through the Gateway. Thus, a Gateway is a logical representation of a networking data plane that can be configured through Routes.


 ![How Routes bind with Gateways](route-binding.png)
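As a rough sketch of that binding (v1alpha1 field names, illustrative values): an HTTPRoute carries labels that a Gateway's route selector can match, and forwards matched traffic to a Service:

```yaml
apiVersion: networking.x-k8s.io/v1alpha1
kind: HTTPRoute
metadata:
  name: foo-route               # illustrative name
  labels:
    gateway: external-https     # label an interested Gateway selects on
spec:
  hostnames:
  - "foo.example.com"
  rules:
  - matches:
    - path:
        type: Prefix
        value: /bar
    forwardTo:
    - serviceName: foo-svc      # illustrative Service
      port: 8080
```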
@@ -192,6 +192,6 @@ When you put it all together, you have a single load balancing infrastructure th

 There are many resources to check out to learn more.

-* Check out the [user guides](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see what use-cases can be addressed.
-* Try out one of the [existing Gateway controllers](https://gateway-api.sigs.k8s.io/references/implementations/)
+* Check out the [user guides](https://gateway-api.sigs.k8s.io/v1alpha1/guides/getting-started/) to see what use-cases can be addressed.
+* Try out one of the [existing Gateway controllers](https://gateway-api.sigs.k8s.io/implementations/)
 * Or [get involved](https://gateway-api.sigs.k8s.io/contributing/community/) and help design and influence the future of Kubernetes service networking!
@@ -0,0 +1,78 @@
+---
+layout: blog
+title: "Meet Our Contributors - APAC (Aus-NZ region)"
+date: 2022-03-16T12:00:00+0000
+slug: meet-our-contributors-au-nz-ep-02
+canonicalUrl: https://www.kubernetes.dev/blog/2022/03/14/meet-our-contributors-au-nz-ep-02/
+---
+
+**Authors & Interviewers:** [Anubhav Vardhan](https://github.com/anubha-v-ardhan), [Atharva Shinde](https://github.com/Atharva-Shinde), [Avinesh Tripathi](https://github.com/AvineshTripathi), [Brad McCoy](https://github.com/bradmccoydev), [Debabrata Panigrahi](https://github.com/Debanitrkl), [Jayesh Srivastava](https://github.com/jayesh-srivastava), [Kunal Verma](https://github.com/verma-kunal), [Pranshu Srivastava](https://github.com/PranshuSrivastava), [Priyanka Saggu](https://github.com/Priyankasaggu11929/), [Purneswar Prasad](https://github.com/PurneswarPrasad), [Vedant Kakde](https://github.com/vedant-kakde)
+
+---
+
+Good day, everyone 👋
+
+Welcome back to the second episode of the "Meet Our Contributors" blog post series for APAC.
+
+This post will feature four outstanding contributors from the Australia and New Zealand regions, who have played diverse leadership and community roles in the upstream Kubernetes project.
+
+So, without further ado, let's get straight to the blog.
+
+## [Caleb Woodbine](https://github.com/BobyMCbobs)
+
+Caleb Woodbine is currently a member of the ii.nz organisation.
+
+He began contributing to the Kubernetes project in 2018 as a member of the Kubernetes Conformance working group. His experience was positive, and he benefited from early guidance from [Hippie Hacker](https://github.com/hh), a fellow contributor from New Zealand.
+
+He has made major contributions to the Kubernetes project since then through `SIG k8s-infra` and the `k8s-conformance` working group.
+
+Caleb is also a co-organizer of the [CloudNative NZ](https://www.meetup.com/cloudnative-nz/) community events, which aim to expand the reach of the Kubernetes project throughout New Zealand in order to encourage technical education and improved employment opportunities.
+
+> _There needs to be more outreach in APAC, and educators and universities must pick up Kubernetes, as they are very slow and about 8+ years out of date. NZ tends to rather pay overseas than educate locals on the latest cloud tech locally._
+
+## [Dylan Graham](https://github.com/DylanGraham)
+
+Dylan Graham is a cloud engineer from Adelaide, Australia. He has been contributing to the upstream Kubernetes project since 2018.
+
+He stated that being a part of such a large-scale project was initially overwhelming, but that the community's friendliness and openness helped him get through it.
+
+He began by contributing to the project documentation and is now mostly focused on community support for the APAC region.
+
+He believes that consistent attendance at community/project meetings, taking on project tasks, and seeking community guidance as needed can help new aspiring developers become effective contributors.
+
+> _The feeling of being a part of a large community is really special. I've met some amazing people, even some before the pandemic in real life :)_
+
+## [Hippie Hacker](https://github.com/hh)
+
+Hippie has worked for the CNCF as a Strategic Initiatives contractor from New Zealand for more than five years. He is an active contributor to k8s-infra, API conformance testing, cloud provider conformance submissions, and the apisnoop.cncf.io domains of the upstream Kubernetes & CNCF projects.
+
+He recounts their early involvement with the Kubernetes project, which began roughly 5 years ago when their firm, ii.nz, demonstrated [network booting from a Raspberry Pi using PXE and running Gitlab in-cluster to install Kubernetes on servers](https://ii.nz/post/bringing-the-cloud-to-your-community/).
+
+He describes their own contributing experience as that of someone who at first tried to do all of the heavy lifting on their own, but eventually saw the benefit of group contributions, which reduced burnout, and of dividing tasks, which allowed folks to keep moving forward on their own momentum.
+
+He recommends that new contributors use pair programming.
+
+> _The cross pollination of approaches and two pairs of eyes on the same work can often yield a much more amplified effect than a PR comment / approval alone can afford._
+
+## [Nick Young](https://github.com/youngnick)
+
+Nick Young works at VMware as a technical lead for Contour, a CNCF ingress controller. He was active with the upstream Kubernetes project from the beginning, and eventually became the chair of the LTS working group, where he advocated for user concerns. He is currently the maintainer of the SIG Network Gateway API subproject.
+
+His contribution path was notable in that he began working on major areas of the Kubernetes project early on, which shaped his trajectory.
+
+He asserts that the best thing a new contributor can do is to "start contributing". Naturally, if it is relevant to their employment, that is excellent; however, investing non-work time in contributing can pay off in the long run in terms of work. He believes that new contributors, particularly those who are currently Kubernetes users, should be encouraged to participate in higher-level project discussions.
+
+> _Just being active and contributing will get you a long way. Once you've been active for a while, you'll find that you're able to answer questions, which will mean you're asked questions, and before you know it you are an expert._
+
+---
+
+If you have any recommendations/suggestions for who we should interview next, please let us know in #sig-contribex. Your suggestions would be much appreciated. We're thrilled to have additional folks assisting us in reaching out to even more wonderful individuals of the community.
+
+We'll see you all in the next one. Till then, everyone, happy contributing! 👋
@@ -33,9 +33,9 @@ There are two main ways to have Nodes added to the {{< glossary_tooltip text="AP
 1. The kubelet on a node self-registers to the control plane
 2. You (or another human user) manually add a Node object

-After you create a Node {{< glossary_tooltip text="object" term_id="object" >}}, or the kubelet on a node self-registers, the
-control plane checks whether the new Node object is valid. For example, if you
-try to create a Node from the following JSON manifest:
+After you create a Node {{< glossary_tooltip text="object" term_id="object" >}},
+or the kubelet on a node self-registers, the control plane checks whether the new Node object is
+valid. For example, if you try to create a Node from the following JSON manifest:

 ```json
 {
@@ -85,19 +85,23 @@ register itself with the API server. This is the preferred pattern, used by mos

 For self-registration, the kubelet is started with the following options:

 - `--kubeconfig` - Path to credentials to authenticate itself to the API server.
-- `--cloud-provider` - How to talk to a {{< glossary_tooltip text="cloud provider" term_id="cloud-provider" >}} to read metadata about itself.
-- `--register-node` - Automatically register with the API server.
-- `--register-with-taints` - Register the node with the given list of {{< glossary_tooltip text="taints" term_id="taint" >}} (comma separated `<key>=<value>:<effect>`).
+- `--cloud-provider` - How to talk to a {{< glossary_tooltip text="cloud provider" term_id="cloud-provider" >}}
+  to read metadata about itself.
+- `--register-node` - Automatically register with the API server.
+- `--register-with-taints` - Register the node with the given list of
+  {{< glossary_tooltip text="taints" term_id="taint" >}} (comma separated `<key>=<value>:<effect>`).

   No-op if `register-node` is false.
 - `--node-ip` - IP address of the node.
-- `--node-labels` - {{< glossary_tooltip text="Labels" term_id="label" >}} to add when registering the node in the cluster (see label restrictions enforced by the [NodeRestriction admission plugin](/docs/reference/access-authn-authz/admission-controllers/#noderestriction)).
-- `--node-status-update-frequency` - Specifies how often kubelet posts its node status to the API server.
+- `--node-labels` - {{< glossary_tooltip text="Labels" term_id="label" >}} to add when registering the node
+  in the cluster (see label restrictions enforced by the
+  [NodeRestriction admission plugin](/docs/reference/access-authn-authz/admission-controllers/#noderestriction)).
+- `--node-status-update-frequency` - Specifies how often kubelet posts its node status to the API server.

 When the [Node authorization mode](/docs/reference/access-authn-authz/node/) and
-[NodeRestriction admission plugin](/docs/reference/access-authn-authz/admission-controllers/#noderestriction) are enabled,
-kubelets are only authorized to create/modify their own Node resource.
+[NodeRestriction admission plugin](/docs/reference/access-authn-authz/admission-controllers/#noderestriction)
+are enabled, kubelets are only authorized to create/modify their own Node resource.

 {{< note >}}
 As mentioned in the [Node name uniqueness](#node-name-uniqueness) section,
@@ -168,8 +172,10 @@ Each section of the output is described below.

 The usage of these fields varies depending on your cloud provider or bare metal configuration.

-* HostName: The hostname as reported by the node's kernel. Can be overridden via the kubelet `--hostname-override` parameter.
-* ExternalIP: Typically the IP address of the node that is externally routable (available from outside the cluster).
+* HostName: The hostname as reported by the node's kernel. Can be overridden via the kubelet
+  `--hostname-override` parameter.
+* ExternalIP: Typically the IP address of the node that is externally routable (available from
+  outside the cluster).
 * InternalIP: Typically the IP address of the node that is routable only within the cluster.

@@ -289,7 +295,6 @@ and for updating their related Leases.
 updates to the Node's `.status`. If the Lease update fails, the kubelet retries,
 using exponential backoff that starts at 200 milliseconds and is capped at 7 seconds.
-

 ## Node controller

 The node {{< glossary_tooltip text="controller" term_id="controller" >}} is a
@@ -306,6 +311,7 @@ controller deletes the node from its list of nodes.

 The third is monitoring the nodes' health. The node controller is
 responsible for:

 - In the case that a node becomes unreachable, updating the NodeReady condition
   within the Node's `.status`. In this case the node controller sets the
   NodeReady condition to `ConditionUnknown`.
@@ -327,6 +333,7 @@ The node eviction behavior changes when a node in a given availability zone
 becomes unhealthy. The node controller checks what percentage of nodes in the zone
 are unhealthy (NodeReady condition is `ConditionUnknown` or `ConditionFalse`) at
 the same time:

 - If the fraction of unhealthy nodes is at least `--unhealthy-zone-threshold`
   (default 0.55), then the eviction rate is reduced.
 - If the cluster is small (i.e. has less than or equal to
@@ -391,7 +398,9 @@ for more information.

 The kubelet attempts to detect node system shutdown and terminates pods running on the node.

-Kubelet ensures that pods follow the normal [pod termination process](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) during the node shutdown.
+Kubelet ensures that pods follow the normal
+[pod termination process](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)
+during the node shutdown.

 The Graceful node shutdown feature depends on systemd since it takes advantage of
 [systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/) to
@@ -404,18 +413,26 @@ enabled by default in 1.21.
 Note that by default, both configuration options described below,
 `shutdownGracePeriod` and `shutdownGracePeriodCriticalPods` are set to zero,
 thus not activating Graceful node shutdown functionality.
-To activate the feature, the two kubelet config settings should be configured appropriately and set to non-zero values.
+To activate the feature, the two kubelet config settings should be configured appropriately and
+set to non-zero values.

 During a graceful shutdown, kubelet terminates pods in two phases:

 1. Terminate regular pods running on the node.
-2. Terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) running on the node.
+2. Terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)
+   running on the node.

-Graceful node shutdown feature is configured with two [`KubeletConfiguration`](/docs/tasks/administer-cluster/kubelet-config-file/) options:
+Graceful node shutdown feature is configured with two
+[`KubeletConfiguration`](/docs/tasks/administer-cluster/kubelet-config-file/) options:
+
 * `shutdownGracePeriod`:
-  * Specifies the total duration that the node should delay the shutdown by. This is the total grace period for pod termination for both regular and [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical).
+  * Specifies the total duration that the node should delay the shutdown by. This is the total
+    grace period for pod termination for both regular and
+    [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical).
 * `shutdownGracePeriodCriticalPods`:
-  * Specifies the duration used to terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) during a node shutdown. This value should be less than `shutdownGracePeriod`.
+  * Specifies the duration used to terminate
+    [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)
+    during a node shutdown. This value should be less than `shutdownGracePeriod`.

 For example, if `shutdownGracePeriod=30s`, and
 `shutdownGracePeriodCriticalPods=10s`, kubelet will delay the node shutdown by
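Taken together, a minimal sketch of a kubelet config file that activates the feature, using the example values from the surrounding text, would be:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
shutdownGracePeriod: 30s               # total delay applied to the node shutdown
shutdownGracePeriodCriticalPods: 10s   # portion of that period reserved for critical pods
```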
@@ -443,8 +460,8 @@ To provide more flexibility during graceful node shutdown around the ordering
 of pods during shutdown, graceful node shutdown honors the PriorityClass for
 Pods, provided that you enabled this feature in your cluster. The feature
 allows cluster administrators to explicitly define the ordering of pods
-during graceful node shutdown based on [priority
-classes](docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass).
+during graceful node shutdown based on
+[priority classes](/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass).

 The [Graceful Node Shutdown](#graceful-node-shutdown) feature, as described
 above, shuts down pods in two phases, non-critical pods, followed by critical
@@ -457,8 +474,8 @@ graceful node shutdown in multiple phases, each phase shutting down a
 particular priority class of pods. The kubelet can be configured with the exact
 phases and shutdown time per phase.

-Assuming the following custom pod [priority
-classes](docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass)
+Assuming the following custom pod
+[priority classes](/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass)
 in a cluster,

 |Pod priority class name|Pod priority class value|
@@ -492,7 +509,7 @@ shutdownGracePeriodByPodPriority:
     shutdownGracePeriodSeconds: 60
 ```

-The above table implies that any pod with priority value >= 100000 will get
+The above table implies that any pod with `priority` value >= 100000 will get
 just 10 seconds to stop, any pod with value >= 10000 and < 100000 will get 180
 seconds to stop, any pod with value >= 1000 and < 10000 will get 120 seconds to stop.
 Finally, all other pods will get 60 seconds to stop.
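The configuration block this hunk truncates can be reconstructed from the durations in the surrounding text; a full sketch of the kubelet config would be:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
shutdownGracePeriodByPodPriority:
  - priority: 100000
    shutdownGracePeriodSeconds: 10   # highest-priority pods get 10 seconds
  - priority: 10000
    shutdownGracePeriodSeconds: 180
  - priority: 1000
    shutdownGracePeriodSeconds: 120
  - priority: 0
    shutdownGracePeriodSeconds: 60   # everything else gets 60 seconds
```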
@@ -507,8 +524,8 @@ example, you could instead use these settings:
 | 0 |60 seconds |


-In the above case, the pods with custom-class-b will go into the same bucket
-as custom-class-c for shutdown.
+In the above case, the pods with `custom-class-b` will go into the same bucket
+as `custom-class-c` for shutdown.

 If there are no pods in a particular range, then the kubelet does not wait
 for pods in that priority range. Instead, the kubelet immediately skips to the
@@ -577,3 +594,4 @@ see [KEP-2400](https://github.com/kubernetes/enhancements/issues/2400) and its
 * Read the [Node](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node)
   section of the architecture design document.
 * Read about [taints and tolerations](/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
(File diff suppressed because it is too large.)
@@ -88,13 +88,13 @@ has all the information needed to configure a load balancer or proxy server. Mos
 contains a list of rules matched against all incoming requests. Ingress resource only supports rules
 for directing HTTP(S) traffic.

 If the `ingressClassName` is omitted, a [default Ingress class](#default-ingress-class)
 should be defined.

 There are some ingress controllers that work without the definition of a
 default `IngressClass`. For example, the Ingress-NGINX controller can be
 configured with a [flag](https://kubernetes.github.io/ingress-nginx/#what-is-the-flag-watch-ingress-without-class)
 `--watch-ingress-without-class`. It is [recommended](https://kubernetes.github.io/ingress-nginx/#i-have-only-one-instance-of-the-ingresss-nginx-controller-in-my-cluster-what-should-i-do) though, to specify the
 default `IngressClass` as shown [below](#default-ingress-class).
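For context, marking an IngressClass as the cluster default is done with an annotation. A minimal sketch (the name is illustrative; the controller value depends on which ingress controller you run):

```yaml
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: nginx-example           # illustrative name
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  controller: k8s.io/ingress-nginx
```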

 ### Ingress rules

@@ -118,8 +118,14 @@ match a path in the spec.

 ### DefaultBackend {#default-backend}

-An Ingress with no rules sends all traffic to a single default backend. The `defaultBackend` is conventionally a configuration option
-of the [Ingress controller](/docs/concepts/services-networking/ingress-controllers) and is not specified in your Ingress resources.
+An Ingress with no rules sends all traffic to a single default backend and `.spec.defaultBackend`
+is the backend that should handle requests in that case.
+The `defaultBackend` is conventionally a configuration option of the
+[Ingress controller](/docs/concepts/services-networking/ingress-controllers) and
+is not specified in your Ingress resources.
+If no `.spec.rules` are specified, `.spec.defaultBackend` must be specified.
+If `defaultBackend` is not set, the handling of requests that do not match any of the rules will be up to the
+ingress controller (consult the documentation for your ingress controller to find out how it handles this case).

 If none of the hosts or paths match the HTTP request in the Ingress objects, the traffic is
 routed to your default backend.
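A minimal sketch of an Ingress that relies only on `.spec.defaultBackend` (names are illustrative):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: default-backend-only    # illustrative name
spec:
  defaultBackend:
    service:
      name: fallback-svc        # illustrative Service that receives all unmatched traffic
      port:
        number: 80
```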
@@ -733,7 +733,7 @@ The following HTTP headers can be used to perform an impersonation request:

 * `Impersonate-User`: The username to act as.
 * `Impersonate-Group`: A group name to act as. Can be provided multiple times to set multiple groups. Optional. Requires "Impersonate-User".
-* `Impersonate-Extra-( extra name )`: A dynamic header used to associate extra fields with the user. Optional. Requires "Impersonate-User". In order to be preserved consistently, `( extra name )` should be lower-case, and any characters which aren't [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6) MUST be utf8 and [percent-encoded](https://tools.ietf.org/html/rfc3986#section-2.1).
+* `Impersonate-Extra-( extra name )`: A dynamic header used to associate extra fields with the user. Optional. Requires "Impersonate-User". In order to be preserved consistently, `( extra name )` must be lower-case, and any characters which aren't [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6) MUST be utf8 and [percent-encoded](https://tools.ietf.org/html/rfc3986#section-2.1).
 * `Impersonate-Uid`: A unique identifier that represents the user being impersonated. Optional. Requires "Impersonate-User". Kubernetes does not impose any format requirements on this string.
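Clients do not usually set these headers by hand; for example, a kubeconfig user entry can carry impersonation fields that kubectl translates into the headers above. A sketch with illustrative names and credentials:

```yaml
users:
- name: admin-as-jane                 # illustrative entry
  user:
    client-certificate: admin.crt     # illustrative credentials of the impersonating user
    client-key: admin.key
    as: jane                          # becomes Impersonate-User
    as-groups:                        # becomes Impersonate-Group (one header per group)
    - developers
    as-user-extra:                    # becomes Impersonate-Extra-scopes
      scopes:
      - view
```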

 {{< note >}}
@@ -163,14 +163,14 @@ kubelet [flags]
 <td colspan="2">--cloud-config string</td>
 </tr>
 <tr>
-<td></td><td style="line-height: 130%; word-wrap: break-word;">The path to the cloud provider configuration file. Empty string for no configuration file. (DEPRECATED: will be removed in 1.23, in favor of removing cloud providers code from Kubelet.)</td>
+<td></td><td style="line-height: 130%; word-wrap: break-word;">The path to the cloud provider configuration file. Empty string for no configuration file. (DEPRECATED: will be removed in 1.24 or later, in favor of removing cloud providers code from kubelet.)</td>
 </tr>

 <tr>
 <td colspan="2">--cloud-provider string</td>
 </tr>
 <tr>
-<td></td><td style="line-height: 130%; word-wrap: break-word;">The provider for cloud services. Set to empty string for running with no cloud provider. If set, the cloud provider determines the name of the node (consult cloud provider documentation to determine if and how the hostname is used). (DEPRECATED: will be removed in 1.23, in favor of removing cloud provider code from Kubelet.)</td>
+<td></td><td style="line-height: 130%; word-wrap: break-word;">The provider for cloud services. Set to empty string for running with no cloud provider. If set, the cloud provider determines the name of the node (consult cloud provider documentation to determine if and how the hostname is used). (DEPRECATED: will be removed in 1.24 or later, in favor of removing cloud provider code from Kubelet.)</td>
 </tr>

 <tr>
@@ -297,7 +297,7 @@ kubelet [flags]
 <td colspan="2">--dynamic-config-dir string</td>
 </tr>
 <tr>
-<td></td><td style="line-height: 130%; word-wrap: break-word;">The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The <code>DynamicKubeletConfig</code> feature gate must be enabled to pass this flag. (DEPRECATED: Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA. It is planned to be removed from Kubernetes in the version 1.23. Please use alternative ways to update kubelet configuration.)</td>
+<td></td><td style="line-height: 130%; word-wrap: break-word;">The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The <code>DynamicKubeletConfig</code> feature gate must be enabled to pass this flag. (DEPRECATED: Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA. It is planned to be removed from Kubernetes in the version 1.24 or later. Please use alternative ways to update kubelet configuration.)</td>
 </tr>

 <tr>
@@ -395,21 +395,21 @@ kubelet [flags]
 <td colspan="2">--experimental-allocatable-ignore-eviction Default: <code>false</code></td>
 </tr>
 <tr>
-<td></td><td style="line-height: 130%; word-wrap: break-word;">When set to <code>true</code>, hard eviction thresholds will be ignored while calculating node allocatable. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. (DEPRECATED: will be removed in 1.23)</td>
+<td></td><td style="line-height: 130%; word-wrap: break-word;">When set to <code>true</code>, hard eviction thresholds will be ignored while calculating node allocatable. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. (DEPRECATED: will be removed in 1.24 or later)</td>
 </tr>

 <tr>
 <td colspan="2">--experimental-check-node-capabilities-before-mount</td>
 </tr>
 <tr>
-<td></td><td style="line-height: 130%; word-wrap: break-word;">[Experimental] if set to <code>true</code>, the kubelet will check the underlying node for required components (binaries, etc.) before performing the mount (DEPRECATED: will be removed in 1.23, in favor of using CSI.)</td>
+<td></td><td style="line-height: 130%; word-wrap: break-word;">[Experimental] if set to <code>true</code>, the kubelet will check the underlying node for required components (binaries, etc.) before performing the mount (DEPRECATED: will be removed in 1.24 or later, in favor of using CSI.)</td>
 </tr>

 <tr>
 <td colspan="2">--experimental-kernel-memcg-notification</td>
 </tr>
 <tr>
-<td></td><td style="line-height: 130%; word-wrap: break-word;">Use kernelMemcgNotification configuration, this flag will be removed in 1.23. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's <code>--config</code> flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)</td>
+<td></td><td style="line-height: 130%; word-wrap: break-word;">Use kernelMemcgNotification configuration, this flag will be removed in 1.24 or later. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's <code>--config</code> flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.)</td>
 </tr>

 <tr>
@@ -423,7 +423,7 @@ kubelet [flags]
 <td colspan="2">--experimental-mounter-path string Default: <code>mount</code></td>
 </tr>
 <tr>
-<td></td><td style="line-height: 130%; word-wrap: break-word;">[Experimental] Path of mounter binary. Leave empty to use the default <code>mount</code>. (DEPRECATED: will be removed in 1.23, in favor of using CSI.)</td>
+<td></td><td style="line-height: 130%; word-wrap: break-word;">[Experimental] Path of mounter binary. Leave empty to use the default <code>mount</code>. (DEPRECATED: will be removed in 1.24 or later, in favor of using CSI.)</td>
 </tr>

 <tr>
@@ -478,7 +478,7 @@ the settings you specify apply to all containers in that Pod.
 ### container.seccomp.security.alpha.kubernetes.io/[NAME] {#container-seccomp-security-alpha-kubernetes-io}

 This annotation has been deprecated since Kubernetes v1.19 and will become non-functional in v1.25.
-The tutorial [Restrict a Container's Syscalls with seccomp](/docs/tutorials/clusters/seccomp/) takes
+The tutorial [Restrict a Container's Syscalls with seccomp](/docs/tutorials/security/seccomp/) takes
 you through the steps you follow to apply a seccomp profile to a Pod or to one of
 its containers. That tutorial covers the supported mechanism for configuring seccomp in Kubernetes,
 based on setting `securityContext` within the Pod's `.spec`.
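The supported mechanism that the paragraph refers to looks roughly like this in a pod spec (names and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: seccomp-demo            # illustrative name
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault      # apply the container runtime's default seccomp profile
  containers:
  - name: app
    image: registry.example.com/app:latest
```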
@@ -237,7 +237,9 @@ In each case, the credentials of the pod are used to communicate securely with t

 ## Accessing services running on the cluster

-The previous section describes how to connect to the Kubernetes API server. For information about connecting to other services running on a Kubernetes cluster, see [Access Cluster Services.](/docs/tasks/administer-cluster/access-cluster-services/)
+The previous section describes how to connect to the Kubernetes API server.
+For information about connecting to other services running on a Kubernetes cluster, see
+[Access Cluster Services](/docs/tasks/access-application-cluster/access-cluster-services/).

 ## Requesting redirects

@ -19,11 +19,14 @@ This page describes the CoreDNS upgrade process and how to install CoreDNS inste
|
||||||
|
|
||||||
## About CoreDNS
|
## About CoreDNS
|
||||||
|
|
||||||
[CoreDNS](https://coredns.io) is a flexible, extensible DNS server that can serve as the Kubernetes cluster DNS.
|
[CoreDNS](https://coredns.io) is a flexible, extensible DNS server
|
||||||
Like Kubernetes, the CoreDNS project is hosted by the {{< glossary_tooltip text="CNCF" term_id="cncf" >}}.
|
that can serve as the Kubernetes cluster DNS.
|
||||||
|
Like Kubernetes, the CoreDNS project is hosted by the
|
||||||
|
{{< glossary_tooltip text="CNCF" term_id="cncf" >}}.
|
||||||
|
|
||||||
You can use CoreDNS instead of kube-dns in your cluster by replacing kube-dns in an existing
|
You can use CoreDNS instead of kube-dns in your cluster by replacing
|
||||||
deployment, or by using tools like kubeadm that will deploy and upgrade the cluster for you.
|
kube-dns in an existing deployment, or by using tools like kubeadm
|
||||||
|
that will deploy and upgrade the cluster for you.
|
||||||
|
|
||||||
## Installing CoreDNS
|
## Installing CoreDNS
|
||||||
|
|
||||||
|
@ -34,51 +37,44 @@ For manual deployment or replacement of kube-dns, see the documentation at the
|
||||||
|
|
||||||
### Upgrading an existing cluster with kubeadm
|
### Upgrading an existing cluster with kubeadm
|
||||||
|
|
||||||
In Kubernetes version 1.10 and later, you can also move to CoreDNS when you use `kubeadm` to upgrade
|
In Kubernetes version 1.21, kubeadm removed its support for `kube-dns` as a DNS application.
|
||||||
a cluster that is using `kube-dns`. In this case, `kubeadm` will generate the CoreDNS configuration
|
For `kubeadm` v{{< skew currentVersion >}}, the only supported cluster DNS application
|
||||||
|
is CoreDNS.
|
||||||
|
|
||||||
|
You can move to CoreDNS when you use `kubeadm` to upgrade a cluster that is
|
||||||
|
using `kube-dns`. In this case, `kubeadm` generates the CoreDNS configuration
|
||||||
("Corefile") based upon the `kube-dns` ConfigMap, preserving configurations for
|
("Corefile") based upon the `kube-dns` ConfigMap, preserving configurations for
|
||||||
stub domains, and upstream name server.
|
stub domains, and upstream name server.
|
||||||
|
|
||||||
If you are moving from kube-dns to CoreDNS, make sure to set the `CoreDNS` feature gate to `true`
|
|
||||||
during an upgrade. For example, here is what a `v1.11.0` upgrade would look like:
|
|
||||||
```
|
|
||||||
kubeadm upgrade apply v1.11.0 --feature-gates=CoreDNS=true
|
|
||||||
```
|
|
||||||
|
|
||||||
In Kubernetes version 1.13 and later the `CoreDNS` feature gate is removed and CoreDNS
|
|
||||||
is used by default.
|
|
||||||
|
|
||||||
In versions prior to 1.11 the Corefile will be **overwritten** by the one created during upgrade.
|
|
||||||
**You should save your existing ConfigMap if you have customized it.** You may re-apply your
|
|
||||||
customizations after the new ConfigMap is up and running.
|
|
||||||
|
|
||||||
If you are running CoreDNS in Kubernetes version 1.11 and later, during upgrade,
|
|
||||||
your existing Corefile will be retained.
|
|
||||||
|
|
||||||
In Kubernetes version 1.21, support for `kube-dns` is removed from kubeadm.
|
|
||||||
|
|
||||||
## Upgrading CoreDNS
|
## Upgrading CoreDNS
|
||||||
|
|
||||||
CoreDNS is available in Kubernetes since v1.9.
|
You can check the version of CoreDNS that kubeadm installs for each version of
|
||||||
You can check the version of CoreDNS shipped with Kubernetes and the changes made to CoreDNS [here](https://github.com/coredns/deployment/blob/master/kubernetes/CoreDNS-k8s_version.md).
|
Kubernetes in the page
|
||||||
|
[CoreDNS version in Kubernetes](https://github.com/coredns/deployment/blob/master/kubernetes/CoreDNS-k8s_version.md).
|
||||||
|
|
||||||
|
CoreDNS can be upgraded manually in case you want to only upgrade CoreDNS
|
||||||
|
or use your own custom image.
|
||||||
|
There is a helpful [guideline and walkthrough](https://github.com/coredns/deployment/blob/master/kubernetes/Upgrading_CoreDNS.md)
|
||||||
|
available to ensure a smooth upgrade.
|
||||||
|
Make sure the existing CoreDNS configuration ("Corefile") is retained when
|
||||||
|
upgrading your cluster.
|
||||||
|
|
||||||
|
If you are upgrading your cluster using the `kubeadm` tool, `kubeadm`
|
||||||
|
can take care of retaining the existing CoreDNS configuration automatically.
|
||||||
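To see which CoreDNS version is currently running before you upgrade, you can query the image used by the CoreDNS Deployment. A quick sketch, assuming the default Deployment name `coredns` used by kubeadm:

```shell
# Print the CoreDNS container image, and therefore its version.
kubectl -n kube-system get deployment coredns \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```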
|
|
||||||
CoreDNS can be upgraded manually in case you want to only upgrade CoreDNS or use your own custom image.
|
|
||||||
There is a helpful [guideline and walkthrough](https://github.com/coredns/deployment/blob/master/kubernetes/Upgrading_CoreDNS.md) available to ensure a smooth upgrade.
|
|
||||||
|
|
||||||
## Tuning CoreDNS
|
## Tuning CoreDNS
|
||||||
|
|
||||||
When resource utilisation is a concern, it may be useful to tune the configuration of CoreDNS. For more details, check out the
|
When resource utilization is a concern, it may be useful to tune the
|
||||||
|
configuration of CoreDNS. For more details, check out the
|
||||||
[documentation on scaling CoreDNS](https://github.com/coredns/deployment/blob/master/kubernetes/Scaling_CoreDNS.md).
|
[documentation on scaling CoreDNS](https://github.com/coredns/deployment/blob/master/kubernetes/Scaling_CoreDNS.md).
|
||||||
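For example, a simple first step is adjusting the replica count. The value below is only an example; see the linked scaling documentation for guidance on sizing:

```shell
# Scale the CoreDNS Deployment; 3 replicas is an arbitrary example value.
kubectl -n kube-system scale deployment coredns --replicas=3
```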
|
|
||||||
|
|
||||||
|
|
||||||
## {{% heading "whatsnext" %}}
|
## {{% heading "whatsnext" %}}
|
||||||
|
|
||||||
|
|
||||||
You can configure [CoreDNS](https://coredns.io) to support many more use cases than
|
You can configure [CoreDNS](https://coredns.io) to support many more use cases than
|
||||||
kube-dns by modifying the `Corefile`. For more information, see the
|
kube-dns does by modifying the CoreDNS configuration ("Corefile").
|
||||||
[CoreDNS site](https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/).
|
For more information, see the [documentation](https://coredns.io/plugins/kubernetes/)
|
||||||
|
for the `kubernetes` CoreDNS plugin, or read the
|
||||||
|
[Custom DNS Entries for Kubernetes](https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/) article
|
||||||
|
in the CoreDNS blog.
|
||||||
|
|
||||||
|
|
|
@ -50,7 +50,7 @@ plugins:
|
||||||
# Array of authenticated usernames to exempt.
|
# Array of authenticated usernames to exempt.
|
||||||
usernames: []
|
usernames: []
|
||||||
# Array of runtime class names to exempt.
|
# Array of runtime class names to exempt.
|
||||||
runtimeClassNames: []
|
runtimeClasses: []
|
||||||
# Array of namespaces to exempt.
|
# Array of namespaces to exempt.
|
||||||
namespaces: []
|
namespaces: []
|
||||||
```
|
```
|
||||||
|
|
|
@ -6,20 +6,15 @@ weight: 40
|
||||||
|
|
||||||
<!-- overview -->
|
<!-- overview -->
|
||||||
|
|
||||||
This page shows how a Pod can use a DownwardAPIVolumeFile to expose information
|
This page shows how a Pod can use a
|
||||||
about itself to Containers running in the Pod. A DownwardAPIVolumeFile can expose
|
[`DownwardAPIVolumeFile`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core)
|
||||||
Pod fields and Container fields.
|
to expose information about itself to Containers running in the Pod.
|
||||||
|
A `DownwardAPIVolumeFile` can expose Pod fields and Container fields.
|
||||||
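For orientation, here is a minimal sketch of the volume-file approach; the volume name `podinfo` and file name `labels` are arbitrary, and the task below uses a fuller manifest:

```shell
# A minimal sketch of a downwardAPI volume; names are arbitrary examples.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: downward-api-sketch
  labels:
    zone: us-east-coast
spec:
  containers:
    - name: client-container
      image: busybox
      command: ["sh", "-c", "cat /etc/podinfo/labels && sleep 3600"]
      volumeMounts:
        - name: podinfo
          mountPath: /etc/podinfo
  volumes:
    - name: podinfo
      downwardAPI:
        items:
          - path: "labels"
            fieldRef:
              fieldPath: metadata.labels
EOF
```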
|
|
||||||
|
|
||||||
|
|
||||||
## {{% heading "prerequisites" %}}
|
## {{% heading "prerequisites" %}}
|
||||||
|
|
||||||
|
|
||||||
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
|
{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<!-- steps -->
|
<!-- steps -->
|
||||||
|
|
||||||
## The Downward API
|
## The Downward API
|
||||||
|
@ -27,10 +22,10 @@ Pod fields and Container fields.
|
||||||
There are two ways to expose Pod and Container fields to a running Container:
|
There are two ways to expose Pod and Container fields to a running Container:
|
||||||
|
|
||||||
* [Environment variables](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/#the-downward-api)
|
* [Environment variables](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/#the-downward-api)
|
||||||
* Volume Files
|
* Volume files
|
||||||
|
|
||||||
Together, these two ways of exposing Pod and Container fields are called the
|
Together, these two ways of exposing Pod and Container fields are called the
|
||||||
*Downward API*.
|
"Downward API".
|
||||||
|
|
||||||
## Store Pod fields
|
## Store Pod fields
|
||||||
|
|
||||||
|
@ -60,13 +55,13 @@ Create the Pod:
|
||||||
kubectl apply -f https://k8s.io/examples/pods/inject/dapi-volume.yaml
|
kubectl apply -f https://k8s.io/examples/pods/inject/dapi-volume.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
Verify that the Container in the Pod is running:
|
Verify that the container in the Pod is running:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
kubectl get pods
|
kubectl get pods
|
||||||
```
|
```
|
||||||
|
|
||||||
View the Container's logs:
|
View the container's logs:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
kubectl logs kubernetes-downwardapi-volume-example
|
kubectl logs kubernetes-downwardapi-volume-example
|
||||||
|
@ -83,7 +78,7 @@ build="two"
|
||||||
builder="john-doe"
|
builder="john-doe"
|
||||||
```
|
```
|
||||||
|
|
||||||
Get a shell into the Container that is running in your Pod:
|
Get a shell into the container that is running in your Pod:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
kubectl exec -it kubernetes-downwardapi-volume-example -- sh
|
kubectl exec -it kubernetes-downwardapi-volume-example -- sh
|
||||||
|
@ -136,8 +131,7 @@ total 8
|
||||||
|
|
||||||
Using symbolic links enables dynamic atomic refresh of the metadata; updates are
|
Using symbolic links enables dynamic atomic refresh of the metadata; updates are
|
||||||
written to a new temporary directory, and the `..data` symlink is updated
|
written to a new temporary directory, and the `..data` symlink is updated
|
||||||
atomically using
|
atomically using [rename(2)](http://man7.org/linux/man-pages/man2/rename.2.html).
|
||||||
[rename(2)](http://man7.org/linux/man-pages/man2/rename.2.html).
|
|
||||||
|
|
||||||
{{< note >}}
|
{{< note >}}
|
||||||
A container using Downward API as a
|
A container using Downward API as a
|
||||||
|
@ -153,17 +147,19 @@ Exit the shell:
|
||||||
|
|
||||||
## Store Container fields
|
## Store Container fields
|
||||||
|
|
||||||
The preceding exercise, you stored Pod fields in a DownwardAPIVolumeFile.
|
In the preceding exercise, you stored Pod fields in a
|
||||||
|
[`DownwardAPIVolumeFile`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core).
|
||||||
In this next exercise, you store Container fields. Here is the configuration
|
In this next exercise, you store Container fields. Here is the configuration
|
||||||
file for a Pod that has one Container:
|
file for a Pod that has one Container:
|
||||||
|
|
||||||
{{< codenew file="pods/inject/dapi-volume-resources.yaml" >}}
|
{{< codenew file="pods/inject/dapi-volume-resources.yaml" >}}
|
||||||
|
|
||||||
In the configuration file, you can see that the Pod has a `downwardAPI` Volume,
|
In the configuration file, you can see that the Pod has a
|
||||||
and the Container mounts the Volume at `/etc/podinfo`.
|
[`downwardAPI` volume](/docs/concepts/storage/volumes/#downwardapi),
|
||||||
|
and the Container mounts the volume at `/etc/podinfo`.
|
||||||
|
|
||||||
Look at the `items` array under `downwardAPI`. Each element of the array is a
|
Look at the `items` array under `downwardAPI`. Each element of the array is a
|
||||||
DownwardAPIVolumeFile.
|
[`DownwardAPIVolumeFile`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core).
|
||||||
|
|
||||||
The first element specifies that in the Container named `client-container`,
|
The first element specifies that in the Container named `client-container`,
|
||||||
the value of the `limits.cpu` field in the format specified by `1m` should be
|
the value of the `limits.cpu` field in the format specified by `1m` should be
|
||||||
|
@ -176,7 +172,7 @@ Create the Pod:
|
||||||
kubectl apply -f https://k8s.io/examples/pods/inject/dapi-volume-resources.yaml
|
kubectl apply -f https://k8s.io/examples/pods/inject/dapi-volume-resources.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
Get a shell into the Container that is running in your Pod:
|
Get a shell into the container that is running in your Pod:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
kubectl exec -it kubernetes-downwardapi-volume-example-2 -- sh
|
kubectl exec -it kubernetes-downwardapi-volume-example-2 -- sh
|
||||||
|
@ -187,46 +183,56 @@ In your shell, view the `cpu_limit` file:
|
||||||
```shell
|
```shell
|
||||||
/# cat /etc/podinfo/cpu_limit
|
/# cat /etc/podinfo/cpu_limit
|
||||||
```
|
```
|
||||||
|
|
||||||
You can use similar commands to view the `cpu_request`, `mem_limit` and
|
You can use similar commands to view the `cpu_request`, `mem_limit`, and
|
||||||
`mem_request` files.
|
`mem_request` files.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
<!-- discussion -->
|
<!-- discussion -->
|
||||||
|
|
||||||
|
<!-- TODO: This section should be extracted out of the task page. -->
|
||||||
## Capabilities of the Downward API
|
## Capabilities of the Downward API
|
||||||
|
|
||||||
The following information is available to containers through environment
|
The following information is available to containers through environment
|
||||||
variables and `downwardAPI` volumes:
|
variables and `downwardAPI` volumes:
|
||||||
|
|
||||||
* Information available via `fieldRef`:
|
* Information available via `fieldRef`:
|
||||||
|
|
||||||
* `metadata.name` - the pod's name
|
* `metadata.name` - the pod's name
|
||||||
* `metadata.namespace` - the pod's namespace
|
* `metadata.namespace` - the pod's namespace
|
||||||
* `metadata.uid` - the pod's UID
|
* `metadata.uid` - the pod's UID
|
||||||
* `metadata.labels['<KEY>']` - the value of the pod's label `<KEY>` (for example, `metadata.labels['mylabel']`)
|
* `metadata.labels['<KEY>']` - the value of the pod's label `<KEY>`
|
||||||
* `metadata.annotations['<KEY>']` - the value of the pod's annotation `<KEY>` (for example, `metadata.annotations['myannotation']`)
|
(for example, `metadata.labels['mylabel']`)
|
||||||
|
* `metadata.annotations['<KEY>']` - the value of the pod's annotation `<KEY>`
|
||||||
|
(for example, `metadata.annotations['myannotation']`)
|
||||||
|
|
||||||
* Information available via `resourceFieldRef`:
|
* Information available via `resourceFieldRef`:
|
||||||
|
|
||||||
* A Container's CPU limit
|
* A Container's CPU limit
|
||||||
* A Container's CPU request
|
* A Container's CPU request
|
||||||
* A Container's memory limit
|
* A Container's memory limit
|
||||||
* A Container's memory request
|
* A Container's memory request
|
||||||
* A Container's hugepages limit (providing that the `DownwardAPIHugePages` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled)
|
* A Container's hugepages limit (provided that the `DownwardAPIHugePages`
|
||||||
* A Container's hugepages request (providing that the `DownwardAPIHugePages` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled)
|
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled)
|
||||||
|
* A Container's hugepages request (provided that the `DownwardAPIHugePages`
|
||||||
|
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled)
|
||||||
* A Container's ephemeral-storage limit
|
* A Container's ephemeral-storage limit
|
||||||
* A Container's ephemeral-storage request
|
* A Container's ephemeral-storage request
|
||||||
|
|
||||||
In addition, the following information is available through
|
In addition, the following information is available through
|
||||||
`downwardAPI` volume `fieldRef`:
|
`downwardAPI` volume `fieldRef`:
|
||||||
|
|
||||||
* `metadata.labels` - all of the pod's labels, formatted as `label-key="escaped-label-value"` with one label per line
|
* `metadata.labels` - all of the pod's labels, formatted as `label-key="escaped-label-value"`
|
||||||
* `metadata.annotations` - all of the pod's annotations, formatted as `annotation-key="escaped-annotation-value"` with one annotation per line
|
with one label per line
|
||||||
|
* `metadata.annotations` - all of the pod's annotations, formatted as
|
||||||
|
`annotation-key="escaped-annotation-value"` with one annotation per line
|
||||||
|
|
||||||
The following information is available through environment variables:
|
The following information is available through environment variables:
|
||||||
|
|
||||||
* `status.podIP` - the pod's IP address
|
* `status.podIP` - the pod's IP address
|
||||||
* `spec.serviceAccountName` - the pod's service account name, available since v1.4.0-alpha.3
|
* `spec.serviceAccountName` - the pod's service account name
|
||||||
* `spec.nodeName` - the node's name, available since v1.4.0-alpha.3
|
* `spec.nodeName` - the name of the node on which the Pod is
|
||||||
* `status.hostIP` - the node's IP, available since v1.7.0-alpha.1
|
scheduled to run
|
||||||
|
* `status.hostIP` - the IP of the node to which the Pod is assigned
|
||||||
|
|
||||||
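To make the lists above concrete, here is a brief sketch exposing one `fieldRef` field and one `resourceFieldRef` field as environment variables; all names and values are arbitrary examples:

```shell
# A sketch: expose the Pod name and the container's CPU limit as
# environment variables; names are arbitrary examples.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: downward-env-sketch
spec:
  containers:
    - name: main
      image: busybox
      command: ["sh", "-c", "env && sleep 3600"]
      resources:
        limits:
          cpu: "250m"
          memory: 64Mi
      env:
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MY_CPU_LIMIT
          valueFrom:
            resourceFieldRef:
              containerName: main
              resource: limits.cpu
EOF
```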
{{< note >}}
|
{{< note >}}
|
||||||
If CPU and memory limits are not specified for a Container, the
|
If CPU and memory limits are not specified for a Container, the
|
||||||
|
@ -241,7 +247,7 @@ basis. For more information, see
|
||||||
|
|
||||||
## Motivation for the Downward API
|
## Motivation for the Downward API
|
||||||
|
|
||||||
It is sometimes useful for a Container to have information about itself, without
|
It is sometimes useful for a container to have information about itself, without
|
||||||
being overly coupled to Kubernetes. The Downward API allows containers to consume
|
being overly coupled to Kubernetes. The Downward API allows containers to consume
|
||||||
information about themselves or the cluster without using the Kubernetes client
|
information about themselves or the cluster without using the Kubernetes client
|
||||||
or API server.
|
or API server.
|
||||||
|
@ -252,19 +258,17 @@ application, but that is tedious and error prone, and it violates the goal of lo
|
||||||
coupling. A better option would be to use the Pod's name as an identifier, and
|
coupling. A better option would be to use the Pod's name as an identifier, and
|
||||||
inject the Pod's name into the well-known environment variable.
|
inject the Pod's name into the well-known environment variable.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## {{% heading "whatsnext" %}}
|
## {{% heading "whatsnext" %}}
|
||||||
|
|
||||||
|
* Check the [`PodSpec`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core)
|
||||||
* [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core)
|
API definition which defines the desired state of a Pod.
|
||||||
* [Volume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core)
|
* Check the [`Volume`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core)
|
||||||
* [DownwardAPIVolumeSource](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumesource-v1-core)
|
API definition which defines a generic volume in a Pod for containers to access.
|
||||||
* [DownwardAPIVolumeFile](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core)
|
* Check the [`DownwardAPIVolumeSource`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumesource-v1-core)
|
||||||
* [ResourceFieldSelector](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcefieldselector-v1-core)
|
API definition which defines a volume that contains Downward API information.
|
||||||
|
* Check the [`DownwardAPIVolumeFile`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core)
|
||||||
|
API definition which contains references to object or resource fields for
|
||||||
|
populating a file in the Downward API volume.
|
||||||
|
* Check the [`ResourceFieldSelector`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcefieldselector-v1-core)
|
||||||
|
API definition which specifies the container resources and their output format.
|
||||||
|
|
||||||
|
|
|
@ -308,7 +308,7 @@ metadata:
|
||||||
type: Opaque
|
type: Opaque
|
||||||
```
|
```
|
||||||
|
|
||||||
Like ConfigMaps, generated Secrets can be used in Deployments by refering to the name of the secretGenerator:
|
Like ConfigMaps, generated Secrets can be used in Deployments by referring to the name of the secretGenerator:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
# Create a password.txt file
|
# Create a password.txt file
|
||||||
|
|
|
@ -12,16 +12,11 @@ To do so in all your shell sessions, add the following to your `~/.zshrc` file:
|
||||||
source <(kubectl completion zsh)
|
source <(kubectl completion zsh)
|
||||||
```
|
```
|
||||||
|
|
||||||
If you have an alias for kubectl, you can extend shell completion to work with that alias:
|
If you have an alias for kubectl, kubectl autocompletion will automatically work with it.
|
||||||
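For example, after adding an alias and reloading the shell, completion works for the alias as well:

```zsh
# Completion carries over to the alias automatically after a reload.
echo 'alias k=kubectl' >>~/.zshrc
# After reloading, tab completion works for commands like: k get pods
```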
|
|
||||||
```zsh
|
|
||||||
echo 'alias k=kubectl' >>~/.zshrc
|
|
||||||
echo 'compdef __start_kubectl k' >>~/.zshrc
|
|
||||||
```
|
|
||||||
|
|
||||||
After reloading your shell, kubectl autocompletion should be working.
|
After reloading your shell, kubectl autocompletion should be working.
|
||||||
|
|
||||||
If you get an error like `complete:13: command not found: compdef`, then add the following to the beginning of your `~/.zshrc` file:
|
If you get an error like `2: command not found: compdef`, then add the following to the beginning of your `~/.zshrc` file:
|
||||||
|
|
||||||
```zsh
|
```zsh
|
||||||
autoload -Uz compinit
|
autoload -Uz compinit
|
||||||
|
|
|
@ -382,27 +382,13 @@ If you do not want AppArmor to be available on your cluster, it can be disabled
|
||||||
```
|
```
|
||||||
|
|
||||||
When disabled, any Pod that includes an AppArmor profile will fail validation with a "Forbidden"
|
When disabled, any Pod that includes an AppArmor profile will fail validation with a "Forbidden"
|
||||||
error. Note that by default docker always enables the "docker-default" profile on non-privileged
|
error.
|
||||||
pods (if the AppArmor kernel module is enabled), and will continue to do so even if the feature-gate
|
|
||||||
is disabled. The option to disable AppArmor will be removed when AppArmor graduates to general
|
{{< note >}}
|
||||||
|
Even if the Kubernetes feature is disabled, runtimes may still enforce the default profile. The
|
||||||
|
option to disable the AppArmor feature will be removed when AppArmor graduates to general
|
||||||
availability (GA).
|
availability (GA).
|
||||||
|
{{< /note >}}
|
||||||
### Upgrading to Kubernetes v1.4 with AppArmor
|
|
||||||
|
|
||||||
No action is required with respect to AppArmor to upgrade your cluster to v1.4. However, if any
|
|
||||||
existing pods had an AppArmor annotation, they will not go through validation (or PodSecurityPolicy
|
|
||||||
admission). If permissive profiles are loaded on the nodes, a malicious user could pre-apply a
|
|
||||||
permissive profile to escalate the pod privileges above the docker-default. If this is a concern, it
|
|
||||||
is recommended to scrub the cluster of any pods containing an annotation with
|
|
||||||
`apparmor.security.beta.kubernetes.io`.
|
|
||||||
|
|
||||||
### Upgrade path to General Availability
|
|
||||||
|
|
||||||
When AppArmor is ready to be graduated to general availability (GA), the options currently specified
|
|
||||||
through annotations will be converted to fields. Supporting all the upgrade and downgrade paths
|
|
||||||
through the transition is very nuanced, and will be explained in detail when the transition
|
|
||||||
occurs. We will commit to supporting both fields and annotations for at least 2 releases, and will
|
|
||||||
explicitly reject the annotations for at least 2 releases after that.
|
|
||||||
|
|
||||||
## Authoring Profiles
|
## Authoring Profiles
|
||||||
|
|
||||||
|
@ -415,10 +401,6 @@ tools to help with that:
|
||||||
* [bane](https://github.com/jfrazelle/bane) is an AppArmor profile generator for Docker that uses a
|
* [bane](https://github.com/jfrazelle/bane) is an AppArmor profile generator for Docker that uses a
|
||||||
simplified profile language.
|
simplified profile language.
|
||||||
|
|
||||||
It is recommended to run your application through Docker on a development workstation to generate
|
|
||||||
the profiles, but there is nothing preventing running the tools on the Kubernetes node where your
|
|
||||||
Pod is running.
|
|
||||||
|
|
||||||
To debug problems with AppArmor, you can check the system logs to see what, specifically, was
|
To debug problems with AppArmor, you can check the system logs to see what, specifically, was
|
||||||
denied. AppArmor logs verbose messages to `dmesg`, and errors can usually be found in the system
|
denied. AppArmor logs verbose messages to `dmesg`, and errors can usually be found in the system
|
||||||
logs or through `journalctl`. More information is provided in
|
logs or through `journalctl`. More information is provided in
|
||||||
|
@ -441,9 +423,8 @@ Specifying the profile a container will run with:
|
||||||
- `runtime/default`: Refers to the default runtime profile.
|
- `runtime/default`: Refers to the default runtime profile.
|
||||||
- Equivalent to not specifying a profile (without a PodSecurityPolicy default), except it still
|
- Equivalent to not specifying a profile (without a PodSecurityPolicy default), except it still
|
||||||
requires AppArmor to be enabled.
|
requires AppArmor to be enabled.
|
||||||
- For Docker, this resolves to the
|
- In practice, many container runtimes use the same OCI default profile, defined here:
|
||||||
[`docker-default`](https://docs.docker.com/engine/security/apparmor/) profile for non-privileged
|
https://github.com/containers/common/blob/main/pkg/apparmor/apparmor_linux_template.go
|
||||||
containers, and unconfined (no profile) for privileged containers.
|
|
||||||
- `localhost/<profile_name>`: Refers to a profile loaded on the node (localhost) by name.
|
- `localhost/<profile_name>`: Refers to a profile loaded on the node (localhost) by name.
|
||||||
- The possible profile names are detailed in the
|
- The possible profile names are detailed in the
|
||||||
[core policy reference](https://gitlab.com/apparmor/apparmor/wikis/AppArmor_Core_Policy_Reference#profile-names-and-attachment-specifications).
|
[core policy reference](https://gitlab.com/apparmor/apparmor/wikis/AppArmor_Core_Policy_Reference#profile-names-and-attachment-specifications).
|
||||||
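For instance, here is a minimal sketch of attaching the default runtime profile through the annotation described above; the Pod and container names are arbitrary:

```shell
# Sketch: run a container under the runtime's default AppArmor profile.
# The annotation key is container.apparmor.security.beta.kubernetes.io/<container_name>.
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: hello-apparmor-sketch
  annotations:
    container.apparmor.security.beta.kubernetes.io/hello: runtime/default
spec:
  containers:
    - name: hello
      image: busybox
      command: ["sh", "-c", "echo 'Hello AppArmor!' && sleep 1h"]
EOF
```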
|
@ -474,5 +455,3 @@ Additional resources:
|
||||||
|
|
||||||
* [Quick guide to the AppArmor profile language](https://gitlab.com/apparmor/apparmor/wikis/QuickProfileLanguage)
|
* [Quick guide to the AppArmor profile language](https://gitlab.com/apparmor/apparmor/wikis/QuickProfileLanguage)
|
||||||
* [AppArmor core policy reference](https://gitlab.com/apparmor/apparmor/wikis/Policy_Layout)
|
* [AppArmor core policy reference](https://gitlab.com/apparmor/apparmor/wikis/Policy_Layout)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -12,7 +12,7 @@ Pod Security admission (PSA) is enabled by default in v1.23 and later, as it has
|
||||||
[graduated to beta](/blog/2021/12/09/pod-security-admission-beta/).
|
[graduated to beta](/blog/2021/12/09/pod-security-admission-beta/).
|
||||||
Pod Security
|
Pod Security
|
||||||
is an admission controller that carries out checks against the Kubernetes
|
is an admission controller that carries out checks against the Kubernetes
|
||||||
[Pod Security Standards](docs/concepts/security/pod-security-standards/) when new pods are
|
[Pod Security Standards](/docs/concepts/security/pod-security-standards/) when new pods are
|
||||||
created. This tutorial shows you how to enforce the `baseline` Pod Security
|
created. This tutorial shows you how to enforce the `baseline` Pod Security
|
||||||
Standard at the cluster level which applies a standard configuration
|
Standard at the cluster level which applies a standard configuration
|
||||||
to all namespaces in a cluster.
|
to all namespaces in a cluster.
|
||||||
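For orientation, a rough sketch of such a cluster-wide configuration follows; the file path is a hypothetical placeholder, and the tutorial below covers the full procedure. The file is passed to the API server via `--admission-control-config-file`:

```shell
# A sketch of a cluster-level PodSecurity default enforcing "baseline".
cat > /tmp/pss-cluster-default.yaml <<EOF
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
  - name: PodSecurity
    configuration:
      apiVersion: pod-security.admission.config.k8s.io/v1beta1
      kind: PodSecurityConfiguration
      defaults:
        enforce: "baseline"
        enforce-version: "latest"
EOF
```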
|
|
|
@ -243,7 +243,7 @@ pointing to. This IP address is accessible only within the cluster.
|
||||||
|
|
||||||
If you want guests to be able to access your guestbook, you must configure the
|
If you want guests to be able to access your guestbook, you must configure the
|
||||||
frontend Service to be externally visible, so a client can request the Service
|
frontend Service to be externally visible, so a client can request the Service
|
||||||
from outside the Kubernetes cluster. However a Kubernetes user you can use
|
from outside the Kubernetes cluster. However, a Kubernetes user can use
|
||||||
`kubectl port-forward` to access the service even though it uses a
|
`kubectl port-forward` to access the service even though it uses a
|
||||||
`ClusterIP`.
|
`ClusterIP`.
|
||||||
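For example, assuming the frontend Service from this tutorial is running:

```shell
# Forward local port 8080 to port 80 of the frontend Service.
kubectl port-forward svc/frontend 8080:80
```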
|
|
||||||
|
|
|
@ -0,0 +1,273 @@
|
||||||
|
---
reviewers:
- electrocucaracha
- raelga
- gamba47
title: Disruptions
content_type: concept
weight: 60
---

<!-- overview -->
This guide is for application owners who want to build
highly available applications, and who therefore need to understand
what types of disruptions can happen to Pods.

It is also for cluster administrators who want to perform automated
actions on their clusters, such as upgrading or autoscaling the clusters.

<!-- body -->

## Voluntary and involuntary disruptions

Pods do not disappear until something (a person or a controller) destroys them,
or there is an unavoidable hardware or software error.

We call these unavoidable cases *involuntary disruptions* to
an application. Some examples:

- a hardware failure of the physical machine backing the node
- a cluster administrator deletes a VM (instance) by mistake
- a cloud provider or hypervisor failure makes the VM disappear
- a kernel panic
- the node disappears from the cluster due to a network partition that separates it from the cluster
- eviction of a Pod because the node [is out of resources](/docs/concepts/scheduling-eviction/node-pressure-eviction/).

Except for the out-of-resources condition, all these conditions
should be familiar to most users; they are not specific
to Kubernetes.

We call the other cases *voluntary disruptions*. These include both
actions initiated by the application owner and those initiated by a cluster
administrator. Typical application owner actions include:

- deleting the Deployment or other controller that manages the Pod
- updating the Pod's Deployment, causing a restart
- deleting a Pod (for example, by accident)

Cluster administrator actions include:

- [Draining a node](/docs/tasks/administer-cluster/safely-drain-node/) for repair or upgrade.
- Draining a node from the cluster to scale the cluster down (learn about
  [Cluster Autoscaling](https://github.com/kubernetes/autoscaler/#readme)).
- Removing a Pod from a node to permit something else to fit on that node.

These actions might be taken directly by the cluster administrator, by
automation run by the cluster administrator, or by your cluster hosting provider.

Ask your cluster administrator, your cloud provider, or your distribution's documentation
to determine whether any of these voluntary disruptions are enabled on your cluster.
If none are enabled, you can skip creating a Pod Disruption Budget.

{{< caution >}}
Not all voluntary disruptions are constrained by Pod Disruption Budgets. For example,
deleting a Deployment or Pods bypasses the budget.
{{< /caution >}}

## Dealing with disruptions

These are some ways to mitigate involuntary disruptions:

- Make sure your Pod [requests the resources](/docs/tasks/configure-pod-container/assign-memory-resource) it needs.
- Replicate your application if you need higher availability. (Learn about running replicated
  [stateless](/docs/tasks/run-application/run-stateless-application-deployment/)
  and [stateful](/docs/tasks/run-application/run-replicated-stateful-application/) applications.)
- For even higher availability when running replicated applications,
  spread the applications across racks (using
  [anti-affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity))
  or across zones (if using a [multi-zone cluster](/docs/setup/multiple-zones).)

The frequency of voluntary disruptions varies. On a basic Kubernetes cluster, there are
no automated voluntary disruptions (only user-triggered ones). However, your cluster
administrator or hosting provider may run some additional services that cause these
voluntary disruptions. For example, rolling out node software updates can cause
disruptions. Also, some implementations of cluster (node) autoscaling may cause
disruptions in order to defragment or compact nodes. Your cluster administrator or
hosting provider should have documented what level of voluntary disruptions, if any,
to expect. Certain configuration options, such as
[using PriorityClasses](/docs/concepts/scheduling-eviction/pod-priority-preemption/)
in your Pod spec, can also cause voluntary (or involuntary) disruptions.

## Pod Disruption Budgets

{{< feature-state for_k8s_version="v1.21" state="stable" >}}

Kubernetes offers features to help you run highly available applications even when
you introduce frequent voluntary disruptions.

As an application owner, you can create a Pod Disruption Budget (PDB) for each application.
A PDB limits the number of Pods of a replicated application that are down simultaneously
from voluntary disruptions. For example, a quorum-based application would like to ensure
that the number of replicas running never falls below the number needed for a quorum. A
web front end might want to ensure that the number of replicas serving traffic never
falls below a certain percentage of the total.

Cluster administrators and hosting providers should use tools that respect Pod Disruption
Budgets by calling the [Eviction API](/docs/tasks/administer-cluster/safely-drain-node/#eviction-api)
instead of directly deleting Pods or Deployments.

For example, the `kubectl drain` subcommand lets you mark a node as going out of
service. When you run `kubectl drain`, the tool tries to evict all of the Pods on
the node you are taking out of service. The eviction request that `kubectl` submits on
your behalf may be temporarily rejected, so the tool periodically retries all failed
requests until all Pods on the affected node are terminated, or until a configurable
timeout is reached.

A PDB specifies the number of replicas that an application can tolerate having, relative
to how many it is intended to have. For example, a Deployment which has `.spec.replicas: 5`
is supposed to have 5 Pods at any given time. If its PDB allows for there to be 4 at a
time, then the Eviction API will allow voluntary disruption of one (but not two) Pods at a time.
|
||||||
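A minimal sketch of such a budget follows; the name `myapp-pdb` and the `app: myapp` label are hypothetical, and `minAvailable: 4` matches the five-replica example above:

```shell
# A minimal sketch, assuming a Deployment whose Pods carry the
# hypothetical label app=myapp; 4 of the 5 replicas must stay up.
kubectl apply -f - <<EOF
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: myapp-pdb
spec:
  minAvailable: 4
  selector:
    matchLabels:
      app: myapp
EOF
```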
|
|
||||||
|
The group of Pods that comprise the application is specified using a label selector, the
same as the one used by the application's controller (Deployment, StatefulSet, etc.).

The "intended" number of Pods is computed from the `.spec.replicas` of the workload
resource that manages those Pods. The control plane discovers the owning workload
resource by examining the `.metadata.ownerReferences` of the Pod.

[Involuntary disruptions](#voluntary-and-involuntary-disruptions) cannot be prevented
by PDBs; however, they do count against the budget.

Pods which are deleted or unavailable due to a rolling upgrade to an application do
count against the disruption budget, but workload resources (such as Deployments and
StatefulSets) are not limited by PDBs when doing rolling upgrades. Instead, the handling
of failures during an application update is configured in the spec of that specific
workload resource.

When a Pod is evicted using the Eviction API, it is gracefully
[terminated](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination), honoring the
`terminationGracePeriodSeconds` setting in its
[PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core).

## PodDisruptionBudget example {#pdb-example}

Consider a cluster with 3 nodes, `node-1` through `node-3`.
The cluster is running several applications. One of them has 3 replicas, which we will
call `pod-a`, `pod-b`, and `pod-c`. Another, unrelated Pod without a PDB, called `pod-x`,
is also shown.

Initially, the Pods are laid out as follows:

| node-1            | node-2            | node-3            |
|:-----------------:|:-----------------:|:-----------------:|
| pod-a *available* | pod-b *available* | pod-c *available* |
| pod-x *available* |                   |                   |

All 3 Pods are part of a Deployment, and together they have a PDB which requires
that at least 2 of the 3 Pods be available at all times.

For example, suppose the cluster administrator wants to reboot to apply a kernel update
and fix a bug. The cluster administrator first tries to drain `node-1` using the
`kubectl drain` command. The tool tries to evict `pod-a` and `pod-x`. This succeeds
immediately. Both Pods go into the `terminating` state at the same time.
This puts the cluster in the following state:

| node-1 *draining*   | node-2            | node-3            |
|:-------------------:|:-----------------:|:-----------------:|
| pod-a *terminating* | pod-b *available* | pod-c *available* |
| pod-x *terminating* |                   |                   |

The Deployment notices that one of the Pods is terminating, so it creates a replacement
called `pod-d`. Since `node-1` is cordoned, the Pod lands on another node. Something has
also created `pod-y` as a replacement for `pod-x`.

(Note: for a StatefulSet, `pod-a`, which would be called something like `pod-0`, would
need to terminate completely before its replacement, which is also called `pod-0` but
has a different UID, could be created. Otherwise, the example applies to a StatefulSet
as well.)

Now the cluster is in this state:

| node-1 *draining*   | node-2            | node-3            |
|:-------------------:|:-----------------:|:-----------------:|
| pod-a *terminating* | pod-b *available* | pod-c *available* |
| pod-x *terminating* | pod-d *starting*  | pod-y             |

At some point, the Pods terminate, and the cluster looks like this:

| node-1 *drained*  | node-2            | node-3            |
|:-----------------:|:-----------------:|:-----------------:|
|                   | pod-b *available* | pod-c *available* |
|                   | pod-d *starting*  | pod-y             |

At this point, if an impatient cluster administrator tries to drain `node-2` or
`node-3`, the drain command will block, because there are only 2 available Pods for
the Deployment, and its PDB requires at least 2. After some time passes, `pod-d`
becomes available.

The cluster state now looks like this:

| node-1 *drained*  | node-2            | node-3            |
|:-----------------:|:-----------------:|:-----------------:|
|                   | pod-b *available* | pod-c *available* |
|                   | pod-d *available* | pod-y             |

Now, the cluster administrator drains `node-2`.
The drain command will try to evict the two Pods in some order, say `pod-b` first and
then `pod-d`. It will succeed at evicting `pod-b`. But, when it tries to evict `pod-d`,
it will be refused because that would leave only one Pod available for the Deployment.

The Deployment creates a replacement for `pod-b` called `pod-e`.
Because there are not enough resources available in the cluster to schedule `pod-e`,
the drain will again block. The cluster may end up in this state:

| node-1 *drained*  | node-2              | node-3            | *no node*       |
|:-----------------:|:-------------------:|:-----------------:|:---------------:|
|                   | pod-b *terminating* | pod-c *available* | pod-e *pending* |
|                   | pod-d *available*   | pod-y             |                 |

At this point, the cluster administrator needs to add a node back to the cluster in
order to proceed with the upgrade.

You can see how Kubernetes varies the rate at which disruptions can happen, according to:

- how many replicas an application needs
- how long it takes to gracefully shut down an instance
- how long it takes a new instance to start up
- the type of controller
- the cluster's resource capacity

## Separating Cluster Owner and Application Owner roles

Often, it is useful to think of the Cluster Administrator and the Application Owner as
separate roles with limited knowledge of each other. This separation of responsibilities
may make sense in these scenarios:

- when there are many application teams sharing a Kubernetes cluster and
  there is natural specialization of roles
- when third-party tools or services are used to automate cluster management

Pod Disruption Budgets support this separation of roles by providing an
interface between the roles.

If you do not have such a separation of responsibilities in your organization,
you may not need Pod Disruption Budgets.

## How to perform disruptive actions on your cluster

If you are a Cluster Administrator and you need to perform a disruptive action on all
the nodes in your cluster, such as a node or system software upgrade, here are some options:

- Accept downtime during the upgrade.
- Fail over to another complete replica cluster.
  - No downtime, but it may be costly both for the duplicated nodes
    and for the human effort to orchestrate the switch-over.
- Write disruption-tolerant applications and use PDBs.
  - No downtime.
  - Minimal resource duplication.
  - Allows much more automation of cluster administration.
  - Writing disruption-tolerant applications is tricky, but the work to tolerate
    voluntary disruptions largely overlaps with the work to support autoscaling
    and to tolerate involuntary disruptions.

## {{% heading "whatsnext" %}}

* Follow the steps to protect your application by
  [configuring a Pod Disruption Budget](/docs/tasks/run-application/configure-pdb/).
* Learn more about [draining nodes](/docs/tasks/administer-cluster/safely-drain-node/).
* Learn about [updating a Deployment](/docs/concepts/workloads/controllers/deployment/#updating-a-deployment),
  including the steps to maintain its availability during the rollout.
|
|
|
@ -0,0 +1,197 @@
|
||||||
|
---
title: Securing a Cluster
content_type: task
---

<!-- overview -->

This document covers topics related to protecting a cluster from accidental or
malicious access. It also provides recommendations on overall security.


## {{% heading "prerequisites" %}}


* {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}



<!-- steps -->

## Controlling access to the Kubernetes API

Because Kubernetes is entirely API-driven, controlling and limiting who can access the
cluster and what actions they are allowed to perform is the first line of defense.

### Use Transport Layer Security (TLS) for all API traffic

Kubernetes expects that all API communication in the cluster is encrypted by default
with TLS, and the majority of installation methods will allow the necessary certificates
to be created and distributed to the cluster components.

Note that some components and installation methods may enable local ports over HTTP;
administrators should familiarize themselves with the settings of each component to
identify potentially unsecured traffic.

### API authentication

When installing a cluster, choose an authentication mechanism for the API servers to
use that matches your common access patterns.
For instance, a small, single-user cluster may wish to use simple certificates or
static Bearer tokens.
A larger cluster may wish to integrate an existing OIDC or LDAP server that allows
users to be subdivided into groups.

All API clients must be authenticated, even those that are part of the infrastructure,
such as nodes, proxies, the scheduler, and volume plugins.
These clients are typically [service accounts](/docs/reference/access-authn-authz/service-accounts-admin/)
or use x509 client certificates, and are created automatically at cluster startup or
set up as part of the cluster installation.

See [Authentication](/ja/docs/reference/access-authn-authz/authentication/) for more information.

### API authorization

Once authenticated, every API call is expected to pass an authorization check.

Kubernetes ships with an integrated [RBAC](/ja/docs/reference/access-authn-authz/rbac/)
component that matches an incoming user or group to a set of permissions bundled into roles.
These permissions combine verbs (get, create, delete) with resources (pods, services,
nodes) and can be namespace-scoped or cluster-scoped.
A set of out-of-the-box roles is provided that offers reasonable default separation of
responsibility depending on what actions a client might want to perform.

It is recommended that you use the [Node](/docs/reference/access-authn-authz/node/) and
[RBAC](/ja/docs/reference/access-authn-authz/rbac/) authorizers together, in combination
with the [NodeRestriction](/docs/reference/access-authn-authz/admission-controllers/#noderestriction)
admission plugin.

As with authentication, simple and broad roles may be appropriate for smaller clusters.
However, as more users interact with the cluster, it may become necessary to separate
teams into different namespaces with more limited roles.
With authorization, it is important to understand how updates on one object may cause
actions elsewhere.

For instance, a user may not be able to create Pods directly, but allowing them to
create a Deployment, which creates Pods on their behalf, will let them create those
Pods indirectly.
Likewise, deleting a node from the API will result in the Pods scheduled to that node
being terminated and recreated on other nodes.
The out-of-the-box roles represent a balance between flexibility and common use cases,
but more limited roles should be carefully reviewed to prevent accidental escalation.
You can make roles specific to your use case if the out-of-the-box ones do not meet
your needs.

See the [authorization reference section](/docs/reference/access-authn-authz/authorization/)
for more information.

## Controlling access to the kubelet

Kubelets expose HTTPS endpoints which grant powerful control over the node and containers.
By default, kubelets allow unauthenticated access to this API.

Production clusters should enable kubelet authentication and authorization.

See the [kubelet authentication/authorization reference](/ja/docs/reference/command-line-tools-reference/kubelet-authentication-authorization) for more information.
|
||||||
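As a rough sketch of what this looks like in practice, anonymous access can be disabled and authorization delegated to the API server. The flag names are real kubelet options; the certificate path is a hypothetical placeholder:

```shell
# A minimal sketch; the certificate path is a hypothetical placeholder.
# Disable anonymous access and delegate authorization via webhook.
kubelet \
  --anonymous-auth=false \
  --client-ca-file=/etc/kubernetes/pki/ca.crt \
  --authorization-mode=Webhook
```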
|
|
||||||
|
## Controlling the capabilities of a workload or user at runtime

Authorization in Kubernetes is intentionally high level, focused on coarse actions on resources.

More powerful controls exist as **policies** to limit, by use case, how those objects
act on the cluster, themselves, and other resources.

### Limiting resource usage on a cluster

[Resource quotas](/ja/docs/concepts/policy/resource-quotas/) limit the number or
capacity of resources granted to a namespace.

They are most often used to limit the amount of CPU, memory, or persistent disk a
namespace can allocate, but can also control how many Pods, Services, or volumes exist
in each namespace.

[Limit ranges](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/)
restrict the maximum or minimum size of some of the resources above, to prevent users
from requesting unreasonably high or low values for commonly reserved resources such as
memory, or to provide default limits when none are specified.
|
||||||
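A minimal sketch of such a quota follows; the namespace `team-a` and all values are hypothetical:

```shell
# A minimal sketch; the namespace and values are hypothetical examples.
kubectl apply -f - <<EOF
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-quota
  namespace: team-a
spec:
  hard:
    requests.cpu: "4"
    requests.memory: 8Gi
    pods: "20"
EOF
```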
|
|
||||||
|
### Controlling what privileges containers run with

A Pod definition contains a [security context](/docs/tasks/configure-pod-container/security-context/)
that allows it to request access to run as a specific Linux user on a node (such as
root), access to run privileged, access to the host network, and other controls.
[Pod security policies](/docs/concepts/policy/pod-security-policy/) can limit which
users or service accounts can provide dangerous security context settings.

For example, Pod security policies can limit volume mounts, especially `hostPath`,
which is an aspect of a Pod that should be controlled.
Generally, most application workloads need only limited access to host resources;
they can run successfully as a root process (uid 0) without access to host information.
However, considering the privileges associated with the root user, you should write
application containers to run as a non-root user.

## Preventing containers from loading unwanted kernel modules

The Linux kernel automatically loads kernel modules from disk as needed in certain
circumstances, such as when a piece of hardware is attached or a filesystem is mounted.
Of particular relevance to Kubernetes, even unprivileged processes can cause certain
network-protocol-related kernel modules to be loaded, just by creating a socket of the
appropriate type. This may allow an attacker to exploit a security hole in a kernel
module that the administrator assumed was not in use.
To prevent specific modules from being automatically loaded, you can uninstall them
from the node, or add rules to block them.

On most Linux distributions, this can be done by creating a file such as
`/etc/modprobe.d/kubernetes-blacklist.conf` with contents like:

```
# DCCP is unlikely to be needed, has had multiple serious
# vulnerabilities, and is not well-maintained.
blacklist dccp

# SCTP is not used in most Kubernetes clusters, and has also had
# vulnerabilities in the past.
blacklist sctp
```

To block module loading more generically, you can use a Linux Security Module (such as
SELinux) to completely deny the `module_request` permission to containers, preventing
the kernel from loading modules for containers under any circumstances.
(Pods can still use modules that have been loaded manually, or modules that were loaded
by the kernel on behalf of a more privileged process.)


### Restricting network access

The [network policies](/ja/docs/tasks/administer-cluster/declare-network-policy/) for a
namespace allow application authors to restrict which Pods in other namespaces may
access Pods and ports within their namespace.

Many of the supported [Kubernetes networking providers](/ja/docs/concepts/cluster-administration/networking/)
now respect network policy.
Quotas and limit ranges can also be used to control whether users may request node
ports or load-balanced services, which on many clusters controls whether a user's
application is visible outside of the cluster.
Additional protections may be available per plugin or per environment that control
network rules, such as per-node firewalls, physically separating cluster nodes to
prevent cross-talk, or advanced network policy.

### Restricting cloud metadata API access

Cloud platforms (AWS, Azure, GCE, etc.) often expose metadata services locally to instances.
By default, these APIs are accessible by Pods running on an instance, and can contain
cloud credentials for that node, or provisioning data such as kubelet credentials.
These credentials can be used to escalate within the cluster or to other cloud services
under the same account.

When running Kubernetes on a cloud platform, limit the permissions given to instance
credentials, use [network policies](/ja/docs/tasks/administer-cluster/declare-network-policy/)
to restrict Pod access to the metadata API, and avoid using provisioning data to deliver secrets.

### Controlling which nodes Pods may access

By default, there are no restrictions on which nodes may run a Pod.
Kubernetes offers [Pod scheduling onto nodes](/ja/docs/concepts/scheduling-eviction/assign-pod-node/)
and [taints and tolerations](/ja/docs/concepts/scheduling-eviction/taint-and-toleration/)
that are available to end users.
For many clusters, using these policies to separate workloads can be a convention that
authors adopt or that tooling enforces.

As an administrator, the beta admission plugin PodNodeSelector can be used to force
Pods within a namespace to default to, or require, a specific node selector.
If end users cannot alter namespaces, this can strongly limit the placement of all of
the Pods in a specific workload.

## Protecting cluster components from compromise

This section describes some common patterns for protecting clusters from compromise.

### Restrict access to etcd

Write access to the etcd backend for the API is equivalent to gaining root on the
entire cluster, and read access can be used to escalate fairly quickly.
Administrators should always use strong credentials from the API servers to their etcd
server, such as mutual auth via TLS client certificates, and it is often recommended to
isolate the etcd servers behind a firewall that only the API servers may access.

{{< caution >}}
Allowing other components within the cluster to access the master etcd instance with
read or write access to the full keyspace is equivalent to granting cluster-admin access.
Using separate etcd instances for non-master components, or using etcd ACLs to restrict
read and write access to a subset of the keyspace, is strongly recommended.
{{< /caution >}}

### Enable audit logging

The [audit logger](/docs/tasks/debug-application-cluster/audit/) is a beta feature that
records actions taken by the API, for later analysis in the event of a compromise.

It is recommended to enable audit logging and archive the audit file on a secure server.

### Restrict access to alpha or beta features

Alpha and beta Kubernetes features are in active development and may have limitations
or bugs that result in security vulnerabilities.
Always assess the value an alpha or beta feature may provide against the possible risk
to your security posture.
When in doubt, disable features you do not use.

### Rotate infrastructure credentials frequently

The shorter the lifetime of a secret or credential, the harder it is for an attacker to
make use of that credential.
Set short lifetimes on certificates and automate their rotation.
Use an authentication provider that can control how long issued tokens are available,
and use short lifetimes where possible.
If you use service account tokens in external integrations, plan to rotate those tokens
frequently.
For example, once the bootstrap phase is complete, a bootstrap token used for setting
up nodes should be revoked or its authorization removed.

### Review third party integrations before enabling them

Many third party integrations to Kubernetes may alter the security profile of your cluster.
When enabling an integration, always review the permissions that an extension requests
before granting it access.

For example, many security integrations may request access to view all secrets on your
cluster, which effectively makes that component a cluster admin.
When in doubt, restrict the integration to functioning in a single namespace if possible.
Components that create Pods may also be unexpectedly powerful if they can do so inside
namespaces like the `kube-system` namespace, because those Pods may gain access to
service account secrets, or run with elevated permissions if those service accounts are
granted access to permissive [pod security policies](/docs/concepts/policy/pod-security-policy/).

### Encrypt secrets at rest

In general, the etcd database contains any information accessible via the Kubernetes
API and may grant an attacker significant visibility into the state of your cluster.
Always encrypt your backups using a well reviewed backup and encryption solution, and
consider using full disk encryption where possible.

Kubernetes supports [encryption at rest](/docs/tasks/administer-cluster/encrypt-data/),
a feature introduced in 1.7 and beta since 1.13.
This encrypts `Secret` resources in etcd, preventing parties that gain access to your
etcd backups from viewing the content of those secrets.
While this feature is currently beta, it offers an additional level of defense when
backups are not encrypted or an attacker gains read access to etcd.
|
||||||
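A rough sketch of what such a configuration can look like follows; the file path and key name are hypothetical placeholders, and the file is passed to kube-apiserver via `--encryption-provider-config`:

```shell
# A minimal sketch; the path and key name are hypothetical placeholders.
cat > /etc/kubernetes/enc/encryption.yaml <<EOF
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: $(head -c 32 /dev/urandom | base64)
      - identity: {}
EOF
```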
|
|
||||||
|
### Receiving alerts for security updates and reporting vulnerabilities

Join the [kubernetes-announce](https://groups.google.com/forum/#!forum/kubernetes-announce) group.
Joining the group will allow you to receive emails about security announcements.
See the [security reporting](/docs/reference/issues-security/security/) page for how to report vulnerabilities.
|
|
@ -0,0 +1,872 @@
|
||||||
|
---
|
||||||
|
title: Gerenciamento de recursos em Pods e contêineres
|
||||||
|
content_type: concept
|
||||||
|
weight: 40
|
||||||
|
feature:
|
||||||
|
title: Empacotamento automático
|
||||||
|
description: >
|
||||||
|
Distribui contêineres automaticamente com base em requerimentos de recursos
|
||||||
|
e em outras restrições, evitando sacrificar disponibilidade.
|
||||||
|
Combina cargas de trabalho críticas com cargas de trabalho de prioridades
|
||||||
|
mais baixas para melhorar a utilização e reduzir o desperdício de recursos.
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- overview -->
|
||||||
|
|
||||||
|
Ao criar a especificação de um {{< glossary_tooltip term_id="pod" >}}, você pode
|
||||||
|
opcionalmente especificar quanto de cada recurso um {{< glossary_tooltip text="contêiner" term_id="container" >}}
|
||||||
|
precisa. Os recursos mais comuns a serem especificados são CPU e memória (RAM);
|
||||||
|
há outros recursos que podem ser especificados.
|
||||||
|
|
||||||
|
Quando você especifica o _requerimento_ de recursos em um Pod, o
|
||||||
|
{{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}} utiliza
|
||||||
|
esta informação para decidir a qual nó o Pod será atribuído. Quando você
|
||||||
|
especifica um _limite_ de recurso para um contêiner, o kubelet garante o
|
||||||
|
cumprimento de tais limites, de modo que o contêiner em execução não consiga
|
||||||
|
utilizar uma quantidade de tal recurso além do limite especificado. O kubelet
|
||||||
|
também reserva pelo menos o _requerimento_ daquele recurso de sistema
|
||||||
|
especificamente para que este contêiner utilize.
|
||||||
|
|
||||||
|
<!-- body -->
|
||||||
|
|
||||||
|
## Requests and limits

If the node where a Pod is running has enough of a resource available, it's possible (and allowed) for a container to use more of that resource than its `request` for that resource specifies. However, a container is not allowed to use more than its resource `limit`.

For example, if you specify a `memory` request of 256 MiB for a container, and that container is in a Pod scheduled to a node with 8 GiB of memory and no other Pods, then the container can try to use more RAM.

If you set a `memory` limit of 4 GiB for that container, the kubelet (and the {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}) enforce the limit. The runtime prevents the container from using more of the resource than the configured limit. For example: when a process in the container tries to consume more than the allowed amount of memory, the system kernel terminates the process that attempted the allocation with an out of memory (OOM) error.
Limits can be implemented either reactively (the system intervenes once it sees a violation) or by enforcement (the system prevents the container from ever exceeding the limit). Different runtimes can have different ways to implement the same restrictions.

{{< note >}}
If a container specifies its own memory limit, but does not specify a memory request, Kubernetes automatically assigns a memory request that matches the limit. The same rule applies to CPU: if a container specifies its own CPU limit but no CPU request, Kubernetes automatically assigns a CPU request that matches the limit.
{{< /note >}}
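For example, the following container fragment is a sketch of that defaulting rule: only limits are declared, so Kubernetes assigns matching requests (`cpu: 500m`, `memory: 128Mi`) automatically.

```yaml
# Sketch: requests are omitted, so Kubernetes defaults them to the limits below.
resources:
  limits:
    cpu: "500m"
    memory: "128Mi"
```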
## Resource types

_CPU_ and _memory_ are each a _resource type_. A resource type has a base unit. CPU represents compute processing and is specified in units of [Kubernetes CPUs](#meaning-of-cpu). Memory is specified in bytes. For Linux workloads, you can specify _huge page_ resources. Huge pages are a Linux-specific feature where the node kernel allocates blocks of memory that are much larger than the default page size.
For example, on a system where the default page size is 4 KiB, you could specify a limit `hugepages-2Mi: 80Mi`. If the container tries to allocate more than 40 huge pages of 2 MiB each (a total of 80 MiB), that allocation fails.

{{< note >}}
You cannot overcommit `hugepages-*` resources.
This is different from the `memory` and `cpu` resources.
{{< /note >}}
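As a sketch of how that limit appears in a container spec (and because huge pages cannot be overcommitted, the request must equal the limit when both are set):

```yaml
# Sketch: asking for 40 huge pages of 2 MiB each; request and limit must match
# because hugepages-* resources cannot be overcommitted.
resources:
  requests:
    hugepages-2Mi: 80Mi
    memory: 100Mi
  limits:
    hugepages-2Mi: 80Mi
    memory: 100Mi
```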
CPU and memory are collectively referred to as _compute resources_, or simply _resources_. Compute resources are measurable quantities that can be requested, allocated, and consumed. They are distinct from [API resources](/docs/concepts/overview/kubernetes-api/). API resources, such as Pods and [Services](/docs/concepts/services-networking/service/), are objects that can be read and modified through the Kubernetes API server.
## Pod and container resource requests and limits

For each container, you can specify resource limits and requests, including the following:

* `spec.containers[].resources.limits.cpu`
* `spec.containers[].resources.limits.memory`
* `spec.containers[].resources.limits.hugepages-<size>`
* `spec.containers[].resources.requests.cpu`
* `spec.containers[].resources.requests.memory`
* `spec.containers[].resources.requests.hugepages-<size>`

Although you can only specify requests and limits for individual containers, it is also useful to think about the overall resource requests and limits of a Pod. For a particular resource, a _Pod resource request/limit_ is the sum of the resource requests/limits of that type for each container in the Pod.
## Resource units in Kubernetes

### CPU resource units {#meaning-of-cpu}

Limits and requests for CPU resources are measured in _cpu_ units. In Kubernetes, 1 CPU unit is equivalent to **1 physical CPU core**, or **1 virtual core**, depending on whether the node is a physical machine or a virtual machine running on a physical machine.
Fractional requests are allowed. When you define a container with `spec.containers[].resources.requests.cpu` set to `0.5`, you are requesting half as much CPU time compared to if you had asked for `1.0`. For CPU resource units, the [quantity](/docs/reference/kubernetes-api/common-definitions/quantity/) expression `0.1` is equivalent to the expression `100m`, which can be read as "one hundred millicpu" or "one hundred millicores". A "millicpu" or "millicore" is one thousandth of a CPU or core, so `100m` corresponds to 10% of the compute time of one processor.
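A short sketch of the two equivalent spellings in a container spec:

```yaml
# Sketch: this fragment requests the same amount of CPU time either way.
resources:
  requests:
    cpu: "500m"   # identical to cpu: "0.5"
```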
CPU resource is always specified as an absolute amount of resource, never as a relative amount. For example, `500m` CPU represents roughly the same amount of computing power whether that container runs on a single-core, dual-core, or 48-core machine.

{{< note >}}
Kubernetes doesn't allow you to specify CPU resources with a precision finer than `1m`. Because of this, it's useful to specify CPU units smaller than `1.0` or `1000m` using the millicpu form; for example, `5m` rather than `0.005`.
{{< /note >}}
### Memory resource units {#meaning-of-memory}

Limits and requests for `memory` are measured in bytes. You can express memory as a plain integer or as a fixed-point number using one of these [quantity](/docs/reference/kubernetes-api/common-definitions/quantity/) suffixes: E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. For example, the following represent roughly the same value:
```shell
128974848, 129e6, 129M, 128974848000m, 123Mi
```
Pay attention to the case of the suffixes. If you request `400m` of memory, this is in fact a request for 0.4 bytes. Whoever wrote that request probably meant to ask for 400 mebibytes (`400Mi`) or 400 megabytes (`400M`).
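To make the pitfall concrete, compare the two spellings in the sketch below; only the second asks for a useful amount of memory.

```yaml
# Sketch of the suffix pitfall: a lowercase "m" means milli-units.
resources:
  requests:
    memory: "400m"     # 0.4 bytes -- almost certainly a mistake
    # memory: "400Mi"  # 400 mebibytes -- probably what was intended
```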
## Container resources example {#example-1}

The following Pod has two containers. Both containers are defined with a request of 0.25 CPU and 64 MiB (2<sup>26</sup> bytes) of memory. Each container has a limit of 0.5 CPU and 128 MiB of memory. You can say the Pod has a request of 0.5 CPU and 128 MiB of memory, and a limit of 1 CPU and 256 MiB of memory.
```yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: frontend
spec:
  containers:
  - name: app
    image: images.my-company.example/app:v4
    resources:
      requests:
        memory: "64Mi"
        cpu: "250m"
      limits:
        memory: "128Mi"
        cpu: "500m"
  - name: log-aggregator
    image: images.my-company.example/log-aggregator:v6
    resources:
      requests:
        memory: "64Mi"
        cpu: "250m"
      limits:
        memory: "128Mi"
        cpu: "500m"
```
## How Pods with resource requests are scheduled

When you create a Pod, the Kubernetes scheduler selects a node for the Pod to run on. Each node has a maximum capacity for each of the resource types: the amount of CPU and memory it can provide for Pods. The scheduler ensures that, for each resource type, the sum of the resource requests of the scheduled containers is less than the capacity of the node. Note that although actual memory or CPU usage on nodes may be very low, the scheduler still refuses to place a Pod on a node if the capacity check fails. This protects against a resource shortage on a node when resource usage later increases, for example during a daily peak in requests to a service.
## How Kubernetes applies resource requests and limits {#how-pods-with-resource-limits-are-run}

When the kubelet starts a container as part of a Pod, the kubelet passes that container's requests and limits for memory and CPU to the container runtime.

On Linux, the container runtime typically configures kernel {{< glossary_tooltip text="cgroups" term_id="cgroup" >}} that apply and enforce the limits you defined. The list below summarizes how each setting is used; a sketch of the resulting cgroup values follows the list.
- The CPU limit defines a hard ceiling on how much CPU time the container can use. During each scheduling interval, the Linux kernel checks whether this limit is exceeded; if so, the kernel waits before allowing that cgroup to resume execution.
- The CPU request typically defines a weighting. If several different containers (cgroups) want to run on a contended system, workloads with larger CPU requests are allocated more CPU time than workloads with small requests.
- The memory request is mainly used during Pod scheduling. On a node that uses cgroups v2, the container runtime might use the memory request as a hint to set `memory.min` and `memory.low`.
- The memory limit defines a memory limit for that cgroup. If the container tries to allocate more memory than this limit, the Linux kernel out-of-memory subsystem activates and, typically, intervenes by stopping one of the processes in the container that tried to allocate memory. If that process is the container's PID 1, and the container is marked as restartable, Kubernetes restarts the container.
- The memory limit for a Pod or container also applies to pages in memory-backed volumes, such as an `emptyDir`. The kubelet tracks `tmpfs` filesystems in `emptyDir` volumes as container memory use, rather than as local ephemeral storage.
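As a rough sketch of that mapping (assuming cgroup v1 and the default 100 ms CFS period; the exact values are runtime- and kernel-dependent):

```yaml
# Sketch: common translation of CPU settings to Linux CFS cgroup values,
# assuming cgroup v1 with the default 100ms period.
resources:
  requests:
    cpu: "250m"   # -> cpu.shares of roughly 256 (250/1000 * 1024), a relative weight
  limits:
    cpu: "500m"   # -> cpu.cfs_quota_us = 50000 out of cpu.cfs_period_us = 100000
```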
If a container exceeds its memory request and the node that it runs on becomes short of memory overall, it is likely that the Pod the container belongs to will be {{< glossary_tooltip text="evicted" term_id="eviction" >}}.

A container might or might not be allowed to exceed its CPU limit for extended periods of time. However, container runtimes don't terminate Pods for excessive CPU usage.

To determine whether a container cannot be scheduled or is being killed due to resource limits, see the [Troubleshooting](#troubleshooting) section.
### Monitoring compute and memory resource usage

The kubelet reports the resource usage of a Pod as part of the Pod [`status`](/docs/concepts/overview/working-with-objects/kubernetes-objects/#object-spec-and-status).

If optional tools for [resource monitoring](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) are available in your cluster, then Pod resource usage can be retrieved either from the [Metrics API](/docs/tasks/debug-application-cluster/resource-metrics-pipeline/#the-metrics-api) directly or from your monitoring tools.
## Local ephemeral storage

<!-- feature gate LocalStorageCapacityIsolation -->
{{< feature-state for_k8s_version="v1.10" state="beta" >}}

Nodes have local ephemeral storage, backed by locally-attached writeable devices or, sometimes, by RAM. "Ephemeral" means that there is no long-term guarantee about durability.
Pods use ephemeral local storage for scratch space, caching, and logs. The kubelet can provide scratch space to Pods using local ephemeral storage to mount [`emptyDir`](/docs/concepts/storage/volumes/#emptydir) {{< glossary_tooltip term_id="volume" text="volumes" >}} into containers.

The kubelet also uses this kind of storage to hold [node-level container logs](/pt-br/docs/concepts/cluster-administration/logging/#logs-no-nível-do-nó), container images, and the writable layers of running containers.

{{< caution >}}
If a node fails, the data in its ephemeral storage can be lost. Your applications cannot expect any performance SLAs (such as disk IOPS, for example) from local ephemeral storage.
{{< /caution >}}
As a beta feature, Kubernetes lets you track, reserve and limit the amount of local ephemeral storage a Pod can consume.

### Configurations for local ephemeral storage {#configurations-for-local-ephemeral-storage}

Kubernetes supports two ways to configure local ephemeral storage on a node:
{{< tabs name="local_storage_configurations" >}}
{{% tab name="Single filesystem" %}}
In this configuration, you place all different kinds of ephemeral local data (`emptyDir` volumes, writeable layers, container images, logs) into one filesystem. The most effective way to configure the kubelet is to dedicate this filesystem to Kubernetes (kubelet) data.

The kubelet also writes [node-level container logs](/pt-br/docs/concepts/cluster-administration/logging/#logs-no-nível-do-nó) and treats these similarly to local ephemeral storage.

The kubelet writes logs to files inside its configured log directory (`/var/log` by default), and has a base directory for other locally stored data (`/var/lib/kubelet` by default).

Typically, both `/var/lib/kubelet` and `/var/log` are on the system root filesystem, and the kubelet is designed with that layout in mind.

Your node can have as many other filesystems, not used for Kubernetes, as you like.
{{% /tab %}}

{{% tab name="Two filesystems" %}}
You have a filesystem on the node that you use for ephemeral data that comes from running Pods: logs and `emptyDir` volumes. You can use this filesystem for other data (for example, system logs not related to Kubernetes); it can even be the root filesystem.

The kubelet also writes [node-level container logs](/pt-br/docs/concepts/cluster-administration/logging/#logs-no-nível-do-nó) into the first filesystem and treats these similarly to local ephemeral storage.

You also use a separate filesystem, backed by a different logical storage device. In this configuration, the directory where you tell the kubelet to place container image layers and the writeable layers of running containers is on this second filesystem.

The first filesystem does not hold any image layers or writeable layers.

Your node can have as many other filesystems, not used for Kubernetes, as you like.
{{% /tab %}}
{{< /tabs >}}
The kubelet can measure how much local storage it is using. It does this provided that:

- the `LocalStorageCapacityIsolation` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled (the feature is on by default), and
- you have set up the node using one of the supported configurations for local ephemeral storage.

If you have a different configuration, then the kubelet does not apply resource limits for local ephemeral storage.

{{< note >}}
The kubelet tracks `emptyDir` volumes that use the `tmpfs` filesystem as container memory use, rather than as local ephemeral storage consumption.
{{< /note >}}
### Setting requests and limits for local ephemeral storage

You can specify the `ephemeral-storage` resource to manage local ephemeral storage. Each container of a Pod can specify either or both of the following:

* `spec.containers[].resources.limits.ephemeral-storage`
* `spec.containers[].resources.requests.ephemeral-storage`

Limits and requests for `ephemeral-storage` are measured in byte quantities. You can express storage as a plain integer or as a fixed-point number using one of these suffixes: E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. For example, the following quantities all represent roughly the same value:

- `128974848`
- `129e6`
- `129M`
- `123Mi`

In the following example, the Pod has two containers. Each container has a request of 2 GiB of local ephemeral storage. Each container has a limit of 4 GiB of local ephemeral storage. Therefore, the Pod has a request of 4 GiB of local ephemeral storage, and a limit of 8 GiB of local ephemeral storage.
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: frontend
spec:
  containers:
  - name: app
    image: images.my-company.example/app:v4
    resources:
      requests:
        ephemeral-storage: "2Gi"
      limits:
        ephemeral-storage: "4Gi"
    volumeMounts:
    - name: ephemeral
      mountPath: "/tmp"
  - name: log-aggregator
    image: images.my-company.example/log-aggregator:v6
    resources:
      requests:
        ephemeral-storage: "2Gi"
      limits:
        ephemeral-storage: "4Gi"
    volumeMounts:
    - name: ephemeral
      mountPath: "/tmp"
  volumes:
  - name: ephemeral
    emptyDir: {}
```
### How Pods with ephemeral-storage requests are scheduled

When you create a Pod, the Kubernetes scheduler selects a node for the Pod to run on. Each node has a maximum amount of local ephemeral storage it can provide for Pods. For more information, see [Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable).

The scheduler ensures that the sum of the resource requests of the scheduled containers is less than the capacity of the node.
### Ephemeral storage consumption management {#resource-emphemeralstorage-consumption}

If the kubelet is managing local ephemeral storage as a resource, then the kubelet measures storage use in:

- `emptyDir` volumes, except `tmpfs` volumes
- directories holding node-level logs
- writeable container layers

If a Pod is using more ephemeral storage than you allow it to, the kubelet sets an eviction signal for that Pod.

For container-level isolation, if a container's writable-layer and log usage exceeds its storage limit, the kubelet marks the Pod for eviction.

For pod-level isolation, the kubelet works out an overall Pod storage limit by summing the limits of the containers in that Pod. In this case, if the sum of the local ephemeral storage usage from all containers and also the Pod's `emptyDir` volumes exceeds the overall Pod storage limit, then the kubelet marks the Pod for eviction.
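You can also bound a single volume instead of relying only on the per-Pod sum. As a sketch, the optional `sizeLimit` field of an `emptyDir` volume caps how much ephemeral storage that one volume may consume (the volume name and value below are illustrative):

```yaml
# Sketch: capping one emptyDir volume at 500 MiB of ephemeral storage.
volumes:
- name: scratch          # hypothetical volume name
  emptyDir:
    sizeLimit: 500Mi
```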
{{< caution >}}
If the kubelet is not measuring local ephemeral storage, then a Pod that exceeds its local storage limit will not be evicted for breaching local storage resource limits.

However, if the filesystem space available for writeable container layers, node-level logs, or `emptyDir` volumes falls low, the node marks itself with a {{< glossary_tooltip text="taint" term_id="taint" >}} indicating it is short on local storage, and this taint triggers eviction of any Pods that don't specifically tolerate it.

See the supported [configurations](#configurations-for-local-ephemeral-storage) for local ephemeral storage.
{{< /caution >}}
The kubelet supports different ways to measure Pod storage use:
{{< tabs name="resource-emphemeralstorage-measurement" >}}
{{% tab name="Periodic scanning" %}}
The kubelet performs regular, scheduled checks that scan each `emptyDir` volume, container log directory, and writeable container layer.

The scan measures how much space is in use.

{{< note >}}
In this mode, the kubelet does not track open file descriptors for deleted files.

If you (or a container) create a file inside an `emptyDir` volume, something then opens that file, and you delete the file while it is still open, then the inode for the deleted file stays until you close that file, but the kubelet does not categorize the space as in use.
{{< /note >}}
{{% /tab %}}
{{% tab name="Quota de projeto do sistema de arquivos" %}}
|
||||||
|
|
||||||
|
Quotas de projeto são uma funcionalidade a nível de sistema operacional para
|
||||||
|
gerenciamento de uso do armazenamento em sistemas de arquivos. Com o Kubernetes,
|
||||||
|
você pode habilitar quotas de projeto para o monitoramento de armazenamento em
|
||||||
|
uso. Tenha certeza que o sistema de arquivos do nó que esteja sendo utilizado em
|
||||||
|
volumes do tipo `emptyDir` possui suporte a quotas de projeto. Por exemplo,
|
||||||
|
os sistemas de arquivos XFS e ext4fs oferecem suporte a quotas de projeto.
|
||||||
|
|
||||||
|
{{< note >}}
|
||||||
|
Quotas de projeto permitem o monitoramento do uso de armazenamento, mas não
|
||||||
|
garantem limites.
|
||||||
|
{{< /note >}}
|
||||||
|
|
||||||
|
O Kubernetes utiliza IDs de projeto iniciando em `1048576`. Os IDs em uso estão
|
||||||
|
registrados nos diretórios `/etc/projects` e `/etc/projid`. Se os IDs de projeto
|
||||||
|
nestes intervalos forem utilizados para outros propósitos no sistema, estes IDs
|
||||||
|
de projeto deverão estar registrados nos diretórios especificados acima para que
|
||||||
|
o Kubernetes não os tente utilizar.
|
||||||
|
|
||||||
|
Quotas fornecem melhor desempenho e mais precisão do que varredura de diretórios.
|
||||||
|
Quando um diretório é atribuído a um projeto, todos os arquivos criados no
|
||||||
|
diretório são também criados no projeto, e o núcleo do sistema pode simplesmente
|
||||||
|
manter controle de quantos blocos estão em uso por arquivos daquele projeto. Se
|
||||||
|
um arquivo é criado e apagado, mas possui um descritor de arquivo aberto, ele
|
||||||
|
continua a consumir espaço. O rastreio de quotas registra este espaço de forma
|
||||||
|
precisa, enquanto varreduras de diretório ignoram o uso de espaço de
|
||||||
|
armazenamento por arquivos apagados.
|
||||||
|
|
||||||
|
Se você deseja utilizar quotas de projeto, você deve:
|
||||||
|
|
||||||
|
* Habilitar o [_feature gate_](/docs/reference/command-line-tools-reference/feature-gates/)
|
||||||
|
`LocalStorageCapacityIsolationFSQuotaMonitoring=true` utilizando o campo
|
||||||
|
`featureGates` na [configuração do kubelet](/docs/reference/config-api/kubelet-config.v1beta1/)
|
||||||
|
ou a opção de linha de comando `--feature-gates`.
|
||||||
|
|
||||||
|
* Garantir que o sistema de arquivos raiz (ou o sistema de arquivos opcional de
|
||||||
|
tempo de execução) tem quotas de projeto habilitadas. Todos os sistemas de
|
||||||
|
arquivos XFS suportam quotas de projeto. Em sistemas de arquivos ext4, você
|
||||||
|
precisa habilitar a funcionalidade de rastreio de quotas de projeto enquanto
|
||||||
|
o sistema de arquivos ainda não está montado.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Para sistema de arquivos ext4, com o volume /dev/block-device não montado
|
||||||
|
sudo tune2fs -O project -Q prjquota /dev/block-device
|
||||||
|
```
|
||||||
|
|
||||||
|
* Garanta que o sistema de arquivos raiz (ou sistema de arquivos opcional de
|
||||||
|
tempo de execução) esteja montado com quotas de projeto habilitadas. Em ambos
|
||||||
|
os sistemas XFS e ext4fs, a opção de montagem é chamada `prjquota`.
|
||||||
|
|
||||||
|
{{% /tab %}}
|
||||||
|
{{< /tabs >}}
|
||||||
|
|
||||||
|
## Extended resources

Extended resources are fully-qualified resource names outside the `kubernetes.io` domain. They allow cluster operators to advertise, and users to consume, resources that are not built into Kubernetes.

There are two steps required to use extended resources. First, the cluster operator must advertise an extended resource. Second, users must request the extended resource in Pods.
### Managing extended resources

#### Node-level extended resources

Node-level extended resources are tied to nodes.

##### Device plugin managed resources

See [Device Plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) for how to advertise device plugin managed resources on each node.

##### Other resources
To advertise a new node-level extended resource, the cluster operator can submit a `PATCH` HTTP request to the Kubernetes API server to specify the available quantity in the `status.capacity` field for a node in the cluster. After this operation, the node's `status.capacity` will include the new resource. The `status.allocatable` field is updated automatically, and asynchronously, by the kubelet with the new resource.

Because the scheduler uses the node's `status.allocatable` value when evaluating whether a Pod fits, the scheduler only takes the new value into account after that asynchronous update. There may be a short delay between patching the node capacity with a new resource and the time when the first Pod that requests the resource can be scheduled on that node.

**Example:**

This example shows how to use `curl` to form an HTTP request that advertises five "example.com/foo" resources on node `k8s-node-1`, whose control plane node is `k8s-master`.
```shell
curl --header "Content-Type: application/json-patch+json" \
  --request PATCH \
  --data '[{"op": "add", "path": "/status/capacity/example.com~1foo", "value": "5"}]' \
  http://k8s-master:8080/api/v1/nodes/k8s-node-1/status
```
{{< note >}}
In the preceding request, `~1` is the encoding for the character `/` in the patch path. The operation path value in JSON-Patch is interpreted as a JSON-Pointer. For more details, see [IETF RFC 6901, section 3](https://tools.ietf.org/html/rfc6901#section-3).
{{< /note >}}
#### Cluster-level extended resources

Cluster-level extended resources are not tied to nodes. They are usually managed by scheduler extenders, which handle the resource consumption and resource quota.

You can specify the extended resources that are handled by scheduler extenders in the [kube-scheduler configuration](/docs/reference/config-api/kube-scheduler-config.v1beta3/).

**Example:**

The following configuration for a scheduler policy indicates that the cluster-level extended resource "example.com/foo" is handled by the scheduler extender.

- The scheduler sends a Pod to the scheduler extender only if the Pod requests "example.com/foo".
- The `ignoredByScheduler` field specifies that the scheduler does not check the "example.com/foo" resource in its `PodFitsResources` predicate.
```json
{
  "kind": "Policy",
  "apiVersion": "v1",
  "extenders": [
    {
      "urlPrefix": "<extender-endpoint>",
      "bindVerb": "bind",
      "managedResources": [
        {
          "name": "example.com/foo",
          "ignoredByScheduler": true
        }
      ]
    }
  ]
}
```
### Consuming extended resources

Users can consume extended resources in Pod specs, like CPU and memory. The scheduler takes care of the resource accounting so that no more than the available amount is simultaneously allocated to Pods.

The API server restricts quantities of extended resources to whole numbers. Examples of _valid_ quantities are `3`, `3000m` and `3Ki`. Examples of _invalid_ quantities are `0.5` and `1500m`.

{{< note >}}
Extended resources replace Opaque Integer Resources. Users can use any domain name prefix other than `kubernetes.io`, which is reserved.
{{< /note >}}

To consume an extended resource in a Pod, include the resource name as a key in the `spec.containers[].resources.limits` map in the container spec.

{{< note >}}
Extended resources cannot be overcommitted, so `request` and `limit` must be equal if both are present in a container spec.
{{< /note >}}

A Pod is scheduled only if all of its resource requests are satisfied, including CPU, memory and any extended resources. The Pod remains in the `PENDING` state as long as its resource requests cannot be satisfied.

**Example:**

The Pod below requests 2 CPUs and 1 "example.com/foo" (an extended resource).
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
  - name: my-container
    image: myimage
    resources:
      requests:
        cpu: 2
        example.com/foo: 1
      limits:
        example.com/foo: 1
```
## PID limiting

Process ID (PID) limits allow for the configuration of a kubelet to limit the number of PIDs that a given Pod can consume. See [PID Limiting](/docs/concepts/policy/pid-limiting/) for information.
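As a sketch of one way this can be configured, the kubelet configuration file format exposes a `podPidsLimit` field (the value below is illustrative):

```yaml
# Sketch: a kubelet configuration fragment capping each Pod at 4096 process IDs.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
podPidsLimit: 4096
```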
## Troubleshooting {#troubleshooting}

### My Pods are pending with a `FailedScheduling` event

If the scheduler cannot find any node where a Pod can fit, the Pod remains unscheduled until a place can be found. An [Event](/docs/reference/kubernetes-api/cluster-resources/event-v1/) is produced each time the scheduler fails to find a place for the Pod. You can use `kubectl` to view a Pod's events; for example:

```shell
kubectl describe pod frontend | grep -A 9999999999 Events
```
```
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  23s   default-scheduler  0/42 nodes available: insufficient cpu
```
In the preceding example, the Pod named "frontend" fails to be scheduled because no node has enough CPU to satisfy its CPU request. Similar error messages can also suggest failure due to insufficient memory (`PodExceedsFreeMemory`). In general, if a Pod is pending with a message of this type, there are several things to try:

- Add more nodes to the cluster.
- Terminate unneeded Pods to make room for pending Pods.
- Check that the Pod is not larger than all the nodes. For example, if all the nodes have a capacity of `cpu: 1`, then a Pod with a request of `cpu: 1.1` will never be scheduled.
- Check for node taints. If most of your nodes are tainted, and the new Pod does not tolerate that taint, the scheduler only considers placing the Pod onto the nodes that don't have that taint.
You can check node capacities and amounts allocated with the `kubectl describe nodes` command. For example:

```shell
kubectl describe nodes e2e-test-node-pool-4lw4
```
```
Name:            e2e-test-node-pool-4lw4
[ ... lines removed for clarity ...]
Capacity:
  cpu:                               2
  memory:                            7679792Ki
  pods:                              110
Allocatable:
  cpu:                               1800m
  memory:                            7474992Ki
  pods:                              110
[ ... lines removed for clarity ...]
Non-terminated Pods:        (5 in total)
  Namespace    Name                                  CPU Requests  CPU Limits  Memory Requests  Memory Limits
  ---------    ----                                  ------------  ----------  ---------------  -------------
  kube-system  fluentd-gcp-v1.38-28bv1               100m (5%)     0 (0%)      200Mi (2%)       200Mi (2%)
  kube-system  kube-dns-3297075139-61lj3             260m (13%)    0 (0%)      100Mi (1%)       170Mi (2%)
  kube-system  kube-proxy-e2e-test-...               100m (5%)     0 (0%)      0 (0%)           0 (0%)
  kube-system  monitoring-influxdb-grafana-v4-z1m12  200m (10%)    200m (10%)  600Mi (8%)       600Mi (8%)
  kube-system  node-problem-detector-v0.1-fj7m3      20m (1%)      200m (10%)  20Mi (0%)        100Mi (1%)
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  CPU Requests    CPU Limits    Memory Requests    Memory Limits
  ------------    ----------    ---------------    -------------
  680m (34%)      400m (20%)    920Mi (11%)        1070Mi (13%)
```
In the preceding output, you can see that if a Pod requests more than 1.120 CPUs or more than 6.23Gi of memory, that Pod will not fit on this node.

By looking at the "Pods" section, you can see which Pods are taking up space on the node.

The amount of resources available to Pods is less than the node capacity because system daemons use a portion of the available resources. Within the Kubernetes API, each Node has a `.status.allocatable` field (see [NodeStatus](/docs/reference/kubernetes-api/cluster-resources/node-v1/#NodeStatus) for details).

The `.status.allocatable` field describes the amount of resources that are available to Pods on that node (for example: 15 virtual CPUs and 7538 MiB of memory). For more information on node allocatable resources in Kubernetes, see [Reserve Compute Resources for System Daemons](/docs/tasks/administer-cluster/reserve-compute-resources/).
You can configure [resource quotas](/docs/concepts/policy/resource-quotas/) to limit the total amount of resources that a namespace can consume. Kubernetes enforces quotas for objects in a particular namespace when there is a `ResourceQuota` in that namespace. For example, if you assign specific namespaces to different teams, you can add `ResourceQuota`s into those namespaces. Setting resource quotas helps to prevent one team from using so much of any resource that it affects other teams using the same cluster.
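As a sketch, a quota like the following caps the aggregate compute requests and limits of all Pods in one namespace (the namespace name and quantities are illustrative):

```yaml
# Sketch: a per-namespace cap on aggregate compute requests and limits.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-quota
  namespace: team-a       # hypothetical namespace
spec:
  hard:
    requests.cpu: "10"
    requests.memory: 20Gi
    limits.cpu: "20"
    limits.memory: 40Gi
```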
You should also consider what access you grant to users within any namespace: **full** write access to a namespace allows someone with that access to remove **any** resource, including a configured `ResourceQuota`.
### My container is terminated

Your container might get terminated because it is resource-starved. To check whether a container is being killed because it is hitting a resource limit, call `kubectl describe pod` on the Pod of interest:

```shell
kubectl describe pod simmemleak-hra99
```
The output is similar to:

```
Name:                         simmemleak-hra99
Namespace:                    default
Image(s):                     saadali/simmemleak
Node:                         kubernetes-node-tf0f/10.240.216.66
Labels:                       name=simmemleak
Status:                       Running
Reason:
Message:
IP:                           10.244.2.75
Containers:
  simmemleak:
    Image:  saadali/simmemleak:latest
    Limits:
      cpu:          100m
      memory:       50Mi
    State:          Running
      Started:      Tue, 07 Jul 2019 12:54:41 -0700
    Last State:     Terminated
      Reason:       OOMKilled
      Exit Code:    137
      Started:      Fri, 07 Jul 2019 12:54:30 -0700
      Finished:     Fri, 07 Jul 2019 12:54:33 -0700
    Ready:          False
    Restart Count:  5
Conditions:
  Type      Status
  Ready     False
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  42s   default-scheduler  Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f
  Normal  Pulled     41s   kubelet            Container image "saadali/simmemleak:latest" already present on machine
  Normal  Created    41s   kubelet            Created container simmemleak
  Normal  Started    40s   kubelet            Started container simmemleak
  Normal  Killing    32s   kubelet            Killing container with id ead3fb35-5cf5-44ed-9ae1-488115be66c6: Need to kill Pod
```
In the preceding example, the `Restart Count: 5` indicates that the `simmemleak` container in this Pod was terminated and restarted five times so far. The `OOMKilled` reason shows that the container tried to use more memory than its limit.

Your next step in this scenario would be to check the application code for a memory leak. If you find that the application is behaving how you expect, consider setting a higher memory limit (and possibly request) for that container.

## {{% heading "whatsnext" %}}
* Get hands-on experience [assigning memory resources to containers and Pods](/docs/tasks/configure-pod-container/assign-memory-resource/).
* Get hands-on experience [assigning CPU resources to containers and Pods](/docs/tasks/configure-pod-container/assign-cpu-resource/).
* Read how the API reference defines a [container](/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container) and its [resource requirements](/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources).
* Read about [project quotas](https://xfs.org/index.php/XFS_FAQ#Q:_Quota:_Do_quotas_work_on_XFS.3F) in XFS.
* Read more about the [kube-scheduler configuration reference (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/).

@ -0,0 +1,19 @@
---
title: cgroup (control group)
id: cgroup
date: 2019-06-25
full_link:
short_description: >
  A group of Linux processes with optional resource isolation, accounting and limits.

aka:
tags:
- fundamental
---
A group of Linux processes with optional resource isolation, accounting and limits.

<!--more-->

cgroup is a Linux kernel feature that limits, accounts for, and isolates the resource usage (CPU, memory, disk I/O, network) of a collection of processes.

@ -0,0 +1,18 @@
---
title: Eviction
id: eviction
date: 2022-03-05
full_link: /pt-br/docs/concepts/scheduling-eviction/
short_description: >
  Process of terminating one or more Pods on Nodes
aka:
tags:
- operation
---

Eviction is the process of terminating one or more Pods on Nodes.

<!--more-->
There are two kinds of eviction:
* [Node-pressure eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/)
* [API-initiated eviction](/docs/concepts/scheduling-eviction/api-eviction/)

@ -31,7 +31,6 @@ Add-ons extend the functionality of Kubernetes.
* [Canal](https://github.com/tigera/canal/tree/master/k8s-install) unites Flannel and Calico, providing networking and network policy.
* [Cilium](https://github.com/cilium/cilium) is a L3 network and network policy plugin that can enforce HTTP/API/L7 policies transparently. Both routing and overlay/encapsulation mode are supported.
* [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) enables Kubernetes to seamlessly connect to a choice of CNI plugins, such as Calico, Canal, Flannel, Romana, or Weave.
* [Contiv](http://contiv.github.io) provides configurable networking (native L3 using BGP, overlay using vxlan, classic L2, and Cisco-SDN/ACI) for various use cases and a rich policy framework. Contiv project is fully [open sourced](http://github.com/contiv). The [installer](http://github.com/contiv/install) provides both kubeadm and non-kubeadm based installation options.
* [Contrail](http://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/), based on [Tungsten Fabric](https://tungsten.io), is an open source, multi-cloud network virtualization and policy management platform. Contrail and Tungsten Fabric are integrated with orchestration systems such as Kubernetes, OpenShift, OpenStack and Mesos, and provide isolation modes for virtual machines, containers/pods and bare metal workloads.
* [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) is an overlay network provider that can be used with Kubernetes.
* [Knitter](https://github.com/ZTE/Knitter/) is a network solution supporting multiple networking in Kubernetes.
@ -55,9 +54,6 @@ Add-ons extend the functionality of Kubernetes.
  Both routing and overlay/encapsulation modes are supported.
* [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) enables Kubernetes to seamlessly connect to a choice of CNI plugins,
  such as Flannel, Calico, Canal, Romana, or Weave.
* [Contiv](https://contiv.github.io) provides configurable networking (native L3 using BGP, overlay using vxlan,
  classic L2, and Cisco-SDN/ACI) for various use cases, together with a rich policy framework. The Contiv project is fully [open source](https://github.com/contiv).
  The [installer](https://github.com/contiv/install) provides both kubeadm and non-kubeadm based installation options.
* [Contrail](https://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/),
  based on [Tungsten Fabric](https://tungsten.io),
  is an open source, multi-cloud network virtualization and policy management platform; Contrail and Tungsten Fabric are integrated with orchestration systems
@ -22,7 +22,7 @@ Application logs can help you understand what is happening inside your applicati
<!--
However, the native functionality provided by a container engine or runtime is usually not enough for a complete logging solution.
For example, you may want to access your application's logs if a container crashes; a pod gets evicted; or a node dies.
In a cluster, logs should have a separate storage and lifecycle independent of nodes, pods, or containers. This concept is called _cluster-level-logging_.
-->
However, the native functionality provided by a container engine or runtime is usually not enough for a complete logging solution.
@ -94,12 +94,25 @@ The output is:
```

<!--
You can use `kubectl logs --previous` to retrieve logs from a previous instantiation of a container.
If your pod has multiple containers, specify which container's logs you want to access by
appending a container name to the command, with a `-c` flag, like so:

```console
kubectl logs counter -c count
```

See the [`kubectl logs` documentation](/docs/reference/generated/kubectl/kubectl-commands#logs) for more details.
-->
You can use the command `kubectl logs --previous` to retrieve logs from a previous instantiation of a container.
If your Pod has multiple containers, you should append the container name to the command,
using a `-c` flag to specify which container's logs you want to access, like so:

```console
kubectl logs counter -c count
```

For more details, see the [`kubectl logs` documentation](/zh/docs/reference/generated/kubectl/kubectl-commands#logs).

<!--
## Logging at the node level
@ -280,7 +293,7 @@ Node-level logging creates only one agent per node, and doesn't require any chan
Node-level logging creates only one agent per node, and doesn't require any changes to the applications running on the node.

<!--
Containers write to stdout and stderr, but with no agreed format. A node-level agent collects these logs and forwards them for aggregation.
-->
Containers write to stdout and stderr, but with no agreed format. A node-level agent
@ -1,43 +1,47 @@
---
title: Resource Management for Pods and Containers
content_type: concept
weight: 40
feature:
  title: Automatic binpacking
  description: >
    Automatically places containers based on their resource requirements and other constraints, while not sacrificing availability.
    Mixes critical and best-effort workloads in order to drive up utilization and save even more resources.
---

<!--
title: Resource Management for Pods and Containers
content_type: concept
weight: 40
feature:
  title: Automatic binpacking
  description: >
    Automatically places containers based on their resource requirements and other constraints, while not sacrificing availability.
    Mix critical and best-effort workloads in order to drive up utilization and save even more resources.
-->

<!-- overview -->

<!--
When you specify a {{< glossary_tooltip term_id="pod" >}}, you can optionally specify how
much of each resource a {{< glossary_tooltip text="container" term_id="container" >}} needs.
The most common resources to specify are CPU and memory (RAM); there are others.

When you specify the resource _request_ for Containers in a Pod, the
{{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}} uses this
information to decide which node to place the Pod on. When you specify a resource _limit_
for a Container, the kubelet enforces those limits so that the running container is not
allowed to use more of that resource than the limit you set. The kubelet also reserves
at least the _request_ amount of that system resource specifically for that container
to use.
-->

When you define a {{< glossary_tooltip text="Pod" term_id="pod" >}}, you can optionally specify how much of each resource a {{< glossary_tooltip text="container" term_id="container" >}} needs.
The most common resources to specify are CPU and memory (RAM); there are other resource types as well.

When you specify the resource _request_ for containers in a Pod,
{{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}}
uses this information to decide which node to place the Pod on.
When you also specify a resource _limit_ for a container, the kubelet ensures that the running container does not use more of that resource than the limit you set.
The kubelet also reserves the _requested_ amount of that system resource for the container to use.
@ -65,7 +69,7 @@ more RAM.
|
||||||
运行,那么该容器就可以尝试使用更多的内存。
|
运行,那么该容器就可以尝试使用更多的内存。
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
If you set a `memory` limit of 4GiB for that Container, the kubelet (and
|
If you set a `memory` limit of 4GiB for that container, the kubelet (and
|
||||||
{{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}) enforce the limit.
|
{{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}) enforce the limit.
|
||||||
The runtime prevents the container from using more than the configured resource limit. For example:
|
The runtime prevents the container from using more than the configured resource limit. For example:
|
||||||
when a process in the container tries to consume more than the allowed amount of memory,
|
when a process in the container tries to consume more than the allowed amount of memory,
|
||||||
|
@ -88,15 +92,15 @@ runtimes can have different ways to implement the same restrictions.
|
||||||
|
|
||||||
{{< note >}}
|
{{< note >}}
|
||||||
<!--
|
<!--
|
||||||
If a Container specifies its own memory limit, but does not specify a memory request, Kubernetes
|
If a container specifies its own memory limit, but does not specify a memory request, Kubernetes
|
||||||
automatically assigns a memory request that matches the limit. Similarly, if a Container specifies its own
|
automatically assigns a memory request that matches the limit. Similarly, if a container specifies its own
|
||||||
CPU limit, but does not specify a CPU request, Kubernetes automatically assigns a CPU request that matches
|
CPU limit, but does not specify a CPU request, Kubernetes automatically assigns a CPU request that matches
|
||||||
the limit.
|
the limit.
|
||||||
-->
|
-->
|
||||||
如果某 Container 设置了自己的内存限制但未设置内存请求,Kubernetes
|
如果某容器设置了自己的内存限制但未设置内存请求,Kubernetes
|
||||||
自动为其设置与内存限制相匹配的请求值。类似的,如果某 Container 设置了
|
自动为其设置与内存限制相匹配的请求值。类似地,如果某容器设置了
|
||||||
CPU 限制值但未设置 CPU 请求值,则 Kubernetes 自动为其设置 CPU 请求
|
CPU 限制值但未设置 CPU 请求值,则 Kubernetes 自动为其设置 CPU
|
||||||
并使之与 CPU 限制值匹配。
|
请求并使之与 CPU 限制值匹配。
|
||||||
{{< /note >}}
|
{{< /note >}}
|
||||||
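As a minimal sketch of the behavior described in the note (the Pod name and image are illustrative), a container that sets only limits gets matching requests assigned automatically:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: limits-only-example  # illustrative name
spec:
  containers:
  - name: app
    image: images.my-company.example/app:v4
    resources:
      limits:
        cpu: "0.5"     # with no explicit request, requests.cpu defaults to 0.5
        memory: 256Mi  # with no explicit request, requests.memory defaults to 256Mi
```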
|
|
||||||
<!--
|
<!--
|
||||||
|
@ -105,7 +109,7 @@ CPU 限制值但未设置 CPU 请求值,则 Kubernetes 自动为其设置 CPU
|
||||||
*CPU* and *memory* are each a *resource type*. A resource type has a base unit.
|
*CPU* and *memory* are each a *resource type*. A resource type has a base unit.
|
||||||
CPU represents compute processing and is specified in units of [Kubernetes CPUs](#meaning-of-cpu).
|
CPU represents compute processing and is specified in units of [Kubernetes CPUs](#meaning-of-cpu).
|
||||||
Memory is specified in units of bytes.
|
Memory is specified in units of bytes.
|
||||||
If you're using Kubernetes v1.14 or newer, you can specify _huge page_ resources.
|
For Linux workloads, you can specify _huge page_ resources.
|
||||||
Huge pages are a Linux-specific feature where the node kernel allocates blocks of memory
|
Huge pages are a Linux-specific feature where the node kernel allocates blocks of memory
|
||||||
that are much larger than the default page size.
|
that are much larger than the default page size.
|
||||||
|
|
||||||
|
@ -115,10 +119,10 @@ total of 80 MiB), that allocation fails.
|
||||||
-->
|
-->
|
||||||
## 资源类型 {#resource-types}
|
## 资源类型 {#resource-types}
|
||||||
|
|
||||||
*CPU* 和*内存*都是*资源类型*。每种资源类型具有其基本单位。
|
*CPU* 和 *内存* 都是 *资源类型*。每种资源类型具有其基本单位。
|
||||||
CPU 表达的是计算处理能力,其单位是 [Kubernetes CPUs](#meaning-of-cpu)。
|
CPU 表达的是计算处理能力,其单位是 [Kubernetes CPUs](#meaning-of-cpu)。
|
||||||
内存的单位是字节。
|
内存的单位是字节。
|
||||||
如果你使用的是 Kubernetes v1.14 或更高版本,则可以指定巨页(Huge Page)资源。
|
对于 Linux 负载,则可以指定巨页(Huge Page)资源。
|
||||||
巨页是 Linux 特有的功能,节点内核在其中分配的内存块比默认页大小大得多。
|
巨页是 Linux 特有的功能,节点内核在其中分配的内存块比默认页大小大得多。
|
||||||
|
|
||||||
例如,在默认页面大小为 4KiB 的系统上,你可以指定约束 `hugepages-2Mi: 80Mi`。
|
例如,在默认页面大小为 4KiB 的系统上,你可以指定约束 `hugepages-2Mi: 80Mi`。
|
||||||
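A sketch of requesting the `hugepages-2Mi: 80Mi` constraint mentioned above, assuming the node has 2 MiB huge pages pre-allocated (the Pod name and image are illustrative; huge page requests must equal their limits):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hugepages-example  # illustrative name
spec:
  containers:
  - name: app
    image: images.my-company.example/app:v4
    resources:
      requests:
        hugepages-2Mi: 80Mi  # forty 2MiB pages
        memory: 128Mi
      limits:
        hugepages-2Mi: 80Mi  # must match the request
        memory: 128Mi
```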
|
@ -141,16 +145,21 @@ consumed. They are distinct from
|
||||||
[Services](/docs/concepts/services-networking/service/) are objects that can be read and modified
|
[Services](/docs/concepts/services-networking/service/) are objects that can be read and modified
|
||||||
through the Kubernetes API server.
|
through the Kubernetes API server.
|
||||||
-->
|
-->
|
||||||
CPU 和内存统称为*计算资源*,或简称为*资源*。
|
CPU 和内存统称为“计算资源”,或简称为“资源”。
|
||||||
计算资源的数量是可测量的,可以被请求、被分配、被消耗。
|
计算资源的数量是可测量的,可以被请求、被分配、被消耗。
|
||||||
它们与 [API 资源](/zh/docs/concepts/overview/kubernetes-api/) 不同。
|
它们与 [API 资源](/zh/docs/concepts/overview/kubernetes-api/) 不同。
|
||||||
API 资源(如 Pod 和 [Service](/zh/docs/concepts/services-networking/service/))是可通过
|
API 资源(如 Pod 和 [Service](/zh/docs/concepts/services-networking/service/))是可通过
|
||||||
Kubernetes API 服务器读取和修改的对象。
|
Kubernetes API 服务器读取和修改的对象。
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
## Resource requests and limits of Pod and Container
|
## Resource requests and limits of Pod and container
|
||||||
|
|
||||||
Each Container of a Pod can specify one or more of the following:
|
For each container, you can specify resource limits and requests,
|
||||||
|
including the following:
|
||||||
|
-->
|
||||||
|
## Pod 和容器的资源请求和约束
|
||||||
|
|
||||||
|
针对每个容器,你都可以指定其资源约束和请求,包括如下选项:
|
||||||
|
|
||||||
* `spec.containers[].resources.limits.cpu`
|
* `spec.containers[].resources.limits.cpu`
|
||||||
* `spec.containers[].resources.limits.memory`
|
* `spec.containers[].resources.limits.memory`
|
||||||
|
@ -159,93 +168,114 @@ Each Container of a Pod can specify one or more of the following:
|
||||||
* `spec.containers[].resources.requests.memory`
|
* `spec.containers[].resources.requests.memory`
|
||||||
* `spec.containers[].resources.requests.hugepages-<size>`
|
* `spec.containers[].resources.requests.hugepages-<size>`
|
||||||
|
|
||||||
Although requests and limits can only be specified on individual Containers, it
|
<!--
|
||||||
is convenient to talk about Pod resource requests and limits. A
|
Although you can only specify requests and limits for individual containers,
|
||||||
*Pod resource request/limit* for a particular resource type is the sum of the
|
it is also useful to think about the overall resource requests and limits for
|
||||||
resource requests/limits of that type for each Container in the Pod.
|
a Pod.
|
||||||
|
A
|
||||||
|
For a particular resource, a *Pod resource request/limit* is the sum of the
|
||||||
|
resource requests/limits of that type for each container in the Pod.
|
||||||
-->
|
-->
|
||||||
|
尽管你只能逐个容器地指定请求和限制值,考虑 Pod 的总体资源请求和约束也是有用的。
|
||||||
## Pod 和 容器的资源请求和约束
|
对特定资源而言,Pod 的资源请求/约束值是 Pod 中各容器对该类型资源的请求/约束值的总和。
|
||||||
|
|
||||||
Pod 中的每个容器都可以指定以下的一个或者多个值:
|
|
||||||
|
|
||||||
- `spec.containers[].resources.limits.cpu`
|
|
||||||
- `spec.containers[].resources.limits.memory`
|
|
||||||
- `spec.containers[].resources.limits.hugepages-<size>`
|
|
||||||
- `spec.containers[].resources.requests.cpu`
|
|
||||||
- `spec.containers[].resources.requests.memory`
|
|
||||||
- `spec.containers[].resources.requests.hugepages-<size>`
|
|
||||||
|
|
||||||
尽管请求和限制值只能在单个容器上指定,我们仍可方便地计算出 Pod 的资源请求和约束。
|
|
||||||
Pod 对特定资源类型的请求/约束值是 Pod 中各容器对该类型资源的请求/约束值的总和。
|
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
## Resource units in Kubernetes
|
## Resource units in Kubernetes
|
||||||
|
|
||||||
### Meaning of CPU
|
### CPU resource units {#meaning-of-cpu}
|
||||||
|
|
||||||
Limits and requests for CPU resources are measured in *cpu* units.
|
Limits and requests for CPU resources are measured in *cpu* units.
|
||||||
One cpu, in Kubernetes, is equivalent to **1 vCPU/Core** for cloud providers and **1 hyperthread** on bare-metal Intel processors.
|
In Kubernetes, 1 CPU unit is equivalent to **1 physical CPU core**,
|
||||||
|
or **1 virtual core**, depending on whether the node is a physical host
|
||||||
Fractional requests are allowed. A Container with
|
or a virtual machine running inside a physical machine.
|
||||||
`spec.containers[].resources.requests.cpu` of `0.5` is guaranteed half as much
|
|
||||||
CPU as one that asks for 1 CPU. The expression `0.1` is equivalent to the
|
|
||||||
expression `100m`, which can be read as "one hundred millicpu". Some people say
|
|
||||||
"one hundred millicores", and this is understood to mean the same thing. A
|
|
||||||
request with a decimal point, like `0.1`, is converted to `100m` by the API, and
|
|
||||||
precision finer than `1m` is not allowed. For this reason, the form `100m` might
|
|
||||||
be preferred.
|
|
||||||
CPU is always requested as an absolute quantity, never as a relative quantity;
|
|
||||||
0.1 is the same amount of CPU on a single-core, dual-core, or 48-core machine.
|
|
||||||
-->
|
-->
|
||||||
## Kubernetes 中的资源单位 {#resource-units-in-kubernetes}
|
## Kubernetes 中的资源单位 {#resource-units-in-kubernetes}
|
||||||
|
|
||||||
### CPU 的含义 {#meaning-of-cpu}
|
### CPU 资源单位 {#meaning-of-cpu}
|
||||||
|
|
||||||
CPU 资源的约束和请求以 *CPU* 为单位。
|
CPU 资源的约束和请求以 “cpu” 为单位。
|
||||||
|
在 Kubernetes 中,一个 CPU 等于 **1 个物理 CPU 核** 或者 **1 个虚拟核**,
|
||||||
Kubernetes 中的一个 CPU 等于云平台上的 **1 个 vCPU/核**和裸机 Intel
|
取决于节点是一台物理主机还是运行在某物理主机上的虚拟机。
|
||||||
处理器上的 **1 个超线程**。
|
|
||||||
|
|
||||||
你也可以表达带小数 CPU 的请求。`spec.containers[].resources.requests.cpu` 为 0.5
|
|
||||||
的 Container 肯定能够获得请求 1 CPU 的容器的一半 CPU 资源。表达式 `0.1` 等价于表达式 `100m`,
|
|
||||||
可以看作 “100 millicpu”。有些人说成是“一百毫 cpu”,其实说的是同样的事情。
|
|
||||||
具有小数点(如 `0.1`)的请求由 API 转换为 `100m`;最大精度是 `1m`。
|
|
||||||
因此,或许你应该优先考虑使用 `100m` 的形式。
|
|
||||||
|
|
||||||
CPU 总是按绝对数量来请求的,不可以使用相对数量;
|
|
||||||
0.1 的 CPU 在单核、双核、48 核的机器上的意义是一样的。
|
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
## Meaning of memory
|
Fractional requests are allowed. When you define a container with
|
||||||
|
`spec.containers[].resources.requests.cpu` set to `0.5`, you are requesting half
|
||||||
|
as much CPU time compared to if you asked for `1.0` CPU.
|
||||||
|
For CPU resource units, the [quantity](/docs/reference/kubernetes-api/common-definitions/quantity/) expression `0.1` is equivalent to the
|
||||||
|
expression `100m`, which can be read as "one hundred millicpu". Some people say
|
||||||
|
"one hundred millicores", and this is understood to mean the same thing.
|
||||||
|
-->
|
||||||
|
你也可以表达带小数 CPU 的请求。
|
||||||
|
当你定义一个容器,将其 `spec.containers[].resources.requests.cpu` 设置为 0.5 时,
|
||||||
|
你所请求的 CPU 是你请求 `1.0` CPU 时的一半。
|
||||||
|
对于 CPU 资源单位,[数量](/docs/reference/kubernetes-api/common-definitions/quantity/)
|
||||||
|
表达式 `0.1` 等价于表达式 `100m`,可以看作 “100 millicpu”。
|
||||||
|
有些人说成是“一百毫核”,其实说的是同样的事情。
|
||||||
|
|
||||||
|
<!--
|
||||||
|
CPU resource is always specified as an absolute amount of resource, never as a relative amount. For example,
|
||||||
|
`500m` CPU represents the roughly same amount of computing power whether that container
|
||||||
|
runs on a single-core, dual-core, or 48-core machine.
|
||||||
|
-->
|
||||||
|
CPU 资源总是设置为资源的绝对数量而非相对数量值。
|
||||||
|
例如,无论容器运行在单核、双核或者 48 核的机器上,`500m` CPU 表示的是大约相同的计算能力。
|
||||||
|
|
||||||
|
{{< note >}}
|
||||||
|
<!--
|
||||||
|
Kubernetes doesn't allow you to specify CPU resources with a precision finer than
|
||||||
|
`1m`. Because of this, it's useful to specify CPU units less than `1.0` or `1000m` using
|
||||||
|
the milliCPU form; for example, `5m` rather than `0.005`.
|
||||||
|
-->
|
||||||
|
Kubernetes 不允许设置精度小于 `1m` 的 CPU 资源。
|
||||||
|
因此,当 CPU 单位小于 `1` 或 `1000m` 时,使用毫核的形式是有用的;
|
||||||
|
例如 `5m` 而不是 `0.005`。
|
||||||
|
{{< /note >}}
|
||||||
|
|
||||||
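A short sketch showing two equivalent ways to write the same CPU quantity (the Pod name and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: cpu-units-example  # illustrative name
spec:
  containers:
  - name: app
    image: images.my-company.example/app:v4
    resources:
      requests:
        cpu: "0.5"  # the API canonicalizes this to 500m
      limits:
        cpu: 500m   # the same quantity, written directly in milliCPU form
```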
|
<!--
|
||||||
|
### Memory resource units {#meaning-of-memory}
|
||||||
|
|
||||||
Limits and requests for `memory` are measured in bytes. You can express memory as
|
Limits and requests for `memory` are measured in bytes. You can express memory as
|
||||||
a plain integer or as a fixed-point number using one of these suffixes:
|
a plain integer or as a fixed-point number using one of these
|
||||||
|
[quantity](/docs/reference/kubernetes-api/common-definitions/quantity/) suffixes:
|
||||||
E, P, T, G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi,
|
E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi,
|
||||||
Mi, Ki. For example, the following represent roughly the same value:
|
Mi, Ki. For example, the following represent roughly the same value:
|
||||||
-->
|
-->
|
||||||
## 内存的含义 {#meaning-of-memory}
|
## 内存资源单位 {#meaning-of-memory}
|
||||||
|
|
||||||
内存的约束和请求以字节为单位。你可以使用以下后缀之一以一般整数或定点数字形式来表示内存:
|
`memory` 的约束和请求以字节为单位。
|
||||||
E、P、T、G、M、k。你也可以使用对应的 2 的幂数:Ei、Pi、Ti、Gi、Mi、Ki。
|
你可以使用普通的整数,或者带有以下
|
||||||
|
[数量](/docs/reference/kubernetes-api/common-definitions/quantity/)后缀
|
||||||
|
的定点数字来表示内存:E、P、T、G、M、k。
|
||||||
|
你也可以使用对应的 2 的幂数:Ei、Pi、Ti、Gi、Mi、Ki。
|
||||||
例如,以下表达式所代表的是大致相同的值:
|
例如,以下表达式所代表的是大致相同的值:
|
||||||
|
|
||||||
```
|
```
|
||||||
128974848、129e6、129M、123Mi
|
128974848、129e6、129M、128974848000m、123Mi
|
||||||
```
|
```
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
Here's an example.
|
Take care about case for suffixes. If you request `400m` of memory, this is a request
|
||||||
The following Pod has two Containers. Each Container has a request of 0.25 cpu
|
for 0.4 bytes. Someone who types that probably meant to ask for 400 mebibytes (`400Mi`)
|
||||||
and 64MiB (2<sup>26</sup> bytes) of memory. Each Container has a limit of 0.5
|
or 400 megabytes (`400M`).
|
||||||
cpu and 128MiB of memory. You can say the Pod has a request of 0.5 cpu and 128
|
|
||||||
MiB of memory, and a limit of 1 cpu and 256MiB of memory.
|
|
||||||
-->
|
-->
|
||||||
下面是个例子。
|
请注意后缀的大小写。如果你请求 `400m` 内存,实际上请求的是 0.4 字节。
|
||||||
|
如果有人这样设定资源请求或限制,他的实际想法可能是申请 400 MiB 内存(`400Mi`)
|
||||||
|
或者 400 MB 内存(`400M`)。
|
||||||
|
|
||||||
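A minimal sketch of the suffix-case pitfall described above (the values are illustrative):

```yaml
resources:
  requests:
    memory: 400Mi    # 400 mebibytes
    # memory: 400m   # would request 0.4 bytes -- almost certainly a mistake
  limits:
    memory: 400Mi
```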
以下 Pod 有两个 Container。每个 Container 的请求为 0.25 cpu 和 64MiB(2<sup>26</sup> 字节)内存,
|
<!--
|
||||||
每个容器的资源约束为 0.5 cpu 和 128MiB 内存。
|
## Container resources example {#example-1}
|
||||||
你可以认为该 Pod 的资源请求为 0.5 cpu 和 128 MiB 内存,资源限制为 1 cpu 和 256MiB 内存。
|
|
||||||
|
The following Pod has two containers. Both containers are defined with a request for
|
||||||
|
0.25 CPU
|
||||||
|
and 64MiB (2<sup>26</sup> bytes) of memory. Each container has a limit of 0.5
|
||||||
|
CPU and 128MiB of memory. You can say the Pod has a request of 0.5 CPU and 128
|
||||||
|
MiB of memory, and a limit of 1 CPU and 256MiB of memory.
|
||||||
|
-->
|
||||||
|
## 容器资源示例 {#example-1}
|
||||||
|
|
||||||
|
以下 Pod 有两个容器。每个容器的请求为 0.25 CPU 和 64MiB(2<sup>26</sup> 字节)内存,
|
||||||
|
每个容器的资源约束为 0.5 CPU 和 128MiB 内存。
|
||||||
|
你可以认为该 Pod 的资源请求为 0.5 CPU 和 128 MiB 内存,资源限制为 1 CPU 和 256MiB 内存。
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
|
@ -256,9 +286,6 @@ spec:
|
||||||
containers:
|
containers:
|
||||||
- name: app
|
- name: app
|
||||||
image: images.my-company.example/app:v4
|
image: images.my-company.example/app:v4
|
||||||
env:
|
|
||||||
- name: MYSQL_ROOT_PASSWORD
|
|
||||||
value: "password"
|
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
memory: "64Mi"
|
memory: "64Mi"
|
||||||
|
@ -284,7 +311,8 @@ When you create a Pod, the Kubernetes scheduler selects a node for the Pod to
|
||||||
run on. Each node has a maximum capacity for each of the resource types: the
|
run on. Each node has a maximum capacity for each of the resource types: the
|
||||||
amount of CPU and memory it can provide for Pods. The scheduler ensures that,
|
amount of CPU and memory it can provide for Pods. The scheduler ensures that,
|
||||||
for each resource type, the sum of the resource requests of the scheduled
|
for each resource type, the sum of the resource requests of the scheduled
|
||||||
Containers is less than the capacity of the node. Note that although actual memory
|
containers is less than the capacity of the node.
|
||||||
|
Note that although actual memory
|
||||||
or CPU resource usage on nodes is very low, the scheduler still refuses to place
|
or CPU resource usage on nodes is very low, the scheduler still refuses to place
|
||||||
a Pod on a node if the capacity check fails. This protects against a resource
|
a Pod on a node if the capacity check fails. This protects against a resource
|
||||||
shortage on a node when resource usage later increases, for example, during a
|
shortage on a node when resource usage later increases, for example, during a
|
||||||
|
@ -300,84 +328,88 @@ daily peak in request rate.
|
||||||
当稍后节点上资源用量增加,例如到达请求率的每日峰值区间时,节点上也不会出现资源不足的问题。
|
当稍后节点上资源用量增加,例如到达请求率的每日峰值区间时,节点上也不会出现资源不足的问题。
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
## How Pods with resource limits are run
|
## How Kubernetes applies resource requests and limits {#how-pods-with-resource-limits-are-run}
|
||||||
|
|
||||||
When the kubelet starts a Container of a Pod, it passes the CPU and memory limits
|
When the kubelet starts a container of a Pod, the kubelet passes that container's
|
||||||
to the container runtime.
|
requests and limits for memory and CPU to the container runtime.
|
||||||
|
|
||||||
When using Docker:
|
On Linux, the container runtime typically configures
|
||||||
|
kernel {{< glossary_tooltip text="cgroups" term_id="cgroup" >}} that apply and enforce the
|
||||||
|
limits you defined.
|
||||||
-->
|
-->
|
||||||
## 带资源约束的 Pod 如何运行
|
## Kubernetes 应用资源请求与约束的方式 {#how-pods-with-resource-limits-are-run}
|
||||||
|
|
||||||
当 kubelet 启动 Pod 中的 Container 时,它会将 CPU 和内存约束信息传递给容器运行时。
|
当 kubelet 启动 Pod 中的容器时,它会将容器的 CPU 和内存请求与约束信息传递给容器运行时。
|
||||||
|
|
||||||
当使用 Docker 时:
|
在 Linux 系统上,容器运行时通常会配置内核
|
||||||
|
{{< glossary_tooltip text="CGroups" term_id="cgroup" >}},负责应用并实施所定义的约束。
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
- The `spec.containers[].resources.requests.cpu` is converted to its core value,
|
- The CPU limit defines a hard ceiling on how much CPU time that the container can use.
|
||||||
which is potentially fractional, and multiplied by 1024. The greater of this number
|
During each scheduling interval (time slice), the Linux kernel checks to see if this
|
||||||
or 2 is used as the value of the
|
limit is exceeded; if so, the kernel waits before allowing that cgroup to resume execution.
|
||||||
[`--cpu-shares`](https://docs.docker.com/engine/reference/run/#cpu-share-constraint)
|
|
||||||
flag in the `docker run` command.
|
|
||||||
|
|
||||||
- The `spec.containers[].resources.limits.cpu` is converted to its millicore value and
|
|
||||||
multiplied by 100. The resulting value is the total amount of CPU time in microseconds
|
|
||||||
that a container can use every 100ms. A container cannot use more than its share of
|
|
||||||
CPU time during this interval.
|
|
||||||
|
|
||||||
The default quota period is 100ms. The minimum resolution of CPU quota is 1ms.
|
|
||||||
|
|
||||||
- The `spec.containers[].resources.limits.memory` is converted to an integer, and
|
|
||||||
used as the value of the
|
|
||||||
[`--memory`](https://docs.docker.com/engine/reference/run/#/user-memory-constraints)
|
|
||||||
flag in the `docker run` command.
|
|
||||||
-->
|
-->
|
||||||
|
- CPU 约束值定义的是容器可使用的 CPU 时间的硬性上限。
|
||||||
- `spec.containers[].resources.requests.cpu` 先被转换为可能是小数的基础值,再乘以 1024。
|
在每个调度周期(时间片)期间,Linux 内核检查是否已经超出该约束值;
|
||||||
这个数值和 2 的较大者用作 `docker run` 命令中的
|
如果超出限制,内核会先等待,之后才允许该 cgroup 恢复执行。
|
||||||
[`--cpu-shares`](https://docs.docker.com/engine/reference/run/#/cpu-share-constraint)
|
<!--
|
||||||
标志的值。
|
- The CPU request typically defines a weighting. If several different containers (cgroups)
|
||||||
- `spec.containers[].resources.limits.cpu` 先被转换为 millicore 值,再乘以 100。
|
want to run on a contended system, workloads with larger CPU requests are allocated more
|
||||||
其结果就是每 100 毫秒内容器可以使用的 CPU 时间总量,单位为微秒。在此期间(100ms),
|
CPU time than workloads with small requests.
|
||||||
容器所使用的 CPU 时间不可以超过它被分配的时间。
|
-->
|
||||||
|
- CPU 请求值定义的是一个权重值。如果若干不同的容器(CGroups)需要在一个共享的系统上竞争运行,
|
||||||
{{< note >}}
|
CPU 请求值大的负载会获得比请求值小的负载更多的 CPU 时间。
|
||||||
默认的配额(Quota)周期为 100 毫秒。CPU 配额的最小精度为 1 毫秒。
|
<!--
|
||||||
{{</ note >}}
|
- The memory request is mainly used during (Kubernetes) Pod scheduling. On a node that uses
|
||||||
|
cgroups v2, the container runtime might use the memory request as a hint to set
|
||||||
- `spec.containers[].resources.limits.memory` 被转换为整数值,作为 `docker run` 命令中的
|
`memory.min` and `memory.low`.
|
||||||
[`--memory`](https://docs.docker.com/engine/reference/run/#/user-memory-constraints)
|
-->
|
||||||
参数值。
|
- 内存请求值主要用于(Kubernetes)Pod 调度期间。在一个启用了 CGroup v2 的节点上,
|
||||||
|
容器运行时可能会使用内存请求值作为设置 `memory.min` 和 `memory.low` 的提示值。
|
||||||
|
<!--
|
||||||
|
- The memory limit defines a memory limit for that cgroup. If the container tries to
|
||||||
|
allocate more memory than this limit, the Linux kernel out-of-memory subsystem activates
|
||||||
|
and, typically, intervenes by stopping one of the processes in the container that tried
|
||||||
|
to allocate memory. If that process is the container's PID 1, and the container is marked
|
||||||
|
as restartable, Kubernetes restarts the container.
|
||||||
|
-->
|
||||||
|
- 内存约束值定义的是 CGroup 的内存约束。如果容器尝试分配的内存量超出约束值,
|
||||||
|
则 Linux 内核的内存不足处理子系统会被激活,并停止尝试分配内存的容器中的某个进程。
|
||||||
|
如果该进程在容器中 PID 为 1,而容器被标记为可重新启动,则 Kubernetes
|
||||||
|
会重新启动该容器。
|
||||||
|
<!--
|
||||||
|
- The memory limit for the Pod or container can also apply to pages in memory backed
|
||||||
|
volumes, such as an `emptyDir`. The kubelet tracks `tmpfs` emptyDir volumes as container
|
||||||
|
memory use, rather than as local ephemeral storage.
|
||||||
|
-->
|
||||||
|
- Pod 或容器的内存约束值也适用于通过内存供应的卷,例如 `emptyDir` 卷。
|
||||||
|
kubelet 会跟踪 `tmpfs` 形式的 emptyDir 卷用量,将其作为容器的内存用量,
|
||||||
|
而不是临时存储用量。
|
||||||
|
|
||||||
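As an illustration of the enforcement described above, here is a sketch of what you might see on a node that uses cgroup v2; the `<pod-slice>` and `<container-scope>` path segments are placeholders, and the exact layout depends on the container runtime and cgroup driver:

```shell
# For a container with limits of cpu: 500m and memory: 128Mi:
cat /sys/fs/cgroup/kubepods.slice/<pod-slice>/<container-scope>/cpu.max
# 50000 100000    (50ms of CPU time per 100ms period, i.e. 0.5 CPU)
cat /sys/fs/cgroup/kubepods.slice/<pod-slice>/<container-scope>/memory.max
# 134217728       (128Mi expressed in bytes)
```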
<!--
|
<!--
|
||||||
If a Container exceeds its memory limit, it might be terminated. If it is
|
If a container exceeds its memory request, and the node that it runs on becomes short of
|
||||||
restartable, the kubelet will restart it, as with any other type of runtime
|
memory overall, it is likely that the Pod the container belongs to will be
|
||||||
failure.
|
{{< glossary_tooltip text="evicted" term_id="eviction" >}}.
|
||||||
|
|
||||||
If a Container exceeds its memory request, it is likely that its Pod will
|
A container might or might not be allowed to exceed its CPU limit for extended periods of time.
|
||||||
be evicted whenever the node runs out of memory.
|
However, container runtimes don't terminate Pods or containers for excessive CPU usage.
|
||||||
|
|
||||||
A Container might or might not be allowed to exceed its CPU limit for extended
|
To determine whether a container cannot be scheduled or is being killed due to resource limits,
|
||||||
periods of time. However, it will not be killed for excessive CPU usage.
|
see the [Troubleshooting](#troubleshooting) section.
|
||||||
|
|
||||||
To determine whether a Container cannot be scheduled or is being killed due to
|
|
||||||
resource limits, see the
|
|
||||||
[Troubleshooting](#troubleshooting) section.
|
|
||||||
-->
|
-->
|
||||||
如果 Container 超过其内存限制,则可能会被终止。如果容器可重新启动,则与所有其他类型的
|
如果某容器内存用量超过其内存请求值并且所在节点内存不足时,容器所处的 Pod
|
||||||
运行时失效一样,kubelet 将重新启动容器。
|
可能被{{< glossary_tooltip text="逐出" term_id="eviction" >}}。
|
||||||
|
|
||||||
如果一个 Container 内存用量超过其内存请求值,那么当节点内存不足时,容器所处的 Pod 可能被逐出。
|
每个容器可能被允许也可能不被允许使用超过其 CPU 约束的处理时间。
|
||||||
|
但是,容器运行时不会由于 CPU 使用率过高而杀死 Pod 或容器。
|
||||||
|
|
||||||
每个 Container 可能被允许也可能不被允许使用超过其 CPU 约束的处理时间。
|
要确定某容器是否会由于资源约束而无法调度或被杀死,请参阅[疑难解答](#troubleshooting)节。
|
||||||
但是,容器不会由于 CPU 使用率过高而被杀死。
|
|
||||||
|
|
||||||
要确定 Container 是否会由于资源约束而无法调度或被杀死,请参阅[疑难解答](#troubleshooting) 部分。
|
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
## Monitoring compute & memory resource usage
|
## Monitoring compute & memory resource usage
|
||||||
|
|
||||||
The resource usage of a Pod is reported as part of the Pod status.
|
The kubelet reports the resource usage of a Pod as part of the Pod
|
||||||
|
[`status`](/docs/concepts/overview/working-with-objects/kubernetes-objects/#object-spec-and-status).
|
||||||
|
|
||||||
If optional [tools for monitoring](/docs/tasks/debug-application-cluster/resource-usage-monitoring/)
|
If optional [tools for monitoring](/docs/tasks/debug-application-cluster/resource-usage-monitoring/)
|
||||||
are available in your cluster, then Pod resource usage can be retrieved either
|
are available in your cluster, then Pod resource usage can be retrieved either
|
||||||
|
@ -386,12 +418,12 @@ directly or from your monitoring tools.
|
||||||
-->
|
-->
|
||||||
## 监控计算和内存资源用量
|
## 监控计算和内存资源用量
|
||||||
|
|
||||||
Pod 的资源使用情况是作为 Pod 状态的一部分来报告的。
|
kubelet 会将 Pod 的资源使用情况作为 Pod
|
||||||
|
[`status`](/zh/docs/concepts/overview/working-with-objects/kubernetes-objects/#object-spec-and-status)
|
||||||
|
的一部分来报告。
|
||||||
|
|
||||||
如果为集群配置了可选的
|
如果为集群配置了可选的[监控工具](/zh/docs/tasks/debug-application-cluster/resource-usage-monitoring/),
|
||||||
[监控工具](/zh/docs/tasks/debug-application-cluster/resource-usage-monitoring/),
|
则可以直接从[指标 API](/zh/docs/tasks/debug-application-cluster/resource-metrics-pipeline/#the-metrics-api)
|
||||||
则可以直接从
|
|
||||||
[指标 API](/zh/docs/tasks/debug-application-cluster/resource-metrics-pipeline/#the-metrics-api)
|
|
||||||
或者监控工具获得 Pod 的资源使用情况。
|
或者监控工具获得 Pod 的资源使用情况。
|
||||||
|
|
||||||
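For example, assuming the optional metrics-server add-on is running in the cluster, you can query current usage with `kubectl`:

```shell
# Current CPU and memory usage for one Pod (requires metrics-server)
kubectl top pod frontend

# The same data, fetched directly from the Metrics API
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/default/pods/frontend"
```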
<!--
|
<!--
|
||||||
|
@ -409,6 +441,7 @@ mount [`emptyDir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir
|
||||||
## 本地临时存储 {#local-ephemeral-storage}
|
## 本地临时存储 {#local-ephemeral-storage}
|
||||||
|
|
||||||
<!-- feature gate LocalStorageCapacityIsolation -->
|
<!-- feature gate LocalStorageCapacityIsolation -->
|
||||||
|
|
||||||
{{< feature-state for_k8s_version="v1.10" state="beta" >}}
|
{{< feature-state for_k8s_version="v1.10" state="beta" >}}
|
||||||
|
|
||||||
节点通常还可以具有本地的临时性存储,由本地挂接的可写入设备或者有时也用 RAM
|
节点通常还可以具有本地的临时性存储,由本地挂接的可写入设备或者有时也用 RAM
|
||||||
|
@ -425,7 +458,7 @@ The kubelet also uses this kind of storage to hold
|
||||||
[node-level container logs](/docs/concepts/cluster-administration/logging/#logging-at-the-node-level),
|
[node-level container logs](/docs/concepts/cluster-administration/logging/#logging-at-the-node-level),
|
||||||
container images, and the writable layers of running containers.
|
container images, and the writable layers of running containers.
|
||||||
|
|
||||||
If a node fails, the data in its ephemeral storage can be lost.
|
If a node fails, the data in its ephemeral storage can be lost.
|
||||||
Your applications cannot expect any performance SLAs (disk IOPS for example)
|
Your applications cannot expect any performance SLAs (disk IOPS for example)
|
||||||
from local ephemeral storage.
|
from local ephemeral storage.
|
||||||
|
|
||||||
|
@ -569,43 +602,44 @@ kubelet 会将 `tmpfs` emptyDir 卷的用量当作容器内存用量,而不是
|
||||||
<!--
|
<!--
|
||||||
### Setting requests and limits for local ephemeral storage
|
### Setting requests and limits for local ephemeral storage
|
||||||
|
|
||||||
You can use _ephemeral-storage_ for managing local ephemeral storage. Each Container of a Pod can specify one or more of the following:
|
You can use `ephemeral-storage` for managing local ephemeral storage. Each
|
||||||
|
container of a Pod can specify either or both of the following:
|
||||||
|
|
||||||
* `spec.containers[].resources.limits.ephemeral-storage`
|
* `spec.containers[].resources.limits.ephemeral-storage`
|
||||||
* `spec.containers[].resources.requests.ephemeral-storage`
|
* `spec.containers[].resources.requests.ephemeral-storage`
|
||||||
|
|
||||||
Limits and requests for `ephemeral-storage` are measured in bytes. You can express storage as
|
Limits and requests for `ephemeral-storage` are measured in quantities.
|
||||||
a plain integer or as a fixed-point number using one of these suffixes:
|
You can express storage as a plain integer or as a fixed-point number using one of these suffixes:
|
||||||
E, P, T, G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi,
|
E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi,
|
||||||
Mi, Ki. For example, the following represent roughly the same value:
|
Mi, Ki. For example, the following represent roughly the same value:
|
||||||
|
|
||||||
```shell
|
|
||||||
128974848, 129e6, 129M, 123Mi
|
|
||||||
```
|
|
||||||
-->
|
-->
|
||||||
### 为本地临时性存储设置请求和约束值
|
### 为本地临时性存储设置请求和约束值
|
||||||
|
|
||||||
你可以使用 _ephemeral-storage_ 来管理本地临时性存储。
|
你可以使用 `ephemeral-storage` 来管理本地临时性存储。
|
||||||
Pod 中的每个 Container 可以设置以下属性:
|
Pod 中的每个容器可以设置以下一个或两个属性:
|
||||||
|
|
||||||
* `spec.containers[].resources.limits.ephemeral-storage`
|
* `spec.containers[].resources.limits.ephemeral-storage`
|
||||||
* `spec.containers[].resources.requests.ephemeral-storage`
|
* `spec.containers[].resources.requests.ephemeral-storage`
|
||||||
|
|
||||||
`ephemeral-storage` 的请求和约束值是按字节计量的。你可以使用一般整数或者定点数字
|
`ephemeral-storage` 的请求和约束值是按数量(quantity)来计量的。你可以使用一般整数或者定点数字
|
||||||
加上下面的后缀来表达存储量:E、P、T、G、M、K。
|
加上下面的后缀来表达存储量:E、P、T、G、M、k。
|
||||||
你也可以使用对应的 2 的幂级数来表达:Ei、Pi、Ti、Gi、Mi、Ki。
|
你也可以使用对应的 2 的幂级数来表达:Ei、Pi、Ti、Gi、Mi、Ki。
|
||||||
例如,下面的表达式所表达的大致是同一个值:
|
例如,下面的表达式所表达的大致是同一个值:
|
||||||
|
|
||||||
```
|
- `128974848`
|
||||||
128974848, 129e6, 129M, 123Mi
|
- `129e6`
|
||||||
```
|
- `129M`
|
||||||
|
- `123Mi`
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
In the following example, the Pod has two Containers. Each Container has a request of 2GiB of local ephemeral storage. Each Container has a limit of 4GiB of local ephemeral storage. Therefore, the Pod has a request of 4GiB of local ephemeral storage, and a limit of 8GiB of local ephemeral storage.
|
In the following example, the Pod has two containers. Each container has a request of
|
||||||
|
2GiB of local ephemeral storage. Each container has a limit of 4GiB of local ephemeral
|
||||||
|
storage. Therefore, the Pod has a request of 4GiB of local ephemeral storage, and a
|
||||||
|
limit of 8GiB of local ephemeral storage.
|
||||||
-->
|
-->
|
||||||
|
|
||||||
在下面的例子中,Pod 包含两个 Container。每个 Container 请求 2 GiB 大小的本地临时性存储。
|
在下面的例子中,Pod 包含两个容器。每个容器请求 2 GiB 大小的本地临时性存储。
|
||||||
每个 Container 都设置了 4 GiB 作为其本地临时性存储的约束值。
|
每个容器都设置了 4 GiB 作为其本地临时性存储的约束值。
|
||||||
因此,整个 Pod 的本地临时性存储请求是 4 GiB,且其本地临时性存储的约束为 8 GiB。
|
因此,整个 Pod 的本地临时性存储请求是 4 GiB,且其本地临时性存储的约束为 8 GiB。
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
@ -644,9 +678,11 @@ spec:
|
||||||
### How Pods with ephemeral-storage requests are scheduled
|
### How Pods with ephemeral-storage requests are scheduled
|
||||||
|
|
||||||
When you create a Pod, the Kubernetes scheduler selects a node for the Pod to
|
When you create a Pod, the Kubernetes scheduler selects a node for the Pod to
|
||||||
run on. Each node has a maximum amount of local ephemeral storage it can provide for Pods. For more information, see [Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable).
|
run on. Each node has a maximum amount of local ephemeral storage it can provide for Pods.
|
||||||
|
For more information, see
|
||||||
|
[Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable).
|
||||||
|
|
||||||
The scheduler ensures that the sum of the resource requests of the scheduled Containers is less than the capacity of the node.
|
The scheduler ensures that the sum of the resource requests of the scheduled containers is less than the capacity of the node.
|
||||||
-->
|
-->
|
||||||
|
|
||||||
### 带临时性存储的 Pods 的调度行为
|
### 带临时性存储的 Pods 的调度行为
|
||||||
|
@ -657,7 +693,7 @@ The scheduler ensures that the sum of the resource requests of the scheduled Con
|
||||||
[节点可分配资源](/zh/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)
|
[节点可分配资源](/zh/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)
|
||||||
节。
|
节。
|
||||||
|
|
||||||
调度器会确保所调度的 Containers 的资源请求总和不会超出节点的资源容量。
|
调度器会确保所调度的容器的资源请求总和不会超出节点的资源容量。
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
### Ephemeral storage consumption management {#resource-emphemeralstorage-consumption}
|
### Ephemeral storage consumption management {#resource-emphemeralstorage-consumption}
|
||||||
|
@ -672,7 +708,7 @@ kubelet measures storage use in:
|
||||||
If a Pod is using more ephemeral storage than you allow it to, the kubelet
|
If a Pod is using more ephemeral storage than you allow it to, the kubelet
|
||||||
sets an eviction signal that triggers Pod eviction.
|
sets an eviction signal that triggers Pod eviction.
|
||||||
|
|
||||||
For container-level isolation, if a Container's writable layer and log
|
For container-level isolation, if a container's writable layer and log
|
||||||
usage exceeds its storage limit, the kubelet marks the Pod for eviction.
|
usage exceeds its storage limit, the kubelet marks the Pod for eviction.
|
||||||
|
|
||||||
For pod-level isolation the kubelet works out an overall Pod storage limit by
|
For pod-level isolation the kubelet works out an overall Pod storage limit by
|
||||||
|
@ -797,7 +833,7 @@ Kubernetes does not use them.
|
||||||
Quotas are faster and more accurate than directory scanning. When a
|
Quotas are faster and more accurate than directory scanning. When a
|
||||||
directory is assigned to a project, all files created under a
|
directory is assigned to a project, all files created under a
|
||||||
directory are created in that project, and the kernel merely has to
|
directory are created in that project, and the kernel merely has to
|
||||||
keep track of how many blocks are in use by files in that project.
|
keep track of how many blocks are in use by files in that project.
|
||||||
If a file is created and deleted, but has an open file descriptor,
|
If a file is created and deleted, but has an open file descriptor,
|
||||||
it continues to consume space. Quota tracking records that space accurately
|
it continues to consume space. Quota tracking records that space accurately
|
||||||
whereas directory scans overlook the storage used by deleted files.
|
whereas directory scans overlook the storage used by deleted files.
|
||||||
|
@ -885,6 +921,7 @@ Extended Resource in Pods.
|
||||||
Node-level extended resources are tied to nodes.
|
Node-level extended resources are tied to nodes.
|
||||||
|
|
||||||
##### Device plugin managed resources
|
##### Device plugin managed resources
|
||||||
|
|
||||||
See [Device
|
See [Device
|
||||||
Plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)
|
Plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)
|
||||||
for how to advertise device plugin managed resources on each node.
|
for how to advertise device plugin managed resources on each node.
|
||||||
|
@ -902,15 +939,13 @@ for how to advertise device plugin managed resources on each node.
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
##### Other resources
|
##### Other resources
|
||||||
|
|
||||||
To advertise a new node-level extended resource, the cluster operator can
|
To advertise a new node-level extended resource, the cluster operator can
|
||||||
submit a `PATCH` HTTP request to the API server to specify the available
|
submit a `PATCH` HTTP request to the API server to specify the available
|
||||||
quantity in the `status.capacity` for a node in the cluster. After this
|
quantity in the `status.capacity` for a node in the cluster. After this
|
||||||
operation, the node's `status.capacity` will include a new resource. The
|
operation, the node's `status.capacity` will include a new resource. The
|
||||||
`status.allocatable` field is updated automatically with the new resource
|
`status.allocatable` field is updated automatically with the new resource
|
||||||
asynchronously by the kubelet. Note that because the scheduler uses the node
|
asynchronously by the kubelet.
|
||||||
`status.allocatable` value when evaluating Pod fitness, there may be a short
|
|
||||||
delay between patching the node capacity with a new resource and the first Pod
|
|
||||||
that requests the resource to be scheduled on that node.
|
|
||||||
-->
|
-->
|
||||||
##### 其他资源 {#other-resources}
|
##### 其他资源 {#other-resources}
|
||||||
|
|
||||||
|
@ -918,7 +953,16 @@ that requests the resource to be scheduled on that node.
|
||||||
以在集群中节点的 `status.capacity` 中为其配置可用数量。
|
以在集群中节点的 `status.capacity` 中为其配置可用数量。
|
||||||
完成此操作后,节点的 `status.capacity` 字段中将包含新资源。
|
完成此操作后,节点的 `status.capacity` 字段中将包含新资源。
|
||||||
kubelet 会异步地对 `status.allocatable` 字段执行自动更新操作,使之包含新资源。
|
kubelet 会异步地对 `status.allocatable` 字段执行自动更新操作,使之包含新资源。
|
||||||
请注意,由于调度器在评估 Pod 是否适合在某节点上执行时会使用节点的 `status.allocatable` 值,
|
|
||||||
|
<!--
|
||||||
|
Because the scheduler uses the node `status.allocatable` value when
|
||||||
|
evaluating Pod fitness, the scheduler only takes account of the new value after
|
||||||
|
the asynchronous update. There may be a short delay between patching the
|
||||||
|
node capacity with a new resource and the time when the first Pod that requests
|
||||||
|
the resource can be scheduled on that node.
|
||||||
|
-->
|
||||||
|
由于调度器在评估 Pod 是否适合在某节点上执行时会使用节点的 `status.allocatable` 值,
|
||||||
|
调度器只会考虑异步更新之后的新值。
|
||||||
在更新节点容量使之包含新资源之后和请求该资源的第一个 Pod 被调度到该节点之间,
|
在更新节点容量使之包含新资源之后和请求该资源的第一个 Pod 被调度到该节点之间,
|
||||||
可能会有短暂的延迟。
|
可能会有短暂的延迟。
|
||||||
|
|
||||||
|
@ -929,7 +973,6 @@ Here is an example showing how to use `curl` to form an HTTP request that
|
||||||
advertises five "example.com/foo" resources on node `k8s-node-1` whose master
|
advertises five "example.com/foo" resources on node `k8s-node-1` whose master
|
||||||
is `k8s-master`.
|
is `k8s-master`.
|
||||||
-->
|
-->
|
||||||
|
|
||||||
**示例:**
|
**示例:**
|
||||||
|
|
||||||
这是一个示例,显示了如何使用 `curl` 构造 HTTP 请求,公告主节点为 `k8s-master`
|
这是一个示例,显示了如何使用 `curl` 构造 HTTP 请求,公告主节点为 `k8s-master`
|
||||||
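A sketch of such a request, assuming the API server is reachable on `localhost:8080` (for example via `kubectl proxy --port=8080`); note that `~1` encodes `/` in a JSON-Patch path:

```shell
curl --header "Content-Type: application/json-patch+json" \
  --request PATCH \
  --data '[{"op": "add", "path": "/status/capacity/example.com~1foo", "value": "5"}]' \
  http://localhost:8080/api/v1/nodes/k8s-node-1/status
```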
|
@ -963,14 +1006,14 @@ Cluster-level extended resources are not tied to nodes. They are usually managed
|
||||||
by scheduler extenders, which handle the resource consumption and resource quota.
|
by scheduler extenders, which handle the resource consumption and resource quota.
|
||||||
|
|
||||||
You can specify the extended resources that are handled by scheduler extenders
|
You can specify the extended resources that are handled by scheduler extenders
|
||||||
in [scheduler policy configuration](/docs/reference/config-api/kube-scheduler-policy-config.v1/)
|
in [scheduler policy configuration](/docs/reference/config-api/kube-scheduler-config.v1beta3/)
|
||||||
-->
|
-->
|
||||||
#### 集群层面的扩展资源 {#cluster-level-extended-resources}
|
#### 集群层面的扩展资源 {#cluster-level-extended-resources}
|
||||||
|
|
||||||
集群层面的扩展资源并不绑定到具体节点。
|
集群层面的扩展资源并不绑定到具体节点。
|
||||||
它们通常由调度器扩展程序(Scheduler Extenders)管理,这些程序处理资源消耗和资源配额。
|
它们通常由调度器扩展程序(Scheduler Extenders)管理,这些程序处理资源消耗和资源配额。
|
||||||
|
|
||||||
你可以在[调度器策略配置](/zh/docs/reference/config-api/kube-scheduler-policy-config.v1/)
|
你可以在[调度器策略配置](/zh/docs/reference/config-api/kube-scheduler-config.v1beta3/)
|
||||||
中指定由调度器扩展程序处理的扩展资源。
|
中指定由调度器扩展程序处理的扩展资源。
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
|
@ -981,9 +1024,9 @@ cluster-level extended resource "example.com/foo" is handled by the scheduler
|
||||||
extender.
|
extender.
|
||||||
|
|
||||||
- The scheduler sends a Pod to the scheduler extender only if the Pod requests
|
- The scheduler sends a Pod to the scheduler extender only if the Pod requests
|
||||||
"example.com/foo".
|
"example.com/foo".
|
||||||
- The `ignoredByScheduler` field specifies that the scheduler does not check
|
- The `ignoredByScheduler` field specifies that the scheduler does not check
|
||||||
the "example.com/foo" resource in its `PodFitsResources` predicate.
|
the "example.com/foo" resource in its `PodFitsResources` predicate.
|
||||||
-->
|
-->
|
||||||
**示例:**
|
**示例:**
|
||||||
|
|
||||||
|
@ -1091,9 +1134,10 @@ spec:
|
||||||
<!--
|
<!--
|
||||||
## PID limiting
|
## PID limiting
|
||||||
|
|
||||||
Process ID (PID) limits allow for the configuration of a kubelet to limit the number of PIDs that a given Pod can consume. See [Pid Limiting](/docs/concepts/policy/pid-limiting/) for information.
|
Process ID (PID) limits allow for the configuration of a kubelet
|
||||||
|
to limit the number of PIDs that a given Pod can consume. See
|
||||||
|
[PID Limiting](/docs/concepts/policy/pid-limiting/) for information.
|
||||||
-->
|
-->
|
||||||
|
|
||||||
## PID 限制 {#pid-limiting}
|
## PID 限制 {#pid-limiting}
|
||||||
|
|
||||||
进程 ID(PID)限制允许对 kubelet 进行配置,以限制给定 Pod 可以消耗的 PID 数量。
|
进程 ID(PID)限制允许对 kubelet 进行配置,以限制给定 Pod 可以消耗的 PID 数量。
|
||||||
|
@ -1102,43 +1146,52 @@ Process ID (PID) limits allow for the configuration of a kubelet to limit the nu
|
||||||
<!--
|
<!--
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
### My Pods are pending with event message failedScheduling
|
### My Pods are pending with event message `FailedScheduling`
|
||||||
|
|
||||||
If the scheduler cannot find any node where a Pod can fit, the Pod remains
|
If the scheduler cannot find any node where a Pod can fit, the Pod remains
|
||||||
unscheduled until a place can be found. An event is produced each time the
|
unscheduled until a place can be found. An
|
||||||
scheduler fails to find a place for the Pod, like this:
|
[Event](/docs/reference/kubernetes-api/cluster-resources/event-v1/) is produced
|
||||||
|
each time the scheduler fails to find a place for the Pod. You can use `kubectl`
|
||||||
|
to view the events for a Pod; for example:
|
||||||
-->
|
-->
|
||||||
## 疑难解答
|
## 疑难解答
|
||||||
|
|
||||||
### 我的 Pod 处于悬决状态且事件信息显示 failedScheduling
|
### 我的 Pod 处于悬决状态且事件信息显示 `FailedScheduling`
|
||||||
|
|
||||||
如果调度器找不到该 Pod 可以匹配的任何节点,则该 Pod 将保持未被调度状态,
|
如果调度器找不到该 Pod 可以匹配的任何节点,则该 Pod 将保持未被调度状态,
|
||||||
直到找到一个可以被调度到的位置。每当调度器找不到 Pod 可以调度的地方时,
|
直到找到一个可以被调度到的位置。每当调度器找不到 Pod 可以调度的地方时,
|
||||||
会产生一个事件,如下所示:
|
会产生一个 [Event](/docs/reference/kubernetes-api/cluster-resources/event-v1/)。
|
||||||
|
你可以使用 `kubectl` 来查看 Pod 的事件;例如:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
kubectl describe pod frontend | grep -A 3 Events
|
kubectl describe pod frontend | grep -A 9999999999 Events
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
Events:
|
Events:
|
||||||
FirstSeen LastSeen Count From Subobject PathReason Message
|
Type Reason Age From Message
|
||||||
36s 5s 6 {scheduler} FailedScheduling Failed for reason PodExceedsFreeCPU and possibly others
|
---- ------ ---- ---- -------
|
||||||
|
Warning FailedScheduling 23s default-scheduler 0/42 nodes available: insufficient cpu
|
||||||
```
|
```
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
In the preceding example, the Pod named "frontend" fails to be scheduled due to
|
In the preceding example, the Pod named "frontend" fails to be scheduled due to
|
||||||
insufficient CPU resource on the node. Similar error messages can also suggest
|
insufficient CPU resource on any node. Similar error messages can also suggest
|
||||||
failure due to insufficient memory (PodExceedsFreeMemory). In general, if a Pod
|
failure due to insufficient memory (PodExceedsFreeMemory). In general, if a Pod
|
||||||
is pending with a message of this type, there are several things to try:
|
is pending with a message of this type, there are several things to try:
|
||||||
|
|
||||||
- Add more nodes to the cluster.
|
- Add more nodes to the cluster.
|
||||||
- Terminate unneeded Pods to make room for pending Pods.
|
- Terminate unneeded Pods to make room for pending Pods.
|
||||||
- Check that the Pod is not larger than all the nodes. For example, if all the
|
- Check that the Pod is not larger than all the nodes. For example, if all the
|
||||||
nodes have a capacity of `cpu: 1`, then a Pod with a request of `cpu: 1.1` will
|
nodes have a capacity of `cpu: 1`, then a Pod with a request of `cpu: 1.1` will
|
||||||
never be scheduled.
|
never be scheduled.
|
||||||
|
- Check for node taints. If most of your nodes are tainted, and the new Pod does
|
||||||
|
not tolerate that taint, the scheduler only considers placements onto the
|
||||||
|
remaining nodes that don't have that taint.
|
||||||
|
|
||||||
You can check node capacities and amounts allocated with the
|
You can check node capacities and amounts allocated with the
|
||||||
`kubectl describe nodes` command. For example:
|
`kubectl describe nodes` command. For example:
|
||||||
-->
|
-->
|
||||||
|
|
||||||
在上述示例中,由于节点上的 CPU 资源不足,名为 “frontend” 的 Pod 无法被调度。
|
在上述示例中,由于节点上的 CPU 资源不足,名为 “frontend” 的 Pod 无法被调度。
|
||||||
由于内存不足(PodExceedsFreeMemory)而导致失败时,也有类似的错误消息。
|
由于内存不足(PodExceedsFreeMemory)而导致失败时,也有类似的错误消息。
|
||||||
一般来说,如果 Pod 处于悬决状态且有这种类型的消息时,你可以尝试如下几件事情:
|
一般来说,如果 Pod 处于悬决状态且有这种类型的消息时,你可以尝试如下几件事情:
|
||||||
|
@ -1147,12 +1200,15 @@ You can check node capacities and amounts allocated with the
|
||||||
- 终止不需要的 Pod,为悬决的 Pod 腾出空间。
|
- 终止不需要的 Pod,为悬决的 Pod 腾出空间。
|
||||||
- 检查 Pod 所需的资源是否超出所有节点的资源容量。例如,如果所有节点的容量都是 `cpu: 1`,
|
- 检查 Pod 所需的资源是否超出所有节点的资源容量。例如,如果所有节点的容量都是 `cpu: 1`,
|
||||||
那么一个请求为 `cpu: 1.1` 的 Pod 永远不会被调度。
|
那么一个请求为 `cpu: 1.1` 的 Pod 永远不会被调度。
|
||||||
|
- 检查节点上的污点设置。如果集群中节点上存在污点,而新的 Pod 不能容忍污点,
|
||||||
|
调度器只会考虑将 Pod 调度到不带有该污点的节点上(参见下面的示例命令)。
|
||||||
|
|
||||||
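A quick way to check for such taints (the node name is illustrative):

```shell
# List every node together with its taints
kubectl get nodes -o custom-columns='NAME:.metadata.name,TAINTS:.spec.taints'

# Or inspect a single node
kubectl describe node e2e-test-node-pool-4lw4 | grep -i taints
```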
你可以使用 `kubectl describe nodes` 命令检查节点容量和已分配的资源数量。例如:
|
你可以使用 `kubectl describe nodes` 命令检查节点容量和已分配的资源数量。例如:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
kubectl describe nodes e2e-test-node-pool-4lw4
|
kubectl describe nodes e2e-test-node-pool-4lw4
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
```
|
||||||
Name: e2e-test-node-pool-4lw4
|
Name: e2e-test-node-pool-4lw4
|
||||||
[ ... 这里忽略了若干行以便阅读 ...]
|
[ ... 这里忽略了若干行以便阅读 ...]
|
||||||
|
@ -1184,34 +1240,60 @@ Allocated resources:
|
||||||
In the preceding output, you can see that if a Pod requests more than 1120m
|
In the preceding output, you can see that if a Pod requests more than 1120m
|
||||||
CPUs or 6.23Gi of memory, it will not fit on the node.
|
CPUs or 6.23Gi of memory, it will not fit on the node.
|
||||||
|
|
||||||
By looking at the `Pods` section, you can see which Pods are taking up space on
|
By looking at the "Pods" section, you can see which Pods are taking up space on
|
||||||
the node.
|
the node.
|
||||||
|
|
||||||
The amount of resources available to Pods is less than the node capacity, because
|
|
||||||
system daemons use a portion of the available resources. The `allocatable` field
|
|
||||||
[NodeStatus](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#nodestatus-v1-core)
|
|
||||||
gives the amount of resources that are available to Pods. For more information, see
|
|
||||||
[Node Allocatable Resources](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md).
|
|
||||||
The [resource quota](/docs/concepts/policy/resource-quotas/) feature can be configured
|
|
||||||
to limit the total amount of resources that can be consumed. If used in conjunction
|
|
||||||
with namespaces, it can prevent one team from hogging all the resources.
|
|
||||||
-->
|
-->
|
||||||
在上面的输出中,你可以看到如果 Pod 请求超过 1120m CPU 或者 6.23Gi 内存,节点将无法满足。
|
在上面的输出中,你可以看到如果 Pod 请求超过 1120m CPU 或者 6.23Gi 内存,节点将无法满足。
|
||||||
|
|
||||||
通过查看 `Pods` 部分,你将看到哪些 Pod 占用了节点上的资源。
|
通过查看 "Pods" 部分,你将看到哪些 Pod 占用了节点上的资源。
|
||||||
|
|
||||||
可供 Pod 使用的资源量小于节点容量,因为系统守护程序也会使用一部分可用资源。
|
<!--
|
||||||
[NodeStatus](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#nodestatus-v1-core)
|
The amount of resources available to Pods is less than the node capacity, because
|
||||||
的 `allocatable` 字段给出了可用于 Pod 的资源量。
|
system daemons use a portion of the available resources. Within the Kubernetes API,
|
||||||
有关更多信息,请参阅 [节点可分配资源](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md)。
|
each Node has a `.status.allocatable` field
|
||||||
|
(see [NodeStatus](/docs/reference/kubernetes-api/cluster-resources/node-v1/#NodeStatus)
|
||||||
|
for details).
|
||||||
|
-->
|
||||||
|
Pods 可用的资源量低于节点的资源总量,因为系统守护进程也会使用一部分可用资源。
|
||||||
|
在 Kubernetes API 中,每个 Node 都有一个 `.status.allocatable` 字段
|
||||||
|
(详情参见 [NodeStatus](/docs/reference/kubernetes-api/cluster-resources/node-v1/#NodeStatus))。
|
||||||
|
|
||||||
可以配置 [资源配额](/zh/docs/concepts/policy/resource-quotas/) 功能特性
|
<!--
|
||||||
以限制可以使用的资源总量。
|
The `.status.allocatable` field describes the amount of resources that are available
|
||||||
如果与名字空间配合一起使用,就可以防止一个团队占用所有资源。
|
to Pods on that node (for example: 15 virtual CPUs and 7538 MiB of memory).
|
||||||
|
For more information on node allocatable resources in Kubernetes, see
|
||||||
|
[Reserve Compute Resources for System Daemons](/docs/tasks/administer-cluster/reserve-compute-resources/).
|
||||||
|
-->
|
||||||
|
字段 `.status.allocatable` 描述节点上可以用于 Pod 的资源总量(例如:15 个虚拟
|
||||||
|
CPU、7538 MiB 内存)。关于 Kubernetes 中节点可分配资源的信息,可参阅
|
||||||
|
[为系统守护进程预留计算资源](/zh/docs/tasks/administer-cluster/reserve-compute-resources/)。
|
||||||
|
|
||||||
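For example, you can read the field directly; the node name is illustrative, and the output shown is a sketch:

```shell
kubectl get node e2e-test-node-pool-4lw4 -o jsonpath='{.status.allocatable}'
# e.g. {"cpu":"15","memory":"7538Mi","pods":"110"}
```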
|
<!--
|
||||||
|
You can configure [resource quotas](/docs/concepts/policy/resource-quotas/)
|
||||||
|
to limit the total amount of resources that a namespace can consume.
|
||||||
|
Kubernetes enforces quotas for objects in a particular namespace when there is a
|
||||||
|
ResourceQuota in that namespace.
|
||||||
|
For example, if you assign specific namespaces to different teams, you
|
||||||
|
can add ResourceQuotas into those namespaces. Setting resource quotas helps to
|
||||||
|
prevent one team from using so much of any resource that this over-use affects other teams.
|
||||||
|
|
||||||
|
You should also consider what access you grant to that namespace:
|
||||||
|
**full** write access to a namespace allows someone with that access to remove any
|
||||||
|
resource, including a configured ResourceQuota.
|
||||||
|
-->
|
||||||
|
你可以配置[资源配额](/zh/docs/concepts/policy/resource-quotas/)功能特性以限制每个名字空间可以使用的资源总量。
|
||||||
|
当某名字空间中存在 ResourceQuota 时,Kubernetes 会在该名字空间中的对象强制实施配额。
|
||||||
|
例如,如果你为不同的团队分配名字空间,你可以为这些名字空间添加 ResourceQuota。
|
||||||
|
设置资源配额有助于防止一个团队占用太多资源,以至于这种占用会影响其他团队。
|
||||||
|
|
||||||
|
你还需要考虑为这些名字空间设置授权访问:
|
||||||
|
为名字空间提供 **全部** 的写权限时,具有合适权限的人可能删除所有资源,
|
||||||
|
包括所配置的 ResourceQuota。
|
||||||
|
|
||||||
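A minimal sketch of such a ResourceQuota (the name, namespace, and amounts are illustrative):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: team-a-quota  # illustrative name
  namespace: team-a   # illustrative namespace
spec:
  hard:
    requests.cpu: "10"
    requests.memory: 20Gi
    limits.cpu: "20"
    limits.memory: 40Gi
```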
<!--
|
<!--
|
||||||
### My Container is terminated
|
### My Container is terminated
|
||||||
Your Container might get terminated because it is resource-starved. To check
|
|
||||||
|
Your container might get terminated because it is resource-starved. To check
|
||||||
whether a Container is being killed because it is hitting a resource limit, call
|
whether a Container is being killed because it is hitting a resource limit, call
|
||||||
`kubectl describe pod` on the Pod of interest:
|
`kubectl describe pod` on the Pod of interest:
|
||||||
-->
|
-->
|
||||||
|
@ -1225,6 +1307,11 @@ whether a Container is being killed because it is hitting a resource limit, call
|
||||||
kubectl describe pod simmemleak-hra99
|
kubectl describe pod simmemleak-hra99
|
||||||
```
|
```
|
||||||
|
|
||||||
|
<!--
|
||||||
|
The output is similar to:
|
||||||
|
-->
|
||||||
|
输出类似于:
|
||||||
|
|
||||||
```
|
```
|
||||||
Name: simmemleak-hra99
|
Name: simmemleak-hra99
|
||||||
Namespace: default
|
Namespace: default
|
||||||
|
@ -1235,7 +1322,6 @@ Status: Running
|
||||||
Reason:
|
Reason:
|
||||||
Message:
|
Message:
|
||||||
IP: 10.244.2.75
|
IP: 10.244.2.75
|
||||||
Replication Controllers: simmemleak (1/1 replicas created)
|
|
||||||
Containers:
|
Containers:
|
||||||
simmemleak:
|
simmemleak:
|
||||||
Image: saadali/simmemleak
|
Image: saadali/simmemleak
|
||||||
|
@ -1254,57 +1340,47 @@ Conditions:
|
||||||
Type Status
|
Type Status
|
||||||
Ready False
|
Ready False
|
||||||
Events:
|
Events:
|
||||||
FirstSeen LastSeen Count From SubobjectPath Reason Message
|
Type Reason Age From Message
|
||||||
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {scheduler } scheduled Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f
|
---- ------ ---- ---- -------
|
||||||
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "k8s.gcr.io/pause:0.8.0" already present on machine
|
Normal Scheduled 42s default-scheduler Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f
|
||||||
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD created Created with docker id 6a41280f516d
|
Normal Pulled 41s kubelet Container image "saadali/simmemleak:latest" already present on machine
|
||||||
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD started Started with docker id 6a41280f516d
|
Normal Created 41s kubelet Created container simmemleak
|
||||||
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} spec.containers{simmemleak} created Created with docker id 87348f12526a
|
Normal Started 40s kubelet Started container simmemleak
|
||||||
|
Normal Killing 32s kubelet Killing container with id ead3fb35-5cf5-44ed-9ae1-488115be66c6: Need to kill Pod
|
||||||
```
|
```
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
In the preceding example, the `Restart Count: 5` indicates that the `simmemleak`
|
In the preceding example, the `Restart Count: 5` indicates that the `simmemleak`
|
||||||
Container in the Pod was terminated and restarted five times.
|
Container in the Pod was terminated and restarted five times (so far).
|
||||||
|
The `OOMKilled` reason shows that the container tried to use more memory than its limit.
|
||||||
You can call `kubectl get pod` with the `-o go-template=...` option to fetch the status
|
|
||||||
of previously terminated Containers:
|
|
||||||
-->
|
-->
|
||||||
在上面的例子中,`Restart Count: 5` 意味着 Pod 中的 `simmemleak` 容器被终止并重启了五次。
|
在上面的例子中,`Restart Count: 5` 意味着 Pod 中的 `simmemleak`
|
||||||
|
容器被终止并且(到目前为止)重启了五次。
|
||||||
你可以使用 `kubectl get pod` 命令加上 `-o go-template=...` 选项来获取之前终止容器的状态。
|
`OOMKilled` 这一原因表明,容器尝试使用的内存量超出了其约束值。
|
||||||
|
|
||||||
```shell
|
|
||||||
kubectl get pod -o go-template='{{range.status.containerStatuses}}{{"Container Name: "}}{{.name}}{{"\r\nLastState: "}}{{.lastState}}{{end}}' simmemleak-hra99
|
|
||||||
```
|
|
||||||
```
|
|
||||||
Container Name: simmemleak
|
|
||||||
LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]]
|
|
||||||
```
|
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
You can see that the Container was terminated because of `reason:OOM Killed`, where `OOM` stands for Out Of Memory.
|
Your next step might be to check the application code for a memory leak. If you
|
||||||
|
find that the application is behaving how you expect, consider setting a higher
|
||||||
|
memory limit (and possibly request) for that container.
|
||||||
-->
|
-->
|
||||||
|
你接下来要做的或许是检查应用代码,看看是否存在内存泄露。
|
||||||
你可以看到容器因为 `reason:OOM killed` 而被终止,`OOM` 表示内存不足(Out Of Memory)。
|
如果你发现应用的行为与你所预期的相同,则可以考虑为该容器设置一个更高的内存约束
|
||||||
|
(也可能需要设置请求值)。
|
||||||
|
|
||||||
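One way to confirm the termination reason and the currently configured limit before raising it (the Pod name comes from the example above):

```shell
kubectl get pod simmemleak-hra99 \
  -o jsonpath='{.status.containerStatuses[0].lastState.terminated.reason}'
# OOMKilled

kubectl get pod simmemleak-hra99 \
  -o jsonpath='{.spec.containers[0].resources.limits.memory}'
```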
## {{% heading "whatsnext" %}}

<!--
* Get hands-on experience [assigning Memory resources to containers and Pods](/docs/tasks/configure-pod-container/assign-memory-resource/).
* Get hands-on experience [assigning CPU resources to containers and Pods](/docs/tasks/configure-pod-container/assign-cpu-resource/).
* Read how the API reference defines a [container](/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container)
  and its [resource requirements](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources)
* Read about [project quotas](https://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html) in XFS
* Read more about the [kube-scheduler configuration reference (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/)
-->

* 获取[分配内存资源给容器和 Pod](/zh/docs/tasks/configure-pod-container/assign-memory-resource/) 的实践经验
* 获取[分配 CPU 资源给容器和 Pod](/zh/docs/tasks/configure-pod-container/assign-cpu-resource/) 的实践经验
* 阅读 API 参考中 [Container](/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container)
  和其[资源请求](/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources)定义。
* 阅读 XFS 中[配额](https://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html)的文档
* 进一步阅读 [kube-scheduler 配置参考 (v1beta3)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta3/)
@ -212,6 +212,7 @@ Operator.

{{% thirdparty-content %}}

* [Charmed Operator Framework](https://juju.is/)
* [Kopf](https://github.com/nolar/kopf) (Kubernetes Operator Pythonic Framework)
* [kubebuilder](https://book.kubebuilder.io/)
* [KubeOps](https://buehler.github.io/dotnet-operator-sdk/) (dotnet operator SDK)
* [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator)

@ -225,6 +226,7 @@ you implement yourself

{{% thirdparty-content %}}

* [Charmed Operator Framework](https://juju.is/)
* [Kopf](https://github.com/nolar/kopf) (Kubernetes Operator Pythonic Framework)
* [kubebuilder](https://book.kubebuilder.io/)
* [KubeOps](https://buehler.github.io/dotnet-operator-sdk/) (dotnet operator SDK)
* [KUDO](https://kudo.dev/) (Kubernetes 通用声明式 Operator)
@ -434,7 +434,6 @@ The following example describes how to map secret values into application enviro

* If you are familiar with {{< glossary_tooltip text="Helm Charts" term_id="helm-chart" >}}, [install Service Catalog using Helm](/docs/tasks/service-catalog/install-service-catalog-using-helm/) into your Kubernetes cluster. Alternatively, you can [install Service Catalog using the SC tool](/docs/tasks/service-catalog/install-service-catalog-using-sc/).
* View [sample service brokers](https://github.com/openservicebrokerapi/servicebroker/blob/master/gettingStarted.md#sample-service-brokers).
* Explore the [kubernetes-sigs/service-catalog](https://github.com/kubernetes-sigs/service-catalog) project.
-->
* 如果你熟悉 {{< glossary_tooltip text="Helm Charts" term_id="helm-chart" >}},
  可以[使用 Helm 安装服务目录](/zh/docs/tasks/service-catalog/install-service-catalog-using-helm/)

@ -442,6 +441,5 @@ The following example describes how to map secret values into application enviro

  [使用 SC 工具安装服务目录](/zh/docs/tasks/service-catalog/install-service-catalog-using-sc/)。
* 查看[服务代理示例](https://github.com/openservicebrokerapi/servicebroker/blob/master/gettingStarted.md#sample-service-brokers)
* 浏览 [kubernetes-sigs/service-catalog](https://github.com/kubernetes-sigs/service-catalog) 项目
@ -57,13 +57,13 @@ Control plane components can be run on any machine in the cluster. However,

for simplicity, set up scripts typically start all control plane components on
the same machine, and do not run user containers on this machine. See
[Creating Highly Available clusters with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/)
for an example control plane setup that runs across multiple machines.
-->
控制平面组件可以在集群中的任何节点上运行。
然而,为了简单起见,设置脚本通常会在同一个计算机上启动所有控制平面组件,
并且不会在此计算机上运行用户容器。
请参阅[使用 kubeadm 构建高可用性集群](/zh/docs/setup/production-environment/tools/kubeadm/high-availability/)
中关于跨多机器控制平面设置的示例。

### kube-apiserver
@ -3,6 +3,7 @@ title: Pod 安全策略

content_type: concept
weight: 30
---

<!--
reviewers:
- pweil-

@ -1236,13 +1237,13 @@ denoted as the string `Unmasked`.

### AppArmor

Controlled via annotations on the PodSecurityPolicy. Refer to the [AppArmor
documentation](/docs/tutorials/policy/apparmor/#podsecuritypolicy-annotations).
-->
### AppArmor

通过 PodSecurityPolicy 上的注解来控制。
详情请参阅
[AppArmor 文档](/zh/docs/tutorials/policy/apparmor/#podsecuritypolicy-annotations)。

<!--
@ -185,15 +185,13 @@ To make use of that label prefix for node isolation:

前缀设置或修改标签。要使用该标签前缀进行节点隔离:

<!--
1. Ensure you are using the [Node authorizer](/docs/reference/access-authn-authz/node/) and have _enabled_ the [NodeRestriction admission plugin](/docs/reference/access-authn-authz/admission-controllers/#noderestriction).
2. Add labels under the `node-restriction.kubernetes.io/` prefix to your Node objects, and use those labels in your node selectors.
   For example, `example.com.node-restriction.kubernetes.io/fips=true` or `example.com.node-restriction.kubernetes.io/pci-dss=true`.
-->
1. 确保你在使用[节点授权](/zh/docs/reference/access-authn-authz/node/)并且已经 _启用_
   [NodeRestriction 准入插件](/zh/docs/reference/access-authn-authz/admission-controllers/#noderestriction)。
2. 将 `node-restriction.kubernetes.io/` 前缀下的标签添加到 Node 对象,
   然后在节点选择器中使用这些标签,如下面的示例所示。
   例如,`example.com.node-restriction.kubernetes.io/fips=true` 或
   `example.com.node-restriction.kubernetes.io/pci-dss=true`。
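下面是一个示意性的 Pod 片段(Pod 名称、镜像等均为假设),演示如何在节点选择器中引用这类受限前缀的标签;前提是管理员已经为目标节点添加了对应标签:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: fips-app
spec:
  # 假设管理员已为目标节点添加了该 node-restriction 前缀的标签
  nodeSelector:
    example.com.node-restriction.kubernetes.io/fips: "true"
  containers:
  - name: app
    image: registry.example.com/app:1.0   # 假设的镜像
```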
@ -216,7 +214,7 @@ feature, greatly expands the types of constraints you can express. The key enhan

3. you can constrain against labels on other pods running on the node (or other topological domain),
   rather than against labels on the node itself, which allows rules about which pods can and cannot be co-located
-->
1. 语言表达能力更强(不仅仅是“对完全匹配规则的 AND”)
2. 你可以标明规则是“软需求”/“偏好”,而不是硬性要求,因此,
   如果调度器无法满足该要求,仍然调度该 Pod
3. 你可以使用节点上(或其他拓扑域中)的 Pod 的标签来约束,而不是使用
@ -369,7 +367,7 @@ in the [scheduler configuration](/docs/reference/scheduling/config/). For exampl

例如:

```yaml
apiVersion: kubescheduler.config.k8s.io/v1beta3
kind: KubeSchedulerConfiguration

profiles:
```
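The hunk above truncates the example at `profiles:`. The sketch below shows one plausible way such a configuration continues, using the NodeAffinity plugin's `addedAffinity` argument; the profile name and the label key/value are illustrative assumptions:

```yaml
apiVersion: kubescheduler.config.k8s.io/v1beta3
kind: KubeSchedulerConfiguration

profiles:
  - schedulerName: foo-scheduler       # illustrative profile name
    pluginConfig:
      - name: NodeAffinity
        args:
          # Extra node affinity applied to every Pod scheduled by this profile
          addedAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
              nodeSelectorTerms:
              - matchExpressions:
                - key: scheduler-profile   # assumed label key
                  operator: In
                  values:
                  - foo
```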
@ -173,7 +173,7 @@ of the scheduler:

* Read about [scheduler performance tuning](/docs/concepts/scheduling-eviction/scheduler-perf-tuning/)
* Read about [Pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
* Read the [reference documentation](/docs/reference/command-line-tools-reference/kube-scheduler/) for kube-scheduler
* Read the [kube-scheduler config (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/) reference
* Learn about [configuring multiple schedulers](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/)
* Learn about [topology management policies](/docs/tasks/administer-cluster/topology-manager/)
* Learn about [Pod Overhead](/docs/concepts/scheduling-eviction/pod-overhead/)

@ -181,7 +181,7 @@ of the scheduler:

* 阅读关于 [调度器性能调优](/zh/docs/concepts/scheduling-eviction/scheduler-perf-tuning/)
* 阅读关于 [Pod 拓扑分布约束](/zh/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
* 阅读关于 kube-scheduler 的 [参考文档](/zh/docs/reference/command-line-tools-reference/kube-scheduler/)
* 阅读 [kube-scheduler 配置参考 (v1beta3)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta3/)
* 了解关于 [配置多个调度器](/zh/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) 的方式
* 了解关于 [拓扑结构管理策略](/zh/docs/tasks/administer-cluster/topology-manager/)
* 了解关于 [Pod 额外开销](/zh/docs/concepts/scheduling-eviction/pod-overhead/)
@ -33,7 +33,7 @@ Kubernetes [Pod 安全性标准(Security Standards)](/zh/docs/concepts/secur

为 Pod 定义不同的隔离级别。这些标准能够让你以一种清晰、一致的方式定义如何限制 Pod 行为。

<!--
As a Beta feature, Kubernetes offers a built-in _Pod Security_ {{< glossary_tooltip
text="admission controller" term_id="admission-controller" >}}, the successor
to [PodSecurityPolicies](/docs/concepts/policy/pod-security-policy/). Pod security restrictions
are applied at the {{< glossary_tooltip text="namespace" term_id="namespace" >}} level when pods
|
||||||
<!--
|
<!--
|
||||||
## Exemptions
|
## Exemptions
|
||||||
|
|
||||||
You can define _exemptions_ from pod security enforcement in order allow the creation of pods that
|
You can define _exemptions_ from pod security enforcement in order to allow the creation of pods that
|
||||||
would have otherwise been prohibited due to the policy associated with a given namespace.
|
would have otherwise been prohibited due to the policy associated with a given namespace.
|
||||||
Exemptions can be statically configured in the
|
Exemptions can be statically configured in the
|
||||||
[Admission Controller configuration](/docs/tasks/configure-pod-container/enforce-standards-admission-controller/#configure-the-admission-controller).
|
[Admission Controller configuration](/docs/tasks/configure-pod-container/enforce-standards-admission-controller/#configure-the-admission-controller).
|
||||||
|
|
|
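A minimal sketch of such a static configuration follows; the exempted username and namespace are illustrative assumptions, not values mandated by the page:

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
  configuration:
    apiVersion: pod-security.admission.config.k8s.io/v1beta1
    kind: PodSecurityConfiguration
    defaults:
      enforce: baseline
      enforce-version: latest
    exemptions:
      # Requests from these users bypass pod security enforcement
      usernames: ["system:serviceaccount:ci:trusted-deployer"]   # illustrative
      runtimeClasses: []
      namespaces: ["kube-system"]                                # illustrative
```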
@ -257,9 +257,9 @@ the dedicated fields `nodeName` and `zone`.

<!--
Setting arbitrary topology fields on the `endpoint` field of an `EndpointSlice`
resource has been deprecated and is not supported in the v1 API.
Instead, the v1 API supports setting individual `nodeName` and `zone` fields.
These fields are automatically translated between API versions. For example, the
value of the `"topology.kubernetes.io/zone"` key in the `topology` field in
the v1beta1 API is accessible as the `zone` field in the v1 API.
-->
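For illustration, a v1 EndpointSlice carries these as dedicated per-endpoint fields rather than `topology` keys; the names and addresses below are assumptions:

```yaml
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
  name: example-abc                  # illustrative
  labels:
    kubernetes.io/service-name: example
addressType: IPv4
ports:
  - name: http
    protocol: TCP
    port: 80
endpoints:
  - addresses:
      - "10.1.2.3"                   # illustrative address
    nodeName: node-1                 # dedicated field in the v1 API
    zone: us-west2-a                 # replaces topology["topology.kubernetes.io/zone"]
```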
@ -33,7 +33,7 @@ services are often limited by memory size and can move infrequently

used data into storage that is slower than memory with little impact
on overall performance.
-->
有些应用程序需要额外的存储,但并不关心数据在重启后是否仍然可用。
例如,缓存服务经常受限于内存大小,将不常用的数据转移到比内存慢、但对总体性能的影响很小的存储中。

<!--
@ -219,15 +219,7 @@ As a cluster administrator, you can use a [PodSecurityPolicy](/docs/concepts/pol

-->
### 通用临时卷 {#generic-ephemeral-volumes}

{{< feature-state for_k8s_version="v1.23" state="stable" >}}

<!--
Generic ephemeral volumes are similar to `emptyDir` volumes in the
|
||||||
如果这不符合他们的安全模型,他们有如下选择:
|
如果这不符合他们的安全模型,他们有如下选择:
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
- Explicitly disable the feature through the feature gate.
|
|
||||||
- Use a [Pod Security
|
- Use a [Pod Security
|
||||||
Policy](/docs/concepts/policy/pod-security-policy/) where the
|
Policy](/docs/concepts/policy/pod-security-policy/) where the
|
||||||
`volumes` list does not contain the `ephemeral` volume type
|
`volumes` list does not contain the `ephemeral` volume type
|
||||||
|
@ -473,11 +464,8 @@ See [local ephemeral storage](/docs/concepts/configuration/manage-resources-cont

- For more information on the design, see the
  [Generic ephemeral inline volumes KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/1698-generic-ephemeral-volumes/README.md).
-->
### 通用临时卷 {#generic-ephemeral-volumes}

- 有关设计的更多信息,参阅
  [Generic ephemeral inline volumes KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/1698-generic-ephemeral-volumes/README.md)。
@ -666,7 +666,7 @@ size that is within the capacity limits of underlying storage provider. You can

<!--
Note that,
although you can specify a lower amount of storage than what was requested previously,
the new value must still be higher than `.status.capacity`.
Kubernetes does not support shrinking a PVC to less than its current size.
-->
@ -810,7 +810,7 @@ Helper programs relating to the volume type may be required for consumption of a

<!--
### Capacity

Generally, a PV will have a specific storage capacity. This is set using the PV's `capacity` attribute. Read the glossary term [Quantity](/docs/reference/glossary/?all=true#term-quantity) to understand the units expected by `capacity`.

Currently, storage size is the only resource that can be set or requested. Future attributes may include IOPS, throughput, etc.
-->

@ -818,9 +818,9 @@ Currently, storage size is the only resource that can be set or requested. Futu

一般而言,每个 PV 卷都有确定的存储容量。
容量属性是使用 PV 对象的 `capacity` 属性来设置的。
参考词汇表中的
[量纲(Quantity)](/zh/docs/reference/glossary/?all=true#term-quantity)
词条,了解 `capacity` 字段可以接受的单位。

目前,存储大小是可以设置和请求的唯一资源。
未来可能会包含 IOPS、吞吐量等属性。
@ -1038,19 +1038,19 @@ The following volume types support mount options:

-->
以下卷类型支持挂载选项:

* `awsElasticBlockStore`
* `azureDisk`
* `azureFile`
* `cephfs`
* `cinder` (**已弃用**于 v1.18)
* `gcePersistentDisk`
* `glusterfs`
* `iscsi`
* `nfs`
* `quobyte` (**已弃用**于 v1.22)
* `rbd`
* `storageos` (**已弃用**于 v1.22)
* `vsphereVolume`

<!--
Mount options are not validated. If a mount option is invalid, the mount fails.
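下面是一个带挂载选项的 PV 示意清单(NFS 服务器地址与导出路径均为假设):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0003
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  mountOptions:              # 挂载该卷时传给 mount 的选项
    - hard
    - nfsvers=4.1
  nfs:
    path: /tmp               # 假设的导出路径
    server: 172.17.0.2       # 假设的 NFS 服务器地址
```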
@ -26,15 +26,15 @@ between containers running together in a `Pod`.

The Kubernetes {{< glossary_tooltip text="volume" term_id="volume" >}} abstraction
solves both of these problems.
-->
Container 中的文件在磁盘上是临时存放的,这给 Container 中运行的较重要的应用程序带来一些问题。
问题之一是当容器崩溃时文件丢失。
kubelet 会重新启动容器,但容器会以干净的状态重启。
第二个问题会在同一 `Pod` 中运行多个容器并共享文件时出现。
Kubernetes {{< glossary_tooltip text="卷(Volume)" term_id="volume" >}}
这一抽象概念能够解决这两个问题。

<!--
Familiarity with [Pods](/docs/concepts/workloads/pods/) is suggested.
-->
阅读本文前建议你熟悉一下 [Pods](/zh/docs/concepts/workloads/pods)。
@ -59,15 +59,15 @@ Docker 提供卷驱动程序,但是其功能非常有限。

Kubernetes supports many types of volumes. A {{< glossary_tooltip term_id="pod" text="Pod" >}}
can use any number of volume types simultaneously.
Ephemeral volume types have a lifetime of a pod, but persistent volumes exist beyond
the lifetime of a pod. When a pod ceases to exist, Kubernetes destroys ephemeral volumes;
however, Kubernetes does not destroy persistent volumes.
For any kind of volume in a given pod, data is preserved across container restarts.
-->
Kubernetes 支持很多类型的卷。
{{< glossary_tooltip term_id="pod" text="Pod" >}} 可以同时使用任意数目的卷类型。
临时卷类型的生命周期与 Pod 相同,但持久卷可以比 Pod 的存活期长。
当 Pod 不再存在时,Kubernetes 也会销毁临时卷;不过 Kubernetes 不会销毁持久卷。
对于给定 Pod 中任何类型的卷,在容器重启期间数据都不会丢失。

<!--
At its core, a volume is just a directory, possibly with some data in it, which
@ -76,27 +76,41 @@ medium that backs it, and the contents of it are determined by the particular

volume type used.
-->
卷的核心是一个目录,其中可能存有数据,Pod 中的容器可以访问该目录中的数据。
所采用的特定的卷类型将决定该目录如何形成的、使用何种介质保存数据以及目录中存放的内容。

<!--
To use a volume, specify the volumes to provide for the Pod in `.spec.volumes`
and declare where to mount those volumes into containers in `.spec.containers[*].volumeMounts`.
A process in a container sees a filesystem view composed from the initial contents of
the {{< glossary_tooltip text="container image" term_id="image" >}}, plus volumes
(if defined) mounted inside the container.
The process sees a root filesystem that initially matches the contents of the container
image.
Any writes to within that filesystem hierarchy, if allowed, affect what that process views
when it performs a subsequent filesystem access.
-->
使用卷时,在 `.spec.volumes` 字段中设置为 Pod 提供的卷,并在
`.spec.containers[*].volumeMounts` 字段中声明卷在容器中的挂载位置。
容器中的进程看到的文件系统视图是由它们的 {{< glossary_tooltip text="容器镜像" term_id="image" >}}
的初始内容以及挂载在容器中的卷(如果定义了的话)所组成的。
其中根文件系统同容器镜像的内容相吻合。
任何在该文件系统下的写入操作,如果被允许的话,都会影响接下来容器中进程访问文件系统时所看到的内容。

<!--
Volumes mount at the [specified paths](#using-subpath) within
the image.
For each container defined within a Pod, you must independently specify where
to mount each volume that the container uses.

Volumes cannot mount within other volumes (but see [Using subPath](#using-subpath)
for a related mechanism). Also, a volume cannot contain a hard link to anything in
a different volume.
-->
卷挂载在镜像中的[指定路径](#using-subpath)下。
Pod 配置中的每个容器必须独立指定各个卷的挂载位置。

卷不能挂载到其他卷之上(不过存在一种[使用 subPath](#using-subpath) 的相关机制),也不能与其他卷有硬链接。

<!--
## Types of Volumes
-->
## 卷类型 {#volume-types}
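作为示意,下面的 Pod 清单(镜像名为假设)在 `.spec.volumes` 中定义了一个 `emptyDir` 卷,并在 `.spec.containers[*].volumeMounts` 中声明了挂载位置:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - name: test-container
    image: k8s.gcr.io/test-webserver   # 假设的镜像
    volumeMounts:
    - mountPath: /cache                # 卷在容器中的挂载位置
      name: cache-volume
  volumes:
  - name: cache-volume                 # 为 Pod 提供的卷
    emptyDir: {}
```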
@ -116,8 +130,8 @@ volume are persisted and the volume is unmounted. This means that an

EBS volume can be pre-populated with data, and that data can be shared between pods.
-->
`awsElasticBlockStore` 卷将 Amazon Web 服务(AWS)[EBS 卷](https://aws.amazon.com/ebs/)
挂载到你的 Pod 中。与 `emptyDir` 在 Pod 被删除时也被删除不同,EBS 卷的内容在删除 Pod
时会被保留,卷只是被卸载掉了。
这意味着 EBS 卷可以预先填充数据,并且该数据可以在 Pod 之间共享。

<!--
@ -204,9 +218,10 @@ driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver)

must be installed on the cluster and the `CSIMigration` and `CSIMigrationAWS`
beta features must be enabled.
-->
如果启用了对 `awsElasticBlockStore` 的 `CSIMigration`
特性支持,所有插件操作都不再指向树内插件(In-Tree Plugin),转而指向
`ebs.csi.aws.com` 容器存储接口(Container Storage Interface,CSI)驱动。
为了使用此特性,必须在集群中安装
[AWS EBS CSI 驱动](https://github.com/kubernetes-sigs/aws-ebs-csi-driver),
并确保 `CSIMigration` 和 `CSIMigrationAWS` Beta 功能特性被启用。
@ -308,8 +323,9 @@ that data can be shared between Pods. The `cephfs` can be mounted by multiple

writers simultaneously.
-->
`cephfs` 卷允许你将现存的 CephFS 卷挂载到 Pod 中。
不像 `emptyDir` 那样会在 Pod 被删除的同时也会被删除,`cephfs`
卷的内容在 Pod 被删除时会被保留,只是卷被卸载了。
这意味着 `cephfs` 卷可以被预先填充数据,且这些数据可以在
Pod 之间共享。同一 `cephfs` 卷可同时被多个写者挂载。

<!--
@ -375,7 +391,7 @@ It redirects all plugin operations from the existing in-tree plugin to the

`cinder.csi.openstack.org` Container Storage Interface (CSI) Driver.
[OpenStack Cinder CSI Driver](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md)
must be installed on the cluster.
You can disable Cinder CSI migration for your cluster by setting the `CSIMigrationOpenStack`
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to `false`.
If you disable the `CSIMigrationOpenStack` feature, the in-tree Cinder volume plugin takes responsibility
for all aspects of Cinder volume storage management.
@ -399,10 +415,9 @@ provides a way to inject configuration data into Pods.

The data stored in a ConfigMap object can be referenced in a volume of type
`configMap` and then consumed by containerized applications running in a Pod.
-->
[`configMap`](/zh/docs/tasks/configure-pod-container/configure-pod-configmap/)
卷提供了向 Pod 注入配置数据的方法。
ConfigMap 对象中存储的数据可以被 `configMap` 类型的卷引用,然后被 Pod 中运行的容器化应用使用。

<!--
When referencing a ConfigMap, you provide the name of the ConfigMap in the
@ -442,8 +457,8 @@ its `log_level` entry are mounted into the Pod at path "`/etc/config/log_level`"

Note that this path is derived from the volume's `mountPath` and the `path`
keyed with `log_level`.
-->
`log-config` ConfigMap 以卷的形式挂载,并且存储在 `log_level`
条目中的所有内容都被挂载到 Pod 的 `/etc/config/log_level` 路径下。
请注意,这个路径来源于卷的 `mountPath` 和 `log_level` 键对应的 `path`。

<!--
@ -544,8 +559,8 @@ backed volumes are sized to 50% of the memory on a Linux host.

-->

{{< note >}}
当启用 `SizeMemoryBackedVolumes` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/)
时,你可以为基于内存提供的卷指定大小。
如果未指定大小,则基于内存的卷的大小为 Linux 主机上内存的 50%。
{{< /note >}}
@ -589,8 +604,8 @@ targetWWNs expect that those WWNs are from multi-path connections.

You must configure FC SAN Zoning to allocate and mask those LUNs (volumes) to the target WWNs beforehand so that Kubernetes hosts can access them.
-->
{{< note >}}
你必须配置 FC SAN Zoning,以便预先向目标 WWN 分配和屏蔽这些 LUN(卷),这样
Kubernetes 主机才可以访问它们。
{{< /note >}}

<!--
@ -737,10 +752,10 @@ feature allows the creation of Persistent Disks that are available in two zones

within the same region. In order to use this feature, the volume must be provisioned
as a PersistentVolume; referencing the volume directly from a Pod is not supported.
-->
[区域持久盘](https://cloud.google.com/compute/docs/disks/#repds)
功能允许你创建能在同一区域的两个可用区中使用的持久盘。
要使用这个功能,必须以持久卷(PersistentVolume)的方式提供卷;直接从
Pod 引用这种卷是不可以的。

<!--
#### Manually provisioning a Regional PD PersistentVolume
@ -750,8 +765,8 @@ Before creating a PersistentVolume, you must create the PD:

-->
#### 手动供应基于区域 PD 的 PersistentVolume {#manually-provisioning-regional-pd-pv}

使用[为 GCE PD 定义的存储类](/zh/docs/concepts/storage/storage-classes/#gce)
可以实现动态供应。在创建 PersistentVolume 之前,你首先要创建 PD。

```shell
gcloud beta compute disks create --size=500GB my-data-disk
```
@ -824,8 +839,8 @@ and the kubelet, set the `InTreePluginGCEUnregister` flag to `true`.

{{< feature-state for_k8s_version="v1.21" state="alpha" >}}

要禁止控制器管理器和 kubelet 加载 `gcePersistentDisk` 存储插件,请将
`InTreePluginGCEUnregister` 标志设置为 `true`。

<!--
### gitRepo (deprecated) {#gitrepo}
@ -838,8 +853,8 @@ The gitRepo volume type is deprecated. To provision a container with a git repo,

-->
{{< warning >}}
`gitRepo` 卷类型已经被废弃。如果需要在容器中提供 git 仓库,请将一个
[EmptyDir](#emptydir) 卷挂载到 InitContainer 中,使用 git
命令完成仓库的克隆操作,然后将 [EmptyDir](#emptydir) 卷挂载到 Pod 的容器中。
{{< /warning >}}

<!--
@ -916,8 +931,8 @@ be required to use `readOnly` mounts for the policy to be effective.

HostPath 卷存在许多安全风险,最佳做法是尽可能避免使用 HostPath。
当必须使用 HostPath 卷时,它的范围应仅限于所需的文件或目录,并以只读方式挂载。

如果通过 AdmissionPolicy 限制 HostPath 对特定目录的访问,则必须要求
`volumeMounts` 使用 `readOnly` 挂载以使策略生效。
{{< /warning >}}

<!--
@ -990,10 +1005,10 @@ Watch out when using this type of volume, because:

-->
当使用这种类型的卷时要小心,因为:

* HostPath 卷可能会暴露特权系统凭据(例如 Kubelet)或特权
  API(例如容器运行时套接字),可用于容器逃逸或攻击集群的其他部分。
* 具有相同配置(例如基于同一 PodTemplate 创建)的多个 Pod
  会由于节点上文件的不同而在不同节点上有不同的行为。
* 下层主机上创建的文件或目录只能由 root 用户写入。你需要在
  [特权容器](/zh/docs/tasks/configure-pod-container/security-context/)
  中以 root 身份运行进程,或者修改主机上的文件权限以便容器能够写入 `hostPath` 卷。
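下面是一个按照上述建议以只读方式挂载 `hostPath` 卷的示意清单(目录与镜像均为假设):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - name: test-container
    image: k8s.gcr.io/test-webserver   # 假设的镜像
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
      readOnly: true                   # 按上述最佳实践以只读方式挂载
  volumes:
  - name: test-volume
    hostPath:
      path: /data                      # 假设的节点目录
      type: Directory                  # 要求该目录在节点上必须已存在
```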
@ -1078,8 +1093,8 @@ unmounted. This means that an iscsi volume can be pre-populated with data, and

that data can be shared between pods.
-->
`iscsi` 卷能将 iSCSI (基于 IP 的 SCSI) 卷挂载到你的 Pod 中。
不像 `emptyDir` 那样会在删除 Pod 的同时也会被删除,`iscsi`
卷的内容在删除 Pod 时会被保留,卷只是被卸载。
这意味着 `iscsi` 卷可以被预先填充数据,并且这些数据可以在 Pod 之间共享。

<!--
@ -1140,9 +1155,8 @@ The following is an example of PersistentVolume spec using a `local` volume and

`nodeAffinity`:
-->
然而,`local` 卷仍然取决于底层节点的可用性,并不适合所有应用程序。
如果节点变得不健康,那么 `local` 卷也将变得不可被 Pod 访问。使用它的 Pod 将不能运行。
使用 `local` 卷的应用程序必须能够容忍这种可用性的降低,以及因底层磁盘的耐用性特征而带来的潜在的数据丢失风险。

下面是一个使用 `local` 卷和 `nodeAffinity` 的持久卷示例:
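该示例清单本身未包含在此代码段中;下面是一个大致符合上述描述的草稿(节点主机名、路径、容量均为假设):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: example-pv
spec:
  capacity:
    storage: 100Gi               # 假设的容量
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage
  local:
    path: /mnt/disks/ssd1        # 假设的本地磁盘路径
  nodeAffinity:                  # local 卷必须显式设置节点亲和性
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - example-node         # 假设的节点名
```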
@ -1198,9 +1212,8 @@ such as node resource requirements, node selectors, Pod affinity, and Pod anti-a

使用 `local` 卷时,建议创建一个 StorageClass 并将其 `volumeBindingMode` 设置为
`WaitForFirstConsumer`。要了解更多详细信息,请参考
[local StorageClass 示例](/zh/docs/concepts/storage/storage-classes/#local)。
延迟卷绑定的操作可以确保 Kubernetes 在为 PersistentVolumeClaim 作出绑定决策时,
会评估 Pod 可能具有的其他节点约束,例如节点资源需求、节点选择器、Pod 亲和性和 Pod 反亲和性。

<!--
An external static provisioner can be run separately for improved management of
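这样的 StorageClass 大致如下(名称为假设):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage                         # 假设的名称
provisioner: kubernetes.io/no-provisioner    # local 卷不支持动态制备
volumeBindingMode: WaitForFirstConsumer      # 延迟绑定,直到有 Pod 使用该 PVC
```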
@ -1258,10 +1271,9 @@ A `persistentVolumeClaim` volume is used to mount a

are a way for users to "claim" durable storage (such as a GCE PersistentDisk or an
iSCSI volume) without knowing the details of the particular cloud environment.
-->
`persistentVolumeClaim` 卷用来将[持久卷](/zh/docs/concepts/storage/persistent-volumes/)(PersistentVolume)挂载到 Pod 中。
持久卷申领(PersistentVolumeClaim)是用户在不知道特定云环境细节的情况下“申领”持久存储(例如
GCE PersistentDisk 或者 iSCSI 卷)的一种方法。

<!--
See the [PersistentVolumes example](/docs/concepts/storage/persistent-volumes/) for more
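在 Pod 中引用 PVC 的写法大致如下(Pod、卷与申领名称均为假设):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
    - name: myfrontend
      image: nginx
      volumeMounts:
      - mountPath: "/var/www/html"
        name: mypd
  volumes:
    - name: mypd
      persistentVolumeClaim:
        claimName: myclaim        # 假设的 PVC 名称
```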
@ -1277,8 +1289,8 @@ Kubernetes. [Portworx](https://portworx.com/use-case/kubernetes-storage/) finger

and aggregates capacity across multiple servers. Portworx runs in-guest in virtual machines or on bare metal Linux nodes.
-->
`portworxVolume` 是一个可伸缩的块存储层,能够以超融合(hyperconverged)的方式与 Kubernetes 一起运行。
[Portworx](https://portworx.com/use-case/kubernetes-storage/)
支持对服务器上存储的指纹处理、基于存储能力进行分层以及跨多个服务器整合存储容量。
Portworx 可以以 in-guest 方式在虚拟机中运行,也可以在裸金属 Linux 节点上运行。

<!--
@ -1324,192 +1336,13 @@ For more details, see the [Portworx volume](https://github.com/kubernetes/exampl

更多详情可以参考 [Portworx 卷](https://github.com/kubernetes/examples/tree/master/staging/volumes/portworx/README.md)。

### projected (投射)

<!--
A projected volume maps several existing volume sources into the same
directory. For more details, see [projected volumes](/docs/concepts/storage/projected-volumes/).
-->
投射卷能将若干现有的卷来源映射到同一目录上。更多详情请参考[投射卷](/zh/docs/concepts/storage/projected-volumes/)。

### quobyte (已弃用) {#quobyte}
@ -1542,32 +1375,32 @@ Quobyte 的 GitHub 项目包含以 CSI 形式部署 Quobyte 的

<!--
An `rbd` volume allows a
[Rados Block Device](https://docs.ceph.com/en/latest/rbd/) (RBD) volume to mount
into your Pod. Unlike `emptyDir`, which is erased when a pod is removed, the
contents of an `rbd` volume are preserved and the volume is unmounted. This
means that an RBD volume can be pre-populated with data, and that data can be
shared between pods.
-->
`rbd` 卷允许将 [Rados 块设备](https://docs.ceph.com/en/latest/rbd/)卷挂载到你的 Pod 中。
不像 `emptyDir` 那样会在删除 Pod 的同时也会被删除,`rbd` 卷的内容在删除 Pod 时会被保存,卷只是被卸载。
这意味着 `rbd` 卷可以被预先填充数据,并且这些数据可以在 Pod 之间共享。

<!--
You must have a Ceph installation running before you can use RBD.
-->
{{< note >}}
在使用 RBD 之前,你必须安装运行 Ceph。
{{< /note >}}

<!--
A feature of RBD is that it can be mounted as read-only by multiple consumers
simultaneously. This means that you can pre-populate a volume with your dataset
and then serve it in parallel from as many pods as you need. Unfortunately,
RBD volumes can only be mounted by a single consumer in read-write mode.
Simultaneous writers are not allowed.

See the [RBD example](https://github.com/kubernetes/examples/tree/master/volumes/rbd)
for more details.
-->
RBD 的一个特性是它可以同时被多个用户以只读方式挂载。
这意味着你可以用数据集预先填充卷,然后根据需要在尽可能多的 Pod 中并行地使用卷。
@ -1576,6 +1409,59 @@ RBD 的一个特性是它可以同时被多个用户以只读方式挂载。
|
||||||
更多详情请参考
|
更多详情请参考
|
||||||
[RBD 示例](https://github.com/kubernetes/examples/tree/master/volumes/rbd)。
|
[RBD 示例](https://github.com/kubernetes/examples/tree/master/volumes/rbd)。
|
||||||
|
|
||||||

#### RBD CSI migration {#rbd-csi-migration}

{{< feature-state for_k8s_version="v1.23" state="alpha" >}}

The `CSIMigration` feature for `RBD`, when enabled, redirects all plugin
operations from the existing in-tree plugin to the `rbd.csi.ceph.com`
{{< glossary_tooltip text="CSI" term_id="csi" >}} driver. In order to use this
feature, the [Ceph CSI driver](https://github.com/ceph/ceph-csi)
must be installed on the cluster and the `CSIMigration` and `csiMigrationRBD`
[feature gates](/docs/reference/command-line-tools-reference/feature-gates/)
must be enabled.

{{< note >}}
As a Kubernetes cluster operator that administers storage, here are the
prerequisites that you must complete before you attempt migration to the
RBD CSI driver:

* You must install the Ceph CSI driver (`rbd.csi.ceph.com`), v3.5.0 or above,
  into your Kubernetes cluster.
* Because the CSI driver requires the `clusterID` field as a parameter for its
  operations, while the in-tree StorageClass requires the `monitors` field
  instead, a Kubernetes storage admin has to create a clusterID based on the
  monitors hash (for example, `#echo -n '<monitors_string>' | md5sum`) in the
  CSI config map and keep the monitors under this clusterID configuration.
* Also, if the value of `adminId` in the in-tree StorageClass is different from
  `admin`, the `adminSecretName` mentioned in the in-tree StorageClass has to be
  patched with the base64 value of the `adminId` parameter value; otherwise this
  step can be skipped.
{{< /note >}}
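
For illustration, the second prerequisite describes an entry like the following sketch, assuming the JSON config layout used by the Ceph CSI driver; the monitor endpoints are placeholders, and the `clusterID` value stands for the md5 hash computed from your own monitors string:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-csi-config
data:
  # Each entry maps a clusterID to the monitors it was derived from
  config.json: |-
    [
      {
        "clusterID": "<md5-of-monitors-string>",
        "monitors": [
          "10.16.154.78:6789",
          "10.16.154.82:6789"
        ]
      }
    ]
```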

### secret

A `secret` volume is used to pass sensitive information, such as passwords, to a
Pod. You can store Secrets in the Kubernetes API server and then mount them as
files into a Pod without coupling your application to Kubernetes directly.
`secret` volumes are backed by tmpfs (a RAM-backed filesystem), so they are
never written to non-volatile (persistent) storage.

You must create a secret in the Kubernetes API before you can use it.
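
A minimal sketch of a Pod consuming a `secret` volume; `mysecret` is a placeholder for a Secret that already exists in the same namespace:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: secret-test
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: secret-volume
      mountPath: /etc/secret-volume  # each key in the Secret appears as a file here
      readOnly: true
  volumes:
  - name: secret-volume
    secret:
      secretName: mysecret  # placeholder: an existing Secret in this namespace
```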

When the `CSIMigration` feature for `vsphereVolume` is enabled, all plugin
operations are redirected from the existing in-tree plugin to the
`csi.vsphere.vmware.com` {{< glossary_tooltip text="CSI" term_id="csi" >}} driver.
In order to use this feature, the
[vSphere CSI driver](https://github.com/kubernetes-sigs/vsphere-csi-driver)
must be installed on the cluster and the `CSIMigration` and `CSIMigrationvSphere`
[feature gates](/docs/reference/command-line-tools-reference/feature-gates/)
must be enabled.

To turn off the `vsphereVolume` plugin from being loaded by the controller
manager and the kubelet, set the `InTreePluginvSphereUnregister` feature gate to
`true`. You must also install the `csi.vsphere.vmware.com`
{{< glossary_tooltip text="CSI" term_id="csi" >}} driver on all worker nodes.

#### Portworx CSI migration

{{< feature-state for_k8s_version="v1.23" state="alpha" >}}

The `CSIMigration` feature for Portworx has been added but is disabled by
default in Kubernetes 1.23 since it's in alpha state. It redirects all plugin
operations from the existing in-tree plugin to the `pxd.portworx.com` Container
Storage Interface (CSI) Driver. The
[Portworx CSI Driver](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/csi/)
must be installed on the cluster. To enable the feature, set
`CSIMigrationPortworx=true` in kube-controller-manager and kubelet.
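
As a sketch, on the kubelet side this can be expressed in the kubelet configuration file; the kube-controller-manager would take the equivalent `--feature-gates=CSIMigrationPortworx=true` command-line flag:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  # alpha gate; off by default in Kubernetes 1.23
  CSIMigrationPortworx: true
```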

## Using subPath {#using-subpath}

Sometimes, it is useful to share one volume for multiple uses in a single Pod.
The `volumeMounts.subPath` property specifies a sub-path inside the referenced
volume instead of its root.
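
For example, the sketch below (adapted from the LAMP-stack illustration in the Kubernetes docs; the images and the PersistentVolumeClaim name are placeholders, and a real deployment would keep the password in a Secret) runs two containers that share one volume, each mounting a different sub-path:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-lamp-site
spec:
  containers:
  - name: mysql
    image: mysql
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: "rootpasswd"   # placeholder; use a Secret in practice
    volumeMounts:
    - mountPath: /var/lib/mysql
      name: site-data
      subPath: mysql        # this container sees only the "mysql" sub-directory
  - name: php
    image: php:7.0-apache
    volumeMounts:
    - mountPath: /var/www/html
      name: site-data
      subPath: html         # this container sees only the "html" sub-directory
  volumes:
  - name: site-data
    persistentVolumeClaim:
      claimName: my-lamp-site-data
```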

Use the `subPathExpr` field to construct `subPath` directory names from
downward API environment variables. For example (an excerpt from a Pod
manifest; the `POD_NAME` environment variable is assumed to be set on the
container via the downward API):

```yaml
    volumeMounts:
    - name: workdir1
      mountPath: /logs
      # The variable name is wrapped in parentheses, not braces
      subPathExpr: $(POD_NAME)
  restartPolicy: Never
  volumes:
```
@ -1953,10 +1861,9 @@ Pods.
|
||||||
-->
|
-->
|
||||||
## 资源 {#resources}
|
## 资源 {#resources}
|
||||||
|
|
||||||
`emptyDir` 卷的存储介质(磁盘、SSD 等)是由保存 kubelet 数据的根目录
|
`emptyDir` 卷的存储介质(磁盘、SSD 等)是由保存 kubelet
|
||||||
(通常是 `/var/lib/kubelet`)的文件系统的介质确定。
|
数据的根目录(通常是 `/var/lib/kubelet`)的文件系统的介质确定。
|
||||||
Kubernetes 对 `emptyDir` 卷或者 `hostPath` 卷可以消耗的空间没有限制,
|
Kubernetes 对 `emptyDir` 卷或者 `hostPath` 卷可以消耗的空间没有限制,容器之间或 Pod 之间也没有隔离。
|
||||||
容器之间或 Pod 之间也没有隔离。
|
|
||||||
|
|
||||||

To learn about requesting space using a resource specification, see
[how to manage resources](/docs/concepts/configuration/manage-resources-containers/).

## Out-of-Tree Volume Plugins {#out-of-tree-volume-plugins}

The out-of-tree volume plugins include
{{< glossary_tooltip text="Container Storage Interface" term_id="csi" >}} (CSI),
and also FlexVolume (which is deprecated). These plugins enable storage vendors
to create custom storage plugins without adding their plugin source code to the
Kubernetes repository.

Previously, all volume plugins were "in-tree". The "in-tree" plugins were
built, linked, compiled, and shipped with the core Kubernetes binaries. Both
CSI and FlexVolume allow volume plugins to be developed independently of the
Kubernetes code base, and deployed (installed) on Kubernetes clusters as
extensions.

For storage vendors looking to create an out-of-tree volume plugin, please refer
to [this FAQ](https://github.com/kubernetes/community/blob/master/sig-storage/volume-plugin-faq.md).

Once a CSI compatible volume driver is deployed on a Kubernetes cluster, users
may use the `csi` volume type to attach and mount the volumes exposed by the
CSI driver.

A `csi` volume can be used in a Pod in three different ways:

* through a reference to a [PersistentVolumeClaim](#persistentvolumeclaim)
* with a [generic ephemeral volume](/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes)
* with a [CSI ephemeral volume](/docs/concepts/storage/ephemeral-volumes/#csi-ephemeral-volume)
  if the driver supports that (beta feature)

The following fields are available to storage administrators to configure a CSI
persistent volume:

- `driver`: A string value that specifies the name of the volume driver to use.
  This value must correspond to the value that the CSI driver returns in the
  `GetPluginInfoResponse`, as defined in the
  [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#getplugininfo).
  It is used by Kubernetes to identify which CSI driver to call out to, and by
  CSI driver components to identify which PV objects belong to the CSI driver.
- `volumeHandle`: A string value that uniquely identifies the volume. This value
  must correspond to the value that the CSI driver returns in the `volume_id`
  field of the `CreateVolumeResponse`, as defined in the
  [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume).
  The value is passed as the `volume_id` parameter in all calls to the CSI
  volume driver when referencing the volume.
- `readOnly`: An optional boolean value indicating whether the volume is to be
  "ControllerPublished" (attached) as read only. The default is false. This
  value is passed to the CSI driver via the `readonly` field in the
  `ControllerPublishVolumeRequest`.
- `volumeAttributes`: A map of string to string that specifies static properties
  of a volume. This map must correspond to the map returned in the
  `volume.attributes` field of the `CreateVolumeResponse` by the CSI driver, as
  defined in the
  [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume).
  The map is passed to the CSI driver via the `volume_attributes` field in the
  `ControllerPublishVolumeRequest`, `NodeStageVolumeRequest`, and
  `NodePublishVolumeRequest`.
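
Pulling these fields together, a sketch of a CSI persistent volume might look like the following; the driver name, volume handle, and attribute values are placeholders that your CSI driver would supply:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: csi-pv-sketch
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  csi:
    driver: csi.example.com           # placeholder: name from GetPluginInfoResponse
    volumeHandle: existing-volume-id  # placeholder: volume_id from CreateVolumeResponse
    readOnly: false                   # passed via ControllerPublishVolumeRequest.readonly
    volumeAttributes:                 # placeholder: volume.attributes from CreateVolumeResponse
      foo: "bar"
```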

Enabling the `CSIMigration` feature redirects operations against existing
in-tree plugins to corresponding CSI plugins (which are expected to be installed
and configured). As a result, operators do not have to make any configuration
changes to existing Storage Classes, PVs or PVCs (referring to in-tree plugins)
when transitioning to a CSI driver that supersedes an in-tree plugin.

The operations and features that are supported include:
provisioning/delete, attach/detach, mount/unmount and resizing of volumes.

In-tree plugins that support `CSIMigration` and have a corresponding CSI driver
implemented are listed in [Types of Volumes](#volume-types).

### flexVolume

{{< feature-state for_k8s_version="v1.23" state="deprecated" >}}

FlexVolume is an out-of-tree plugin interface that uses an exec-based model to
interface with storage drivers. The FlexVolume driver binaries must be installed
in a pre-defined volume plugin path on each node and in some cases the control
plane nodes as well.

Pods interact with FlexVolume drivers through the `flexVolume` in-tree volume
plugin. For more details, see the FlexVolume
[README](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-storage/flexvolume.md#readme)
document.

{{< note >}}
FlexVolume is deprecated. Using an out-of-tree CSI driver is the recommended way
to integrate external storage with Kubernetes.

Maintainers of FlexVolume drivers should implement a CSI driver and help to
migrate users of FlexVolume drivers to CSI. Users of FlexVolume should move
their workloads to use the equivalent CSI driver.
{{< /note >}}

## Mount propagation {#mount-propagation}

Mount propagation allows for sharing volumes mounted by a container to other
containers in the same Pod, or even to other Pods on the same node.

Mount propagation of a volume is controlled by the `mountPropagation` field in
`Container.volumeMounts`. Its values are:

* `None` - This volume mount will not receive any subsequent mounts that are
  mounted to this volume or any of its subdirectories by the host. In similar
  fashion, no mounts created by the container will be visible on the host. This
  is the default mode, and it is equal to the `private` mount propagation as
  described in the
  [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).

* `HostToContainer` - This volume mount will receive all subsequent mounts that
  are mounted to this volume or any of its subdirectories. In other words, if
  the host mounts anything inside the volume mount, the container will see it
  mounted there. Similarly, if any Pod with `Bidirectional` mount propagation to
  the same volume mounts anything there, the container with `HostToContainer`
  mount propagation will see it. This mode is equal to the `rslave` mount
  propagation as described in the
  [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).

* `Bidirectional` - This volume mount behaves the same as the `HostToContainer`
  mount. In addition, all volume mounts created by the container will be
  propagated back to the host and to all containers of all Pods that use the
  same volume. This mode is equal to the `rshared` mount propagation as
  described in the
  [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
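
As a sketch of how this is requested (the volume name and paths are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: propagation-test
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: shared-data
      mountPath: /data
      # Mounts made by the host under /mnt/shared become visible in the container
      mountPropagation: HostToContainer
  volumes:
  - name: shared-data
    hostPath:
      path: /mnt/shared
```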

### Configuration {#configuration}

Before mount propagation can work properly on some deployments, mount share
must be configured correctly in Docker, as shown below.

Edit your Docker's `systemd` service file and set `MountFlags=shared` (or
remove a `MountFlags=slave` setting if one is present), then restart the Docker
daemon.

## How a ReplicaSet works {#how-a-replicaset-works}

A ReplicaSet is defined with a set of fields, including a selector that
identifies the set of Pods it can acquire, a number indicating how many
replicas it should maintain, and a Pod template specifying the Pods it should
create to meet the replica count. Each ReplicaSet fulfills its purpose by
creating and deleting Pods as needed to reach the desired number.
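
A minimal sketch of such a manifest, using the guestbook frontend illustration from the Kubernetes docs (the name, labels, and image are placeholders):

```yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  replicas: 3          # the desired number of Pods
  selector:
    matchLabels:
      tier: frontend   # how the ReplicaSet identifies Pods it can acquire
  template:            # the Pod template used when new Pods are needed
    metadata:
      labels:
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v3
```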

You can delete a ReplicationController without affecting any of its Pods.

Using kubectl, specify the `--cascade=orphan` option to
[`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete).

When using the REST API or Go client library, simply delete the
ReplicationController object.
|
@ -501,12 +501,12 @@ ReplicationController 永远被限制在这个狭隘的职责范围内。
|
||||||
我们甚至计划考虑批量创建 Pod 的机制(查阅 [#170](https://issue.k8s.io/170))。
|
我们甚至计划考虑批量创建 Pod 的机制(查阅 [#170](https://issue.k8s.io/170))。
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
The ReplicationController is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run, scale, rolling-update) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](http://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing ReplicationControllers, auto-scalers, services, scheduling policies, canaries, etc.
|
The ReplicationController is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run, scale, rolling-update) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](https://netflixtechblog.com/asgard-web-based-cloud-management-and-deployment-2c9fc4e4d3a1) managing ReplicationControllers, auto-scalers, services, scheduling policies, canaries, etc.
|
||||||
-->
|
-->
|
||||||
ReplicationController 旨在成为可组合的构建基元。
|
ReplicationController 旨在成为可组合的构建基元。
|
||||||
我们希望在它和其他补充原语的基础上构建更高级别的 API 或者工具,以便于将来的用户使用。
|
我们希望在它和其他补充原语的基础上构建更高级别的 API 或者工具,以便于将来的用户使用。
|
||||||
kubectl 目前支持的 "macro" 操作(运行、缩放、滚动更新)就是这方面的概念示例。
|
kubectl 目前支持的 "macro" 操作(运行、缩放、滚动更新)就是这方面的概念示例。
|
||||||
例如,我们可以想象类似于 [Asgard](https://techblog.netflix.com/2012/06/asgaard-web-based-cloud-management-and.html)
|
例如,我们可以想象类似于 [Asgard](https://netflixtechblog.com/asgard-web-based-cloud-management-and-deployment-2c9fc4e4d3a1)
|
||||||
的东西管理 ReplicationController、自动定标器、服务、调度策略、金丝雀发布等。
|
的东西管理 ReplicationController、自动定标器、服务、调度策略、金丝雀发布等。
|
||||||
|
|
||||||

### Deployment (Recommended)

[`Deployment`](/docs/concepts/workloads/controllers/deployment/) is a
higher-level API object that updates its underlying Replica Sets and their Pods.
Deployments are recommended if you want the rolling update functionality,
because they are declarative, server-side, and have additional features.
<!--
|
<!--
|
||||||
### Bare Pods
|
### Bare Pods
|
||||||
|
@ -596,11 +594,19 @@ ReplicationController。
|
||||||
并且在机器准备重新启动或者关闭时安全地终止。
|
并且在机器准备重新启动或者关闭时安全地终止。
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
## For more information
|
## {{% heading "whatsnext" %}}
|
||||||
|
|
||||||
Read [Run Stateless AP Replication Controller](/docs/tutorials/stateless-application/run-stateless-ap-replication-controller/).
|
* Learn about [Pods](/docs/concepts/workloads/pods).
|
||||||
|
* Learn about [Deployment](/docs/concepts/workloads/controllers/deployment/), the replacement
|
||||||
|
for ReplicationController.
|
||||||
|
* `ReplicationController` is part of the Kubernetes REST API.
|
||||||
|
Read the {{< api-reference page="workload-resources/replication-controller-v1" >}}
|
||||||
|
object definition to understand the API for replication controllers.
|
||||||
-->
|
-->
|
||||||
## 更多信息
|
## {{% heading "whatsnext" %}}
|
||||||
|
|
||||||
请阅读[运行无状态的 ReplicationController](/zh/docs/tasks/run-application/run-stateless-application-deployment/)。
|
- 了解 [Pods](/zh/docs/concepts/workloads/pods)。
|
||||||
|
- 了解 [Depolyment](/zh/docs/concepts/workloads/controllers/deployment/),ReplicationController 的替代品。
|
||||||
|
- `ReplicationController` 是 Kubernetes REST API 的一部分,阅读 {{< api-reference page="workload-resources/replication-controller-v1" >}}
|
||||||
|
对象定义以了解 replication controllers 的 API。
|
||||||
|
|
||||||
|
|
|

---
title: Automatic Clean-up for Finished Jobs
content_type: concept
weight: 70
---

<!-- overview -->

{{< feature-state for_k8s_version="v1.23" state="stable" >}}

The TTL-after-finished {{< glossary_tooltip text="controller" term_id="controller" >}}
provides a TTL (time to live) mechanism to limit the lifetime of resource
objects that have finished execution. The TTL controller only handles
{{< glossary_tooltip text="Jobs" term_id="job" >}}.

<!-- body -->

## TTL-after-finished Controller

The TTL-after-finished controller is only supported for Jobs. A cluster
operator can use this feature to clean up finished Jobs (either `Complete` or
`Failed`) automatically by specifying the `.spec.ttlSecondsAfterFinished` field
of a Job, as in this
[example](/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically).

The TTL-after-finished controller assumes that a Job is eligible to be cleaned
up TTL seconds after the Job has finished; in other words, when the TTL has
expired. When the TTL-after-finished controller cleans up a Job, it deletes it
cascadingly, that is to say it deletes its dependent objects together with it.
Note that when the Job is deleted, its lifecycle guarantees, such as
finalizers, will be honored.

The TTL seconds can be set at any time. Here are some examples for setting the
`.spec.ttlSecondsAfterFinished` field of a Job:

* Specify this field in the Job manifest, so that a Job can be cleaned up
  automatically some time after it finishes (see the sketch after this list).
* Set this field on existing, already finished Jobs, to adopt this new feature.
* Use a
  [mutating admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks)
  to set this field dynamically at Job creation time. Cluster administrators can
  use this to enforce a TTL policy for finished Jobs.
* Use a
  [mutating admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks)
  to set this field dynamically after the Job has finished, and choose
  different TTL values based on Job status, labels, and so on.
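
Here is a minimal sketch of the first option, following the familiar pi Job illustration; the name, image, and command are placeholders:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi-with-ttl
spec:
  ttlSecondsAfterFinished: 100   # delete the Job (and its Pods) 100 seconds after it finishes
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
```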

## Caveat

### Updating TTL Seconds

Note that the TTL period, such as the `.spec.ttlSecondsAfterFinished` field of a
Job, can be modified after the Job is created or has finished. However, once the
Job becomes eligible to be deleted (when the TTL has expired), the system won't
guarantee that the Job will be kept, even if an update to extend the TTL
returns a successful API response.

### Time Skew {#time-skew}

Because the TTL-after-finished controller uses timestamps stored in the
Kubernetes resources to determine whether the TTL has expired or not, this
feature is sensitive to time skew in the cluster, which may cause the
TTL-after-finished controller to clean up resource objects at the wrong time.

Clocks aren't always correct, but the difference should be very small. Please
be aware of this risk when setting a non-zero TTL.

## {{% heading "whatsnext" %}}

…proposals about the documentation, the website style, the processes for
reviewing and merging pull requests, or other aspects of the documentation. For
maximum transparency, these types of proposals need to be discussed in a SIG
Docs meeting or on the
[kubernetes-sig-docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs).
In addition, it can help to have some context about the way things currently
work and why past decisions have been made before proposing sweeping changes.
The quickest way to get answers to questions about how the documentation
currently works is to ask in the `#sig-docs` Slack channel on
[kubernetes.slack.com](https://kubernetes.slack.com).

The SIG Docs representative for a given release coordinates the following tasks:

- Monitor the feature-tracking spreadsheet for new or changed features with an
  impact on documentation. If the documentation for a given feature won't be ready
  for the release, the feature may not be allowed to go into the release.
- Attend sig-release meetings regularly and give updates on the status of the
  docs for the release.

- Being available on the [Kubernetes #sig-docs channel](https://kubernetes.slack.com) to answer questions from new contributors.
- Working with PR wranglers to identify [good first issues](https://kubernetes.dev/docs/guide/help-wanted/#good-first-issue) for new contributors.
- Mentoring new contributors through their first few PRs to the docs repo.
- Helping new contributors create the more complex PRs they need to become Kubernetes members.
- [Sponsoring contributors](/docs/contribute/advanced/#sponsor-a-new-contributor) on their path to becoming Kubernetes members.
- Hosting a monthly meeting to help and mentor new contributors.

Current New Contributor Ambassadors are announced at each SIG-Docs meeting and in the [Kubernetes #sig-docs channel](https://kubernetes.slack.com).
|
@ -205,37 +207,37 @@ membership in the Kubernetes organization.
|
||||||
<!--
|
<!--
|
||||||
## Serve as a SIG Co-chair
|
## Serve as a SIG Co-chair
|
||||||
|
|
||||||
SIG Docs [approvers](/docs/contribute/participating/#approvers) can serve a term as a co-chair of SIG Docs.
|
SIG Docs [members](/docs/contribute/participate/roles-and-responsibilities/#members)
|
||||||
|
can serve a term as a co-chair of SIG Docs.
|
||||||
|
|
||||||
### Prerequisites
|
### Prerequisites
|
||||||
-->
|
-->
|
||||||
## 担任 SIG 联合主席
|
## 担任 SIG 联合主席
|
||||||
|
|
||||||
SIG Docs [批准人(Approvers)](/zh/docs/contribute/participate/roles-and-responsibilities/#approvers)
|
SIG Docs [成员(Members)](/zh/docs/contribute/participate/roles-and-responsibilities/#members)
|
||||||
可以担任 SIG Docs 的联合主席。
|
可以担任 SIG Docs 的联合主席。
|
||||||
|
|
||||||
### 前提条件
|
### 前提条件
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
Approvers must meet the following requirements to be a co-chair:
|
A Kubernetes member must meet the following requirements to be a co-chair:
|
||||||
|
|
||||||
- Have been a SIG Docs approver for at least 6 months
|
|
||||||
- Have [led a Kubernetes docs release](/docs/contribute/advanced/#coordinate-docs-for-a-kubernetes-release) or shadowed two releases
|
|
||||||
- Understand SIG Docs workflows and tooling: git, Hugo, localization, blog subproject
|
- Understand SIG Docs workflows and tooling: git, Hugo, localization, blog subproject
|
||||||
- Understand how other Kubernetes SIGs and repositories affect the SIG Docs workflow, including: [teams in k/org](https://github.com/kubernetes/org/blob/master/config/kubernetes/sig-docs/teams.yaml), [process in k/community](https://github.com/kubernetes/community/tree/master/sig-docs), plugins in [k/test-infra](https://github.com/kubernetes/test-infra/), and the role of [SIG Architecture](https://github.com/kubernetes/community/tree/master/sig-architecture).
|
- Understand how other Kubernetes SIGs and repositories affect the SIG Docs workflow, including: [teams in k/org](https://github.com/kubernetes/org/blob/master/config/kubernetes/sig-docs/teams.yaml), the [process in k/community](https://github.com/kubernetes/community/tree/master/sig-docs), plugins in [k/test-infra](https://github.com/kubernetes/test-infra/), and the role of [SIG Architecture](https://github.com/kubernetes/community/tree/master/sig-architecture).
|
||||||
|
In addition, understand how the [Kubernetes docs release process](/docs/contribute/advanced/#coordinate-docs-for-a-kubernetes-release) works.
|
||||||
|
- Approved by the SIG Docs community either directly or via lazy consensus.
|
||||||
- Commit at least 5 hours per week (and often more) to the role for a minimum of 6 months
|
- Commit at least 5 hours per week (and often more) to the role for a minimum of 6 months
|
||||||
-->
|
-->
|
||||||
Approvers 必须满足以下要求才能成为联合主席:
|
Kubernetes 成员必须满足以下要求才能成为联合主席:
|
||||||
|
|
||||||
- 已维持 SIG Docs approver 身份至少 6 个月
|
|
||||||
- [曾领导 Kubernetes 文档发布](/zh/docs/contribute/advanced/#coordinate-docs-for-a-kubernetes-release)
|
|
||||||
或者在两个版本发布中有实习经历
|
|
||||||
- 理解 SIG Docs 工作流程和工具:git、Hugo、本地化、博客子项目
|
- 理解 SIG Docs 工作流程和工具:git、Hugo、本地化、博客子项目
|
||||||
- 理解其他 Kubernetes SIG 和仓库会如何影响 SIG Docs 工作流程,包括:
|
- 理解其他 Kubernetes SIG 和仓库会如何影响 SIG Docs 工作流程,包括:
|
||||||
[k/org 中的团队](https://github.com/kubernetes/org/blob/master/config/kubernetes/sig-docs/teams.yaml)、
|
[k/org 中的团队](https://github.com/kubernetes/org/blob/master/config/kubernetes/sig-docs/teams.yaml)、
|
||||||
[k/community 中的流程](https://github.com/kubernetes/community/tree/master/sig-docs)、
|
[k/community 中的流程](https://github.com/kubernetes/community/tree/master/sig-docs)、
|
||||||
[k/test-infra](https://github.com/kubernetes/test-infra/) 中的插件、
|
[k/test-infra](https://github.com/kubernetes/test-infra/) 中的插件、
|
||||||
[SIG Architecture](https://github.com/kubernetes/community/tree/master/sig-architecture) 中的角色。
|
[SIG Architecture](https://github.com/kubernetes/community/tree/master/sig-architecture) 中的角色。
|
||||||
|
此外,了解 [Kubernetes 文档发布流程](/docs/contribute/advanced/#coordinate-docs-for-a-kubernetes-release) 的工作原理。
|
||||||
|
- 由 SIG Docs 社区直接或通过惰性共识批准。
|
||||||
- 在至少 6 个月的时段内,确保每周至少投入 5 个小时(通常更多)
|
- 在至少 6 个月的时段内,确保每周至少投入 5 个小时(通常更多)
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
|
|
|

Description | URLs
-----|-----
Home | [All heading and subheading URLs](/docs/home/)
Setup | [All heading and subheading URLs](/docs/setup/)
Tutorials | [Kubernetes Basics](/docs/tutorials/kubernetes-basics/), [Hello Minikube](/docs/tutorials/hello-minikube/)
Site strings | [All site strings](#Site-strings-in-i18n) in a new localized TOML file
Releases | [All heading and subheading URLs](/releases)

Translated documents must reside in their own `content/**/` subdirectory, but
otherwise follow the same URL path as the English source. For example, to
prepare the [Kubernetes Basics](/docs/tutorials/kubernetes-basics/) tutorial for
translation into German, create a subfolder under the `content/de/` folder and
copy the English source into it.
@ -616,6 +617,24 @@ Some language teams have their own language-specific style guide and glossary. F
|
||||||
一些语言团队有自己的特定语言样式指南和词汇表。
|
一些语言团队有自己的特定语言样式指南和词汇表。
|
||||||
例如,请参见[中文本地化指南](/zh/docs/contribute/localization_zh/)。
|
例如,请参见[中文本地化指南](/zh/docs/contribute/localization_zh/)。
|
||||||
|
|
||||||
|
<!--
|
||||||
|
### Language specific Zoom meetings
|
||||||
|
|
||||||
|
If the localization project needs a separate meeting time, contact a SIG Docs Co-Chair or Tech Lead to create a new reoccurring Zoom meeting and calendar invite. This is only needed when the the team is large enough to sustain and require a separate meeting.
|
||||||
|
|
||||||
|
Per CNCF policy, the localization teams must upload their meetings to the SIG Docs YouTube playlist. A SIG Docs Co-Chair or Tech Lead can help with the process until SIG Docs automates it.
|
||||||
|
|
||||||
|
-->
|
||||||
|
|
||||||
|
### 特定语言的 Zoom 会议
|
||||||
|
|
||||||
|
如果本地化项目需要单独的会议时间,
|
||||||
|
请联系 SIG Docs 联合主席或技术主管以创建新的重复 Zoom 会议和日历邀请。
|
||||||
|
仅当团队维持在足够大的规模并需要单独的会议时才需要这样做。
|
||||||
|
|
||||||
|
根据 CNCF 政策,本地化团队必须将他们的会议上传到 SIG Docs YouTube 播放列表。
|
||||||
|
SIG Docs 联合主席或技术主管可以帮助完成该过程,直到 SIG Docs 实现自动化。
|
||||||
|
|
||||||

## Branching strategy

Teams must merge localized content into the same branch from which the content
was sourced. For example:

- a localization branch sourced from `main` must be merged into `main`.
- a localization branch sourced from `release-{{% skew "prevMinorVersion" %}}`
  must be merged into `release-{{% skew "prevMinorVersion" %}}`.

{{< note >}}
If your localization branch was created from the `main` branch but was not
merged into `main` before the new release branch `{{< release-branch >}}` was
created, merge it into both `main` and the new release branch
`{{< release-branch >}}`. To merge your localization branch into the new release
branch `{{< release-branch >}}`, you need to switch the upstream branch of your
localization branch to `{{< release-branch >}}`.
{{< /note >}}

- attach,挂接
- autoscale,自动扩缩容
- bearer token,持有者令牌
- capabilities
  * when used generically for the ability of a subject to perform an action, translate literally as "能力"
  * when referring specifically to the [privilege-control mechanism](http://man7.org/linux/man-pages/man7/capabilities.7.html) of the Linux operating system, translate as "权能字"
- certificate authority,证书机构

{{< table caption="Prow commands for reviewing" >}}
Prow Command | Role Restrictions | Description
:------------|:------------------|:-----------
`/lgtm` | Organization members | Signals that you've finished reviewing a PR and are satisfied with the changes.
`/approve` | Approvers | Approves a PR for merging.
`/assign` | Reviewers or Approvers | Assigns a person to review or approve a PR.
`/close` | Reviewers or Approvers | Closes an issue or PR.
{{< /table >}}

`priority/important-longterm` | Do this within 6 months.
`priority/backlog` | Deferrable indefinitely. Do when resources are available.
`priority/awaiting-more-evidence` | Placeholder for a potentially good issue so it doesn't get lost.
`help` or `good first issue` | Suitable for someone with very little Kubernetes or SIG Docs experience. See [Help Wanted and Good First Issue Labels](https://kubernetes.dev/docs/guide/help-wanted/) for more information.
{{< /table >}}

At your discretion, take ownership of an issue and submit a PR for it
(especially if it's quick or relates to work you're already doing).
@ -24,9 +24,9 @@ overview:
|
||||||
Kubernetes 是一个开源的容器编排引擎,用来对容器化应用进行自动化部署、 扩缩和管理。该项目托管在 <a href="https://www.cncf.io/about">CNCF</a>。
|
Kubernetes 是一个开源的容器编排引擎,用来对容器化应用进行自动化部署、 扩缩和管理。该项目托管在 <a href="https://www.cncf.io/about">CNCF</a>。
|
||||||
# cards:
|
# cards:
|
||||||
# - name: concepts
|
# - name: concepts
|
||||||
# title: "Understand the basics"
|
# title: "Understand Kubernetes"
|
||||||
# description: "Learn about Kubernetes and its fundamental concepts."
|
# description: "Learn about Kubernetes and its fundamental concepts."
|
||||||
# button: "Learn Concepts"
|
# button: "View Concepts"
|
||||||
# button_path: "/docs/concepts"
|
# button_path: "/docs/concepts"
|
||||||
# - name: tutorials
|
# - name: tutorials
|
||||||
# title: "Try Kubernetes"
|
# title: "Try Kubernetes"
|
||||||
|
@ -68,9 +68,9 @@ overview:
# description: This website contains documentation for the current and previous 4 versions of Kubernetes.
cards:
- name: concepts
  title: "Understand the basics"
  title: "Understand Kubernetes"
  description: "Learn about Kubernetes and its fundamental concepts."
  button: "Learn Concepts"
  button: "View Concepts"
  button_path: "/zh/docs/concepts"
- name: tutorials
  title: "Try Kubernetes"
@ -106,7 +106,7 @@ cards:
  title: K8s Release Notes
  description: If you are installing or upgrading Kubernetes, it is best to consult the latest release notes.
  button: "Download Kubernetes"
  button_path: "/zh/docs/setup/release/notes"
  button_path: "/releases/download"
- name: about
  title: About the documentation
  description: This website contains documentation for the current and previous 4 versions of Kubernetes.
@ -59,8 +59,8 @@ client libraries:
- [Kubernetes Python client library](https://github.com/kubernetes-client/python)
- [Kubernetes Java client library](https://github.com/kubernetes-client/java)
- [Kubernetes JavaScript client library](https://github.com/kubernetes-client/javascript)
- [Kubernetes Dotnet client library](https://github.com/kubernetes-client/csharp)
- [Kubernetes C# client library](https://github.com/kubernetes-client/csharp)
- [Kubernetes Haskell Client library](https://github.com/kubernetes-client/haskell)
- [Kubernetes Haskell client library](https://github.com/kubernetes-client/haskell)
-->
## Officially-supported client libraries
@ -71,19 +71,19 @@ client libraries:
- [Kubernetes Python client library](https://github.com/kubernetes-client/python)
- [Kubernetes Java client library](https://github.com/kubernetes-client/java)
- [Kubernetes JavaScript client library](https://github.com/kubernetes-client/javascript)
- [Kubernetes Dotnet client library](https://github.com/kubernetes-client/csharp)
- [Kubernetes C# client library](https://github.com/kubernetes-client/csharp)
- [Kubernetes Haskell client library](https://github.com/kubernetes-client/haskell)

<!--
## CLI

* [kubectl](/docs/reference/kubectl/overview/) - Main CLI tool for running commands and managing Kubernetes clusters.
* [kubectl](/docs/reference/kubectl/) - Main CLI tool for running commands and managing Kubernetes clusters.
* [JSONPath](/docs/reference/kubectl/jsonpath/) - Syntax guide for using [JSONPath expressions](https://goessner.net/articles/JsonPath/) with kubectl.
* [kubeadm](/docs/reference/setup-tools/kubeadm/) - CLI tool to easily provision a secure Kubernetes cluster.
-->
## CLI

* [kubectl](/zh/docs/reference/kubectl/overview/) - Main CLI tool for running commands and managing Kubernetes clusters.
* [kubectl](/zh/docs/reference/kubectl/) - Main CLI tool for running commands and managing Kubernetes clusters.
* [JSONPath](/zh/docs/reference/kubectl/jsonpath/) - Syntax guide for using
  [JSONPath expressions](https://goessner.net/articles/JsonPath/) with kubectl.
* [kubeadm](/zh/docs/reference/setup-tools/kubeadm/) - This CLI tool makes it easy to provision a secure Kubernetes cluster.
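For a quick feel of what the JSONPath syntax looks like in practice, here is a minimal sketch (the resource type and expression are illustrative, not taken from this page):

```shell
# Print the names of all Pods in the current namespace using a JSONPath expression
kubectl get pods -o jsonpath='{.items[*].metadata.name}'
```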
@ -105,6 +105,8 @@ client libraries:

* [Scheduler Policies](/docs/reference/scheduling/policies)
* [Scheduler Profiles](/docs/reference/scheduling/config#profiles)
* List of [ports and protocols](/docs/reference/ports-and-protocols/) that
  should be open on control plane and worker nodes
-->
## Components
@ -121,6 +123,8 @@ client libraries:

* [Scheduling policies](/zh/docs/reference/scheduling/policies)
* [Scheduling profiles](/zh/docs/reference/scheduling/config#profiles)
* List of [ports and protocols](/zh/docs/reference/ports-and-protocols/) that
  should be open on control plane and worker nodes

<!--
## Config APIs
@ -130,12 +134,18 @@ configure kubernetes components or tools. Most of these APIs are not exposed
by the API server in a RESTful way though they are essential for a user or an
operator to use or manage a cluster.

* [kubelet configuration (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/)
* [kube-apiserver configuration (v1alpha1)](/docs/reference/config-api/apiserver-config.v1alpha1/)
* [kube-scheduler configuration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/)
* [kube-apiserver configuration (v1)](/docs/reference/config-api/apiserver-config.v1/)
* [kube-scheduler policy reference (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/)
* [kube-apiserver encryption (v1)](/docs/reference/config-api/apiserver-encryption.v1/)
* [kubelet configuration (v1alpha1)](/docs/reference/config-api/kubelet-config.v1alpha1/) and
  [kubelet configuration (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/)
* [kubelet credential providers (v1alpha1)](/docs/reference/config-api/kubelet-credentialprovider.v1alpha1/)
* [kube-scheduler configuration (v1beta2)](/docs/reference/config-api/kube-scheduler-config.v1beta2/) and
  [kube-scheduler configuration (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/)
* [kube-proxy configuration (v1alpha1)](/docs/reference/config-api/kube-proxy-config.v1alpha1/)
* [`audit.k8s.io/v1` API](/docs/reference/config-api/apiserver-audit.v1/)
* [Client authentication API (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/)
* [Client authentication API (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/) and
  [Client authentication API (v1)](/docs/reference/config-api/client-authentication.v1/)
* [WebhookAdmission configuration (v1)](/docs/reference/config-api/apiserver-webhookadmission.v1/)
-->
## Config API
@ -144,14 +154,32 @@ operator to use or manage a cluster.
Although these APIs are essential for a user or an operator to use or manage a cluster,
most of them are not exposed by the API server in a RESTful way.

* [kubelet configuration (v1beta1)](/zh/docs/reference/config-api/kubelet-config.v1beta1/)
* [kube-apiserver configuration (v1alpha1)](/zh/docs/reference/config-api/apiserver-config.v1alpha1/)
* [kube-scheduler configuration (v1beta1)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta1/)
* [kube-apiserver configuration (v1)](/zh/docs/reference/config-api/apiserver-config.v1/)
* [kube-scheduler policy reference (v1)](/zh/docs/reference/config-api/kube-scheduler-policy-config.v1/)
* [kube-apiserver encryption (v1)](/zh/docs/reference/config-api/apiserver-encryption.v1/)
* [kubelet configuration (v1alpha1)](/zh/docs/reference/config-api/kubelet-config.v1alpha1/) and
  [kubelet configuration (v1beta1)](/zh/docs/reference/config-api/kubelet-config.v1beta1/)
* [kubelet credential providers (v1alpha1)](/zh/docs/reference/config-api/kubelet-credentialprovider.v1alpha1/)
* [kube-scheduler configuration (v1beta2)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta2/) and
  [kube-scheduler configuration (v1beta3)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta3/)
* [kube-proxy configuration (v1alpha1)](/zh/docs/reference/config-api/kube-proxy-config.v1alpha1/)
* [`audit.k8s.io/v1` API](/zh/docs/reference/config-api/apiserver-audit.v1/)
* [Client authentication API (v1beta1)](/zh/docs/reference/config-api/client-authentication.v1beta1/)
* [Client authentication API (v1beta1)](/zh/docs/reference/config-api/client-authentication.v1beta1/) and
  [Client authentication API (v1)](/zh/docs/reference/config-api/client-authentication.v1/)
* [WebhookAdmission configuration (v1)](/zh/docs/reference/config-api/apiserver-webhookadmission.v1/)

<!--
## Config API for kubeadm

* [v1beta2](/docs/reference/config-api/kubeadm-config.v1beta2/)
* [v1beta3](/docs/reference/config-api/kubeadm-config.v1beta3/)
-->

## Config API for kubeadm

* [v1beta2](/zh/docs/reference/config-api/kubeadm-config.v1beta2/)
* [v1beta3](/zh/docs/reference/config-api/kubeadm-config.v1beta3/)

<!--
## Design Docs
@ -1145,7 +1145,7 @@ The following HTTP headers can be used to perform an impersonation request:

* `Impersonate-User`: The username to act as.
* `Impersonate-Group`: A group name to act as. Can be provided multiple times to set multiple groups. Optional. Requires "Impersonate-User".
* `Impersonate-Extra-( extra name )`: A dynamic header used to associate extra fields with the user. Optional. Requires "Impersonate-User". In order to be preserved consistently, `( extra name )` should be lower-case, and any characters which aren't [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6) MUST be utf8 and [percent-encoded](https://tools.ietf.org/html/rfc3986#section-2.1).
* `Impersonate-Extra-( extra name )`: A dynamic header used to associate extra fields with the user. Optional. Requires "Impersonate-User". In order to be preserved consistently, `( extra name )` must be lower-case, and any characters which aren't [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6) MUST be utf8 and [percent-encoded](https://tools.ietf.org/html/rfc3986#section-2.1).
* `Impersonate-Uid`: A unique identifier that represents the user being impersonated. Optional. Requires "Impersonate-User". Kubernetes does not impose any format requirements on this string.
-->
The following HTTP headers can be used to perform an impersonation request:
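As a rough sketch of how these headers are used together (the API server address, bearer token, and user/group values below are placeholders, not values from this page):

```shell
# Issue a request while impersonating user "jane" in group "developers";
# the identity behind $TOKEN must itself be authorized to impersonate.
curl https://<api-server>/api/v1/namespaces/default/pods \
  --header "Authorization: Bearer $TOKEN" \
  --header "Impersonate-User: jane" \
  --header "Impersonate-Group: developers"
```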
@ -1841,7 +1841,8 @@ Certificates).

<!--
Optionally, the response can include the expiry of the credential formatted as a
RFC3339 timestamp. Presence or absence of an expiry has the following impact:
[RFC 3339](https://datatracker.ietf.org/doc/html/rfc3339) timestamp.
Presence or absence of an expiry has the following impact:

- If an expiry is included, the bearer token and TLS credentials are cached until
  the expiry time is reached, or if the server responds with a 401 HTTP status code,
@ -1849,7 +1850,9 @@ RFC3339 timestamp. Presence or absence of an expiry has the following impact:
- If an expiry is omitted, the bearer token and TLS credentials are cached until
  the server responds with a 401 HTTP status code or until the process exits.
-->
Optionally, the response can include the expiry of the credential formatted as an RFC 3339 timestamp.
Optionally, the response can include the expiry of the credential formatted as an
[RFC 3339](https://datatracker.ietf.org/doc/html/rfc3339)
timestamp.
Presence or absence of an expiry has the following impact:

- If an expiry is included, the bearer token and TLS credentials are cached until the expiry time is reached,
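For illustration, a credential plugin response carrying an expiry might look roughly like this — a minimal sketch assuming the `client.authentication.k8s.io/v1beta1` exec API, with placeholder values:

```json
{
  "apiVersion": "client.authentication.k8s.io/v1beta1",
  "kind": "ExecCredential",
  "status": {
    "token": "my-bearer-token",
    "expirationTimestamp": "2022-03-01T12:00:00Z"
  }
}
```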
@ -113,7 +113,7 @@ controller on the controller manager.

Each valid token is backed by a secret in the `kube-system` namespace. You can
find the full design doc
[here](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md).
[here](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md).

Here is what the secret looks like.
-->
@ -121,7 +121,7 @@ Here is what the secret looks like.

Each valid token is backed by a Secret object in the `kube-system` namespace.
You can find the full design doc
[here](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md).
[here](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md)

Here is what the secret looks like.
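The diff excerpt cuts off before the example manifest itself; a minimal sketch of such a Secret, assuming the standard bootstrap-token layout (the token ID and secret are placeholders):

```yaml
apiVersion: v1
kind: Secret
metadata:
  # The name must take the form "bootstrap-token-<token-id>"
  name: bootstrap-token-07401b
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  token-id: "07401b"
  token-secret: "f395accd246ae52d"
  expiration: "2022-03-10T03:22:11Z"
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
```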
@ -3,7 +3,7 @@ title: Certificate Signing Requests
content_type: concept
weight: 20
---
<!--
reviewers:
- liggitt
- mikedanese
@ -18,22 +18,22 @@ weight: 20

{{< feature-state for_k8s_version="v1.19" state="stable" >}}

<!--
The Certificates API enables automation of
[X.509](https://www.itu.int/rec/T-REC-X.509) credential provisioning by providing
a programmatic interface for clients of the Kubernetes API to request and obtain
X.509 {{< glossary_tooltip term_id="certificate" text="certificates" >}}
from a Certificate Authority (CA).

A CertificateSigningRequest (CSR) resource is used to request that a certificate be signed
by a denoted signer, after which the request may be approved or denied before
finally being signed.
-->
The Certificates API enables automation of
[X.509](https://www.itu.int/rec/T-REC-X.509)
credential provisioning by providing a programmatic interface
for clients of the Kubernetes API to request and obtain X.509
{{< glossary_tooltip term_id="certificate" text="certificates" >}}
from a Certificate Authority (CA).

A CertificateSigningRequest (CSR) resource is used to request that a certificate be signed by a denoted signer,
@ -41,7 +41,7 @@ A CertificateSigningRequest (CSR) resource is used to request that a certificate be signed

<!-- body -->

<!--
## Request signing process

The CertificateSigningRequest resource type allows a client to ask for an X.509 certificate
@ -64,13 +64,13 @@ CertificateSigningRequest uses the `spec.signerName` field to indicate the _signer_
In Kubernetes v1.22 and later, clients may optionally set the `spec.expirationSeconds`
field to request a particular lifetime for the issued certificate.
The minimum valid value for this field is `600`, that is, ten minutes.
<!--
Once created, a CertificateSigningRequest must be approved before it can be signed.
Depending on the signer selected, a CertificateSigningRequest may be automatically approved
by a {{< glossary_tooltip text="controller" term_id="controller" >}}.
Otherwise, a CertificateSigningRequest must be manually approved
either via the REST API (or client-go)
or by running `kubectl certificate approve`.
Likewise, a CertificateSigningRequest may also be denied,
which tells the configured signer that it must not sign the request.
-->
@ -83,13 +83,13 @@ which tells the configured signer that it must not sign the request.
This tells the designated signer that it must not sign the request.

<!--
For certificates that have been approved, the next step is signing.
The relevant signing controller
first validates that the signing conditions are met and then creates a certificate.
The signing controller then updates the CertificateSigningRequest,
storing the new certificate into
the `status.certificate` field of the existing CertificateSigningRequest object. The
`status.certificate` field is either empty or contains an X.509 certificate,
encoded in PEM format.
The CertificateSigningRequest `status.certificate` field is empty until the signer does this.
-->
@ -100,8 +100,8 @@ The CertificateSigningRequest `status.certificate` field is empty until the signer does this.
At this point, the `status.certificate` field is either empty or contains a PEM-encoded X.509 certificate.
The `status.certificate` field remains empty until the signer completes this step.

<!--
Once the `status.certificate` field has been populated,
the request has been completed and clients can now
fetch the signed certificate PEM data from the CertificateSigningRequest resource.
The signers can instead deny certificate signing if the approval conditions are not met.
@ -110,10 +110,10 @@ The signers can instead deny certificate signing if the approval conditions are
Clients can now fetch the signed certificate PEM data from the CertificateSigningRequest resource.
The signer can instead refuse to sign if the signing conditions are not met.

<!--
In order to reduce the number of old CertificateSigningRequest resources left
in a cluster, a garbage collection
controller runs periodically.
The garbage collection removes CertificateSigningRequests that have not changed
state for some duration:
@ -133,10 +133,10 @@ state for some duration:
* Pending requests: automatically deleted after 24 hours
* All requests: automatically deleted once the issued certificate has expired

<!--
## Signers

Custom signerNames can also be specified. All signers should provide information about how they work
so that clients can predict what will happen to their CSRs.
This includes:
-->
@ -147,18 +147,18 @@ This includes:
so that clients can predict what will happen to their CSRs.
This includes:

<!--
1. **Trust distribution**: how trust (CA bundles) are distributed.
2. **Permitted subjects**: any restrictions on and behavior
   when a disallowed subject is requested.
3. **Permitted x509 extensions**: including IP subjectAltNames, DNS subjectAltNames,
   Email subjectAltNames, URI subjectAltNames etc,
   and behavior when a disallowed extension is requested.
4. **Permitted key usages / extended key usages**: any restrictions on and behavior
   when usages different than the signer-determined usages are specified in the CSR.
5. **Expiration/certificate lifetime**: whether it is fixed by the signer, configurable by the admin, determined by the CSR `spec.expirationSeconds` field, etc
   and the behavior when the signer-determined expiration is different from the CSR `spec.expirationSeconds` field.
6. **CA bit allowed/disallowed**: and behavior if a CSR contains a request
   for a CA certificate when the signer does not permit it.
-->
1. **Trust distribution**: how trust (CA bundles) is distributed.
@ -171,7 +171,7 @@ This includes:
   and how to respond when the signer-determined expiration differs from the CSR `spec.expirationSeconds` field.
6. **CA bit allowed/disallowed**: how to respond when a CSR requests a CA certificate that the signer does not permit.

<!--
Commonly, the `status.certificate` field contains a single PEM-encoded X.509
certificate once the CSR is approved and the certificate is issued. Some
signers store multiple certificates into the `status.certificate` field. In
|
||||||
v1.22 版本之前的 Kubernetes API 服务器会在创建对象的时候忽略该字段。
|
v1.22 版本之前的 Kubernetes API 服务器会在创建对象的时候忽略该字段。
|
||||||
{{< /note >}}
|
{{< /note >}}
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
### Kubernetes signers
|
### Kubernetes signers
|
||||||
|
|
||||||
Kubernetes provides built-in signers that each have a well-known `signerName`:
|
Kubernetes provides built-in signers that each have a well-known `signerName`:
|
||||||
|
@ -220,7 +220,7 @@ Kubernetes provides built-in signers that each have a well-known `signerName`:

Kubernetes provides built-in signers, each with a well-known `signerName`:

<!--
1. `kubernetes.io/kube-apiserver-client`: signs certificates that will be honored as client certificates by the API server.
   Never auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}.
   1. Trust distribution: signed certificates must be honored as client-certificates by the kube-apiserver. The CA bundle is not distributed by any other means.
@ -250,7 +250,7 @@ Kubernetes provides built-in signers, each with a well-known `signerName`:
      set to the minimum of the `--cluster-signing-duration` option and, if specified, the `spec.expirationSeconds` field of the CSR object.
   1. CA bit allowed/disallowed: not allowed.

<!--
1. `kubernetes.io/kube-apiserver-client-kubelet`: signs client certificates that will be honored as client certificates by the
   API server.
   May be auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}.
@ -274,7 +274,7 @@ Kubernetes provides built-in signers, each with a well-known `signerName`:
      set to the minimum of the `--cluster-signing-duration` option and, if specified, the `spec.expirationSeconds` field of the CSR object.
   1. CA bit allowed/disallowed: not allowed.

<!--
1. `kubernetes.io/kubelet-serving`: signs serving certificates that are honored as a valid kubelet serving certificate
   by the API server, but has no other guarantees.
   Never auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}.
@ -294,12 +294,12 @@ Kubernetes provides built-in signers, each with a well-known `signerName`:
   1. Permitted x509 extensions: honors key usage and DNSName/IPAddress subjectAltName extensions,
      forbids EmailAddress and URI subjectAltName extensions, and discards other extensions.
      At least one DNS or IP subjectAltName must be present.
   1. Permitted key usages: must be `["key encipherment", "digital signature", "client auth"]`
   1. Permitted key usages: must be `["key encipherment", "digital signature", "server auth"]`
   1. Expiration/certificate lifetime: for signers implemented by the kube-controller-manager,
      set to the minimum of the `--cluster-signing-duration` option and, if specified, the `spec.expirationSeconds` field of the CSR object.
   1. CA bit allowed/disallowed: not allowed.

<!--
1. `kubernetes.io/legacy-unknown`: has no guarantees for trust at all. Some third-party distributions of Kubernetes
   may honor client certificates signed by it. The stable CertificateSigningRequest API (version `certificates.k8s.io/v1` and later)
   does not allow setting the `signerName` as `kubernetes.io/legacy-unknown`.
@ -340,7 +340,7 @@ Kubernetes API servers prior to v1.22 will silently drop this field when the obj
Kubernetes API servers prior to v1.22 silently drop this field when the object is created.
{{< /note >}}

<!--
Distribution of trust happens out of band for these signers. Any trust outside of those described above is strictly
coincidental. For instance, some distributions may honor `kubernetes.io/legacy-unknown` as client certificates for the
kube-apiserver, but this is not a standard.
@ -353,7 +353,7 @@ guaranteed to verify a connection to the API server using the default service (`
None of these usages are related in any way to the `.data[ca.crt]` entry of ServiceAccount Secrets.
That CA bundle is only guaranteed to verify a connection to the API server using the default service (`kubernetes.default.svc`).

<!--
## Authorization

To allow creating a CertificateSigningRequest and retrieving any CertificateSigningRequest:
@ -366,7 +366,7 @@ For example:

To allow creating a CertificateSigningRequest and retrieving any CertificateSigningRequest:

* verbs: `create`, `get`, `list`, `watch`,
  group: `certificates.k8s.io`,
  resources: `certificatesigningrequests`
@ -374,7 +374,7 @@ For example:

{{< codenew file="access/certificate-signing-request/clusterrole-create.yaml" >}}
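The referenced file is not included in this excerpt; a plausible ClusterRole granting the verbs listed above might look like this (the role name is illustrative):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: csr-creator
rules:
# Allow creating CSRs and reading back any CSR in the cluster
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests"]
  verbs: ["create", "get", "list", "watch"]
```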
<!--
To allow approving a CertificateSigningRequest:

* Verbs: `get`, `list`, `watch`, group: `certificates.k8s.io`, resource: `certificatesigningrequests`
@ -400,7 +400,7 @@ For example:

{{< codenew file="access/certificate-signing-request/clusterrole-approve.yaml" >}}
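Again, the referenced file is not shown here. A sketch of an approver role, assuming the usual pattern of pairing the read verbs with update access on the `approval` subresource plus an `approve` verb on the relevant signer (role and signer names are illustrative):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: csr-approver
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/approval"]
  verbs: ["update"]
- apiGroups: ["certificates.k8s.io"]
  resources: ["signers"]
  resourceNames: ["example.com/my-signer-name"]  # illustrative signer name
  verbs: ["approve"]
```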
<!--
To allow signing a CertificateSigningRequest:

* Verbs: `get`, `list`, `watch`, group: `certificates.k8s.io`, resource: `certificatesigningrequests`
@ -422,7 +422,7 @@ To allow signing a CertificateSigningRequest:

{{< codenew file="access/certificate-signing-request/clusterrole-sign.yaml" >}}
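A signing role would plausibly follow the same shape, swapping the `approval` subresource for `status` and the `approve` verb for `sign` (again a sketch; names are illustrative):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: csr-signer
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/status"]
  verbs: ["update"]
- apiGroups: ["certificates.k8s.io"]
  resources: ["signers"]
  resourceNames: ["example.com/my-signer-name"]  # illustrative signer name
  verbs: ["sign"]
```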
<!--
## Normal User

A few steps are required in order to get a normal user to be able to
@ -435,7 +435,7 @@ by the Kubernetes cluster, and then present that certificate to the Kubernetes A
First, this user must have a certificate issued by the Kubernetes cluster,
and then present that certificate to the Kubernetes API.

<!--
### Create private key

The following scripts show how to generate a PKI private key and CSR. It is
@ -454,10 +454,10 @@ openssl genrsa -out myuser.key 2048
openssl req -new -key myuser.key -out myuser.csr
```
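If you prefer to set the subject non-interactively, `openssl req` also accepts a `-subj` argument; for a client certificate the CN becomes the Kubernetes username and O the group (the values here are illustrative):

```shell
# Generate the CSR with an explicit subject instead of answering prompts
openssl req -new -key myuser.key -out myuser.csr -subj "/CN=myuser/O=developers"
```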
<!--
### Create CertificateSigningRequest

Create a CertificateSigningRequest and submit it to a Kubernetes Cluster via kubectl.
Below is a script to generate the CertificateSigningRequest.
-->
### Create CertificateSigningRequest {#create-certificatesigningrequest}
@ -480,7 +480,7 @@ spec:
EOF
```

<!--
Some points to note:

- `usages` has to be '`client auth`'
@ -495,7 +495,7 @@ Some points to note:

- The `request` field is the base64-encoded value of the CSR file content.
  You can get that value by running `cat myuser.csr | base64 | tr -d "\n"`.
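Putting those notes together, the generated manifest would look roughly like this (a sketch; the `request` value is the base64-encoded CSR from the previous step, truncated here):

```yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: myuser
spec:
  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0K...  # base64-encoded myuser.csr
  signerName: kubernetes.io/kube-apiserver-client
  expirationSeconds: 86400  # optional: one day
  usages:
  - client auth
```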
<!--
### Approve certificate signing request

Use kubectl to create a CSR and approve it.
@ -512,7 +512,7 @@ Get the list of CSRs:
kubectl get csr
```

<!--
Approve the CSR:
-->
Approve the CSR:
@ -521,7 +521,7 @@ Approve the CSR:
kubectl certificate approve myuser
```

<!--
### Get the certificate

Retrieve the certificate from the CSR.
@ -534,7 +534,7 @@ Retrieve the certificate from the CSR.
kubectl get csr/myuser -o yaml
```

<!--
The Certificate value is in Base64-encoded format under `status.certificate`.

Export the issued certificate from the CertificateSigningRequest.
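One way to do that export, assuming the CSR is named `myuser` as above:

```shell
# Decode the issued certificate out of the CSR object and save it to a file
kubectl get csr myuser -o jsonpath='{.status.certificate}' | base64 -d > myuser.crt
```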
@ -567,7 +567,7 @@ Role and RoleBinding.
kubectl create role developer --verb=create --verb=get --verb=list --verb=update --verb=delete --resource=pods
```

<!--
This is a sample command to create a RoleBinding for this new user:
-->
This is a sample command to create a RoleBinding for this new user:
@ -576,7 +576,7 @@ This is a sample command to create a RoleBinding for this new user:
kubectl create rolebinding developer-binding-myuser --role=developer --user=myuser
```

<!--
### Add to kubeconfig

The last step is to add this user into the kubeconfig file.
@ -595,7 +595,7 @@ kubectl config set-credentials myuser --client-key=myuser.key --client-certifica
```

<!--
Then, you need to add the context:
-->
Then, you need to add the context:
@ -604,7 +604,7 @@ Then, you need to add the context:
kubectl config set-context myuser --cluster=kubernetes --user=myuser
```

<!--
To test it, change the context to `myuser`:
-->
To test it, change the context to `myuser`:
@ -613,7 +613,7 @@ To test it, change the context to `myuser`:
kubectl config use-context myuser
```

<!--
## Approval or rejection {#approval-rejection}

### Control plane automated approval {#approval-rejection-control-plane}
@ -628,13 +628,13 @@ in order to check authorization for certificate approval.

### Control plane automated approval {#approval-rejection-control-plane}

The kube-controller-manager ships with a built-in certificate approver for CSRs with a signerName of
`kubernetes.io/kube-apiserver-client-kubelet`,
which delegates the various permissions on CSRs for node credentials to authorization.
The kube-controller-manager POSTs SubjectAccessReview resources to the API server
in order to check authorization for certificate approval.

<!--
### Approval or rejection using `kubectl` {#approval-rejection-kubectl}

A Kubernetes administrator (with appropriate permissions) can manually approve
@ -654,8 +654,8 @@ A Kubernetes administrator (with appropriate permissions) can manually approve (or deny)
kubectl certificate approve <certificate-signing-request-name>
```

<!--
Likewise, to deny a CSR:
-->
Likewise, to deny a CSR:
@ -663,7 +663,7 @@ Likewise, to deny a CSR:
kubectl certificate deny <certificate-signing-request-name>
```

<!--
### Approval or rejection using the Kubernetes API {#approval-rejection-api-client}

Users of the REST API can approve CSRs by submitting an UPDATE request to the `approval`
@ -699,7 +699,7 @@ status:
    reason: ApprovedByMyPolicy # You can set this to any string
    type: Approved
```
<!--
For `Denied` CSRs:
-->
For `Denied` CSRs:
@ -717,7 +717,7 @@ status:
    type: Denied
```

<!--
It's usual to set `status.conditions.reason` to a machine-friendly reason
code using TitleCase; this is a convention but you can set it to anything
you like. If you want to add a note for human consumption, use the
@ -727,7 +727,7 @@ you like. If you want to add a note for human consumption, use the
This is a naming convention, but you can set it to anything you like.
If you want to add a note for human consumption, use the `status.conditions.message` field.

<!--
## Signing

### Control plane signer {#signer-control-plane}
@ -743,7 +743,7 @@ were marked as approved.

### Control plane signer {#signer-control-plane}

The Kubernetes control plane implements each of the
[Kubernetes signers](/zh/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers),
as part of the kube-controller-manager.
@ -761,7 +761,7 @@ Kubernetes API servers prior to v1.22 will silently drop this field when the obj
Kubernetes API servers prior to v1.22 silently drop this field when the object is created.
{{< /note >}}

<!--
### API-based signers {#signer-api}

Users of the REST API can sign CSRs by submitting an UPDATE request to the `status`
|
||||||
-----END CERTIFICATE-----
|
-----END CERTIFICATE-----
|
||||||
```
|
```
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
Non-PEM content may appear before or after the CERTIFICATE PEM blocks and is unvalidated,
|
Non-PEM content may appear before or after the CERTIFICATE PEM blocks and is unvalidated,
|
||||||
to allow for explanatory text as described in section 5.2 of RFC7468.
|
to allow for explanatory text as described in section 5.2 of RFC7468.
|
||||||
|
|
||||||
|
@ -834,7 +834,7 @@ status:

## {{% heading "whatsnext" %}}

<!--
* Read [Manage TLS Certificates in a Cluster](/docs/tasks/tls/managing-tls-in-a-cluster/)
* View the source code for the kube-controller-manager built-in [signer](https://github.com/kubernetes/kubernetes/blob/32ec6c212ec9415f604ffc1f4c1f29b782968ff1/pkg/controller/certificates/signer/cfssl_signer.go)
* View the source code for the kube-controller-manager built-in [approver](https://github.com/kubernetes/kubernetes/blob/32ec6c212ec9415f604ffc1f4c1f29b782968ff1/pkg/controller/certificates/approver/sarapprove.go)
@ -73,12 +73,14 @@ Write operations:
Auth-related operations:
-->

* read/write access to the certificationsigningrequests API used for TLS bootstrapping
* read/write access to the
  [certificationsigningrequests API](/zh/docs/reference/access-authn-authz/certificate-signing-requests/)
  used for TLS bootstrapping
* the ability to create tokenreviews and subjectaccessreviews for delegated authentication/authorization checks
* the ability to create TokenReviews and SubjectAccessReviews for delegated authentication/authorization checks

<!--
* read/write access to the certificationsigningrequests API for TLS bootstrapping
* read/write access to the [CertificateSigningRequests API](/docs/reference/access-authn-authz/certificate-signing-requests/) for TLS bootstrapping
* the ability to create tokenreviews and subjectaccessreviews for delegated authentication/authorization checks
* the ability to create TokenReviews and SubjectAccessReviews for delegated authentication/authorization checks
-->

In future releases, the node authorizer may add or remove permissions to ensure that the kubelet
has the minimal set of permissions required to operate correctly.
@ -266,6 +266,7 @@ to the REST api.

<!--
For further documentation refer to the authorization.v1beta1 API objects and
[webhook.go](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go).
[webhook.go](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go).
-->
For further documentation, refer to the authorization.v1beta1 API objects and
[webhook.go](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go).
@ -70,58 +70,6 @@ You can use the `--dry-run=client` flag to preview the object that would be sent
-->
You can use the `--dry-run=client` flag to preview the object that would be sent to your cluster, without really submitting it:

{{< note >}}
<!--
All `kubectl run` generators are deprecated.
See the Kubernetes v1.17 documentation for a [list](https://v1-17.docs.kubernetes.io/docs/reference/kubectl/conventions/#generators) of generators and how they were used.
-->
All `kubectl run` generators are deprecated.
See the Kubernetes v1.17 documentation for a [list](https://v1-17.docs.kubernetes.io/docs/reference/kubectl/conventions/#generators) of generators and how they were used.
{{< /note >}}

<!--
#### Generators
-->
#### Generators

<!--
You can generate the following resources with a kubectl command, `kubectl create --dry-run=client -o yaml`:

* `clusterrole`: Create a ClusterRole.
* `clusterrolebinding`: Create a ClusterRoleBinding for a particular ClusterRole.
* `configmap`: Create a ConfigMap from a local file, directory or literal value.
* `cronjob`: Create a CronJob with the specified name.
* `deployment`: Create a Deployment with the specified name.
* `job`: Create a Job with the specified name.
* `namespace`: Create a Namespace with the specified name.
* `poddisruptionbudget`: Create a PodDisruptionBudget with the specified name.
* `priorityclass`: Create a PriorityClass with the specified name.
* `quota`: Create a Quota with the specified name.
* `role`: Create a Role with single rule.
* `rolebinding`: Create a RoleBinding for a particular Role or ClusterRole.
* `secret`: Create a Secret using specified subcommand.
* `service`: Create a Service using specified subcommand.
* `serviceaccount`: Create a ServiceAccount with the specified name.
-->
You can generate the following resources with a kubectl command, `kubectl create --dry-run=client -o yaml` (a quick illustration of the pattern follows this list):

* `clusterrole`: Create a ClusterRole.
* `clusterrolebinding`: Create a ClusterRoleBinding for a particular ClusterRole.
* `configmap`: Create a ConfigMap from a local file, directory or literal value.
* `cronjob`: Create a CronJob with the specified name.
* `deployment`: Create a Deployment with the specified name.
* `job`: Create a Job with the specified name.
* `namespace`: Create a Namespace with the specified name.
* `poddisruptionbudget`: Create a PodDisruptionBudget with the specified name.
* `priorityclass`: Create a PriorityClass with the specified name.
* `quota`: Create a Quota with the specified name.
* `role`: Create a Role with single rule.
* `rolebinding`: Create a RoleBinding for a particular Role or ClusterRole.
* `secret`: Create a Secret using specified subcommand.
* `service`: Create a Service using specified subcommand.
* `serviceaccount`: Create a ServiceAccount with the specified name.
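For example (the deployment name and image are arbitrary):

```shell
# Print the Deployment manifest that would be created, without submitting it
kubectl create deployment nginx --image=nginx --dry-run=client -o yaml
```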
### `kubectl apply`

<!--
@ -1,9 +1,11 @@
---
title: kubectl Commands
weight: 20
---

<!-- ---
title: kubectl Commands
weight: 20
--- -->

<!-- [kubectl Command Reference](/docs/reference/generated/kubectl/kubectl-commands/) -->
@ -11,41 +11,3 @@ weight: 20
{{/* If you're localizing this page, you only need to copy the front matter */}}
{{/* and add a redirect into "/static/_redirects", for YOUR localization. */}}
-->

## kind

<!--
[`kind`](https://kind.sigs.k8s.io/docs/) lets you run Kubernetes on
your local computer. This tool requires that you have
[Docker](https://docs.docker.com/get-docker/) installed and configured.

The kind [Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) page
shows you what you need to do to get up and running with kind.
-->
You can use [`kind`](https://kind.sigs.k8s.io/docs/) to run Kubernetes on your local computer.
This tool requires that you have [Docker](https://docs.docker.com/get-docker/) installed and configured.

The kind [Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) page
shows you what you need to do to get up and running with kind.

## minikube

<!--
Like `kind`, [`minikube`](https://minikube.sigs.k8s.io/) is a tool that lets you run Kubernetes
locally. `minikube` runs a single-node Kubernetes cluster on your personal
computer (including Windows, macOS and Linux PCs) so that you can try out
Kubernetes, or for daily development work.

You can follow the official
[Get Started!](https://minikube.sigs.k8s.io/docs/start/) guide if your focus is
on getting the tool installed.
-->
Like `kind`, [`minikube`](https://minikube.sigs.k8s.io/) is a tool that lets you run Kubernetes locally.
`minikube` runs a single-node Kubernetes cluster on your personal computer
(including Windows, macOS, and Linux PCs) so that you can try out Kubernetes, or use it for daily development work.

If your focus is on getting the tool installed, you can follow the official
[Get Started!](https://minikube.sigs.k8s.io/docs/start/) guide.
@ -1,4 +1,4 @@
---
title: Accessing Clusters
weight: 20
content_type: concept
@ -50,10 +50,10 @@ kubectl config view

<!--
Many of the [examples](/docs/user-guide/kubectl-cheatsheet) provide an introduction to using
kubectl and complete documentation is found in the [kubectl manual](/docs/user-guide/kubectl-overview).
`kubectl` and complete documentation is found in the [kubectl reference](/docs/reference/kubectl/).
-->
Many of the [examples](/zh/docs/reference/kubectl/cheatsheet/) provide an introduction to using kubectl;
more complete documentation is found in the [kubectl manual](/zh/docs/reference/kubectl/overview/).
more complete documentation is found in the [kubectl reference](/zh/docs/reference/kubectl/overview/).

<!--
## Directly accessing the REST API
@@ -139,18 +139,47 @@ curl http://localhost:8080/api/

<!--
### Without kubectl proxy

In Kubernetes version 1.3 or later, `kubectl config view` no longer displays the token. Use `kubectl apply` and `kubectl describe secret...` to create a token for the default service account with grep/cut:

First, create the Secret, requesting a token for the default ServiceAccount:
-->
### 不使用 kubectl proxy

在 Kubernetes 1.3 或更高版本中,`kubectl config view` 不再显示 token。
使用 `kubectl apply` 和 `kubectl describe secret ...` 以及 grep/cut 操作来为 default 服务帐户创建令牌,如下所示:

`grep/cut` 方法实现:

首先,创建 Secret,请求默认 ServiceAccount 的令牌:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: default-token
  annotations:
    kubernetes.io/service-account.name: default
type: kubernetes.io/service-account-token
EOF
```

<!--
Next, wait for the token controller to populate the Secret with a token:

Capture and use the generated token:
-->
接下来,等待令牌控制器使用令牌填充 Secret:

```shell
while ! kubectl describe secret default-token | grep -E '^token' >/dev/null; do
  echo "waiting for token..." >&2
  sleep 1
done
```

捕获并使用生成的令牌:

```shell
APISERVER=$(kubectl config view | grep server | cut -f 2- -d ":" | tr -d " ")
TOKEN=$(kubectl describe secret default-token | grep -E '^token' | cut -f2 -d':' | tr -d ' ')
curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure
```

```json
@@ -172,7 +201,7 @@ curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure

```shell
APISERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
TOKEN=$(kubectl get secret default-token -o jsonpath='{.data.token}' | base64 --decode)
curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure
```
@@ -9,7 +9,9 @@ card:
---
<!--
reviewers:
- floreks
- maciaszczykm
- shu-mutou
- mikedanese
title: Deploy and Access the Kubernetes Dashboard
content_type: concept

@@ -55,7 +57,7 @@ The Dashboard UI is not deployed by default. To deploy it, run the following com
默认情况下不会部署 Dashboard。可以通过以下命令部署:

```
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.0/aio/deploy/recommended.yaml
```
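One common way to reach the deployed UI is through `kubectl proxy`; a minimal sketch (the URL path is the conventional one for the `kubernetes-dashboard` Service):

```shell
# Start a local proxy to the API server (listens on 127.0.0.1:8001 by default).
kubectl proxy

# Then browse to the Dashboard through the proxy:
# http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
```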
<!--
@@ -151,7 +151,17 @@ See [Access Clusters Using the Kubernetes API](/docs/tasks/administer-cluster/ac
As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create proxy URLs that include service endpoints, suffixes, and parameters, you append to the service's proxy URL:
`http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`[https:]service_name[:port_name]`*`/proxy`

If you haven't specified a name for your port, you don't have to specify *port_name* in the URL. You can also use the port number in place of the *port_name* for both named and unnamed ports.

By default, the API server proxies to your service using HTTP. To use HTTPS, prefix the service name with `https:`:
`http://<kubernetes_master_address>/api/v1/namespaces/<namespace_name>/services/<service_name>/proxy`

The supported formats for the `<service_name>` segment of the URL are:

* `<service_name>` - proxies to the default or unnamed port using http
* `<service_name>:<port_name>` - proxies to the specified port name or port number using http
* `https:<service_name>:` - proxies to the default or unnamed port using https (note the trailing colon)
* `https:<service_name>:<port_name>` - proxies to the specified port name or port number using https
-->
#### 手动构建 API 服务器代理 URLs {#manually-constructing-apiserver-proxy-urls}

@@ -160,6 +170,15 @@ If you haven't specified a name for your port, you don't have to specify *port_n
`http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`service_name[:port_name]`*`/proxy`

如果还没有为你的端口指定名称,你可以不用在 URL 中指定 *port_name*。
对于命名和未命名端口,你还可以使用端口号代替 *port_name*。

默认情况下,API 服务器使用 HTTP 为你的服务提供代理。要使用 HTTPS,请在服务名称前加上 `https:`:
`http://<kubernetes_master_address>/api/v1/namespaces/<namespace_name>/services/<service_name>/proxy`

URL 的 `<service_name>` 段支持的格式为:

* `<service_name>` - 使用 http 代理到默认或未命名端口
* `<service_name>:<port_name>` - 使用 http 代理到指定的端口名称或端口号
* `https:<service_name>:` - 使用 https 代理到默认或未命名端口(注意尾随冒号)
* `https:<service_name>:<port_name>` - 使用 https 代理到指定的端口名称或端口号

<!--
##### Examples
@@ -672,3 +672,20 @@ ETCDCTL_API=3 etcdctl --data-dir <data-dir-location> snapshot restore snapshotdb
依赖一些过时的数据。请注意,实际中还原会花费一些时间。
在还原过程中,关键组件将丢失领导锁并自行重启。
{{< /note >}}

<!--
## Upgrading etcd clusters

For more details on etcd upgrade, please refer to the [etcd upgrades](https://etcd.io/docs/latest/upgrades/) documentation.

{{< note >}}
Before you start an upgrade, please back up your etcd cluster first.
{{< /note >}}
-->
## 升级 etcd 集群

有关 etcd 升级的更多详细信息,请参阅 [etcd 升级](https://etcd.io/docs/latest/upgrades/)文档。

{{< note >}}
在开始升级之前,请先备份你的 etcd 集群。
{{< /note >}}
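Since the note above asks for a backup before any upgrade, a minimal snapshot sketch follows; the endpoint and certificate paths are placeholders for your own cluster layout:

```shell
# Save a snapshot of the running etcd member before upgrading.
ETCDCTL_API=3 etcdctl --endpoints https://127.0.0.1:2379 \
  --cacert /etc/kubernetes/pki/etcd/ca.crt \
  --cert /etc/kubernetes/pki/etcd/server.crt \
  --key /etc/kubernetes/pki/etcd/server.key \
  snapshot save snapshotdb
```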
@@ -289,10 +289,10 @@ Install-WindowsFeature -Name containers
```
<!--
Install Docker
Instructions to do so are available at [Install Docker Engine - Enterprise on Windows Servers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/quick-start/set-up-environment?tabs=Windows-Server#install-docker).
-->
安装 Docker
操作指南在 [Install Docker Engine - Enterprise on Windows Servers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/quick-start/set-up-environment?tabs=Windows-Server#install-docker)。

<!--
#### Install wins, kubelet, and kubeadm.
@@ -287,7 +287,8 @@ Kubernetes 调度器在优化 Pod 调度过程时,会考虑“可分配的”
并完成跨 NUMA 节点的预留操作。

<!--
The flag specifies a comma-separated list of memory reservations of different memory types per NUMA node.
Memory reservations across multiple NUMA nodes can be specified using semicolon as separator.
This parameter is only useful in the context of the Memory Manager feature.
The Memory Manager will not use this reserved memory for the allocation of container workloads.

@@ -295,7 +296,8 @@ For example, if you have a NUMA node "NUMA0" with `10Gi` of memory available, an
the `--reserved-memory` was specified to reserve `1Gi` of memory at "NUMA0",
the Memory Manager assumes that only `9Gi` is available for containers.
-->
标志设置的值是一个按 NUMA 节点的不同内存类型所给的内存预留值的列表,用逗号分开。
可以使用分号作为分隔符来指定跨多个 NUMA 节点的内存预留。
只有在内存管理器特性被启用的语境下,这个参数才有意义。
内存管理器不会使用这些预留的内存来为容器负载分配内存。
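As a hedged illustration of the syntax just described (commas between memory types within one NUMA node, semicolons between nodes), a hypothetical flag value might look like:

```
--reserved-memory '0:memory=1Gi,hugepages-1Gi=2Gi;1:memory=2Gi'
```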
@@ -426,7 +428,7 @@ Here is an example of a correct configuration:
--kube-reserved=cpu=4,memory=4Gi
--system-reserved=cpu=1,memory=1Gi
--memory-manager-policy=Static
--reserved-memory '0:memory=3Gi;1:memory=2148Mi'
```

<!--
@@ -20,13 +20,13 @@ dockershim to other container runtimes.
<!--
Since the announcement of [dockershim deprecation](/blog/2020/12/08/kubernetes-1-20-release-announcement/#dockershim-deprecation)
in Kubernetes 1.20, there were questions on how this will affect various workloads and Kubernetes
installations. Our [Dockershim Removal FAQ](/blog/2022/02/17/dockershim-faq/) is there to help you
to understand the problem better.
-->
自从 Kubernetes 1.20 宣布
[弃用 dockershim](/zh/blog/2020/12/08/kubernetes-1-20-release-announcement/#dockershim-deprecation),
各类疑问随之而来:这对各类工作负载和 Kubernetes 部署会产生什么影响。
我们的[弃用 Dockershim 常见问题](/blog/2022/02/17/dockershim-faq/)可以帮助你更好地理解这个问题。

<!-- It is recommended to migrate from dockershim to alternative container runtimes.
Check out [container runtimes](/docs/setup/production-environment/container-runtimes/)
@@ -29,7 +29,7 @@ you can take to check whether any workloads could be affected by `dockershim` de
-->
本页讲解你的集群把 Docker 用作容器运行时的运作机制,
并提供使用 `dockershim` 时,它所扮演角色的详细信息,
继而展示了一组操作,可用来检查弃用 `dockershim` 对你的工作负载是否有影响。

<!--
## Finding if your app has dependencies on Docker {#find-docker-dependencies}

@@ -41,16 +41,16 @@ If you are using Docker for building your application containers, you can still
run these containers on any container runtime. This use of Docker does not count
as a dependency on Docker as a container runtime.
-->
即使你是通过 Docker 创建的应用容器,也不妨碍你在其他任何容器运行时上运行这些容器。
这种使用 Docker 的方式并不构成对 Docker 作为一个容器运行时的依赖。

<!--
When an alternative container runtime is used, executing Docker commands may either
not work or yield unexpected output. This is how you can find whether you have a
dependency on Docker:
-->
当用了别的容器运行时之后,Docker 命令可能不工作,或者产生意外的输出。
下面是判定你是否依赖于 Docker 的方法(列表之后给出一个辅助排查的示例):

<!--
1. Make sure no privileged Pods execute Docker commands (like `docker ps`),

@@ -75,20 +75,21 @@
cluster before migration.
-->
1. 确认没有特权 Pod 执行 Docker 命令(如 `docker ps`)、重新启动 Docker
   服务(如 `systemctl restart docker.service`)或修改 Docker 配置文件
   `/etc/docker/daemon.json`。
2. 检查 Docker 配置文件(如 `/etc/docker/daemon.json`)中容器镜像仓库的镜像(mirror)站点设置。
   这些配置通常需要针对不同容器运行时来重新设置。
3. 检查确保在 Kubernetes 基础设施之外的节点上运行的脚本和应用程序没有执行 Docker 命令。
   可能的情况如:
   - SSH 到节点排查故障;
   - 节点启动脚本;
   - 直接安装在节点上的监控和安全代理。
4. 检查执行上述特权操作的第三方工具。详细操作请参考
   [从 dockershim 迁移遥测和安全代理](/zh/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents)。
5. 确认没有对 dockershim 行为的间接依赖。这是一种极端情况,不太可能影响你的应用。
   一些工具很可能被配置为使用了 Docker 特性,比如,基于特定指标发警报,
   或者在故障排查指令的一个环节中搜索特定的日志信息。
   如果你有此类配置的工具,需要在迁移之前,在测试集群上测试这类行为。
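The checks above are manual; as a rough starting point (an illustrative sketch, not an official tool), the following lists privileged containers that are worth auditing for Docker CLI calls or mounts of `/var/run/docker.sock`:

```shell
# Print namespace, pod name, and the privileged flags of each pod's containers,
# then keep only pods where at least one container is privileged.
kubectl get pods --all-namespaces \
  -o jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.spec.containers[*].securityContext.privileged}{"\n"}{end}' \
  | grep true
```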
<!--
@@ -103,8 +104,7 @@ uses the container runtime interface as an abstraction so that you can use any c
container runtime.
-->
[容器运行时](/zh/docs/concepts/containers/#container-runtimes)是一个软件,用来运行组成 Kubernetes Pod 的容器。
Kubernetes 负责编排和调度 Pod;在每一个节点上,{{< glossary_tooltip text="kubelet" term_id="kubelet" >}}
使用抽象的容器运行时接口,所以你可以任意选用兼容的容器运行时。

<!--
@@ -118,14 +118,14 @@ if Docker were a CRI compatible runtime.
在早期版本中,Kubernetes 仅兼容一个容器运行时:Docker。
在 Kubernetes 发展历史中,集群运营人员希望采用更多的容器运行时。
于是 CRI 被设计出来满足这类灵活性需要,而 kubelet 亦开始支持 CRI。
然而,因为 Docker 在 CRI 规范创建之前就已经存在,Kubernetes 就创建了一个适配器组件 `dockershim`。
dockershim 适配器允许 kubelet 与 Docker 交互,就好像 Docker 是一个 CRI 兼容的运行时一样。

<!--
You can read about it in [Kubernetes Containerd integration goes GA](/blog/2018/05/24/kubernetes-containerd-integration-goes-ga/) blog post.
-->
你可以阅读博文
[Kubernetes 正式支持集成 Containerd](/zh/blog/2018/05/24/kubernetes-containerd-integration-goes-ga/)。

<!-- Dockershim vs. CRI with Containerd -->
![Dockershim 和 Containerd CRI 的实现对比图](/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/cri-containerd.png)
@@ -138,8 +138,8 @@ So any Docker tooling or fancy UI you might have used
before to check on these containers is no longer available.
-->
切换到容器运行时 Containerd 可以消除掉中间环节。
所有相同的容器都可由 Containerd 这类容器运行时来运行。
但是现在,由于直接用容器运行时调度容器,它们对 Docker 是不可见的。
因此,你以前用来检查这些容器的 Docker 工具或漂亮的 UI 都不再可用。

<!--
@@ -156,10 +156,9 @@ the Kubernetes API rather than directly through the container runtime (this advi
for all container runtimes, not only Docker).
-->
{{< note >}}
如果你在用 Kubernetes 运行工作负载,最好通过 Kubernetes API 停止容器,
而不是通过容器运行时来停止它们
(此建议适用于所有容器运行时,不仅仅是针对 Docker)。
{{< /note >}}

<!--
@@ -170,4 +169,5 @@ by Kubernetes.
-->
你仍然可以下载镜像,或者用 `docker build` 命令创建它们。
但用 Docker 创建、下载的镜像,对于容器运行时和 Kubernetes,均不可见。
为了在 Kubernetes 中使用,需要把镜像推送(push)到某镜像仓库。
@@ -46,7 +46,7 @@ as Windows server containers, meaning that the version of the base images does n
to match that of the host. It is, however, recommended that you use the same base image
version as your Windows Server container workloads to ensure you do not have any unused
images taking up space on the node. HostProcess containers also support
[volume mounts](#volume-mounts) within the container volume.
-->
类似于安装安全补丁、事件日志收集等这类管理性质的任务可以在不需要集群操作员登录到每个
Windows 节点的前提下执行。HostProcess 容器可以以主机上存在的任何用户账户来运行,

@@ -58,7 +58,7 @@ Windows 节点的前提下执行。HostProcess 容器可以以主机上存在的
这意味着基础镜像的版本不必与主机操作系统的版本匹配。
不过,仍然建议你像使用 Windows 服务器容器负载那样,使用相同的基础镜像版本,
这样你就不会有一些未使用的镜像占用节点上的存储空间。HostProcess 容器也支持
在容器卷内执行[卷挂载](#volume-mounts)。

<!--
### When should I use a Windows HostProcess container?
@@ -130,12 +130,12 @@ These limitations are relevant for Kubernetes v{{< skew currentVersion >}}:

<!--
- HostProcess containers require containerd 1.6 or higher
  {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}.
- HostProcess pods can only contain HostProcess containers. This is a current limitation
  of the Windows OS; non-privileged Windows containers cannot share a vNIC with the host IP namespace.
- HostProcess containers run as a process on the host and do not have any degree of
  isolation other than resource constraints imposed on the HostProcess user account. Neither
  filesystem or Hyper-V isolation are supported for HostProcess containers.
-->
- HostProcess 容器需要 containerd 1.6 或更高版本的
  {{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}}。

@@ -145,13 +145,14 @@ filesystem or Hyper-V isolation are supported for HostProcess containers.
  用户账号所实施的资源约束外,不提供任何形式的隔离。HostProcess 容器不支持文件系统或
  Hyper-V 隔离。
<!--
- Volume mounts are supported and are mounted under the container volume. See
  [Volume Mounts](#volume-mounts)
- A limited set of host user accounts are available for HostProcess containers by default.
  See [Choosing a User Account](#choosing-a-user-account).
- Resource limits (disk, memory, cpu count) are supported in the same fashion as processes
  on the host.
- Both Named pipe mounts and Unix domain sockets are **not** supported and should instead
  be accessed via their path on the host (e.g. \\\\.\\pipe\\\*)
-->
- 卷挂载是被支持的,并且将被挂载到容器卷下。参见[卷挂载](#volume-mounts)。
- 默认情况下有一组主机用户账户可供 HostProcess 容器使用。
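For orientation, a minimal HostProcess Pod sketch follows; the image name and user account are illustrative placeholders, and `hostProcess`, `runAsUserName`, and `hostNetwork` are the fields the limitations above revolve around:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hostprocess-demo                        # illustrative name
spec:
  securityContext:
    windowsOptions:
      hostProcess: true                         # run directly on the host
      runAsUserName: "NT AUTHORITY\\SYSTEM"     # one of the default host accounts
  hostNetwork: true                             # HostProcess pods must use the host network
  containers:
  - name: agent
    image: registry.example.com/windows-agent:1.0   # placeholder image
  nodeSelector:
    kubernetes.io/os: windows
```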
@@ -1,5 +1,5 @@
---
title: 为 Pod 或容器配置安全上下文
content_type: task
weight: 80
---
@@ -21,7 +21,8 @@ a Pod or Container. Security context settings include, but are not limited to:

* Discretionary Access Control: Permission to access an object, like a file, is based on
  [user ID (UID) and group ID (GID)](https://wiki.archlinux.org/index.php/users_and_groups).
* [Security Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux):
  Objects are assigned security labels.
* Running as privileged or unprivileged.
* [Linux Capabilities](https://linux-audit.com/linux-capabilities-hardening-linux-binaries-by-removing-setuid/):
  Give a process some privileges, but not all the privileges of the root user.

@@ -29,8 +30,8 @@ a Pod or Container. Security context settings include, but are not limited to:
安全上下文(Security Context)定义 Pod 或 Container 的特权与访问控制设置。
安全上下文包括但不限于:

* 自主访问控制(Discretionary Access Control):
  基于[用户 ID(UID)和组 ID(GID)](https://wiki.archlinux.org/index.php/users_and_groups)
  来判定对对象(例如文件)的访问权限。
* [安全性增强的 Linux(SELinux)](https://zh.wikipedia.org/wiki/%E5%AE%89%E5%85%A8%E5%A2%9E%E5%BC%BA%E5%BC%8FLinux):
  为对象赋予安全性标签。

@@ -38,21 +39,31 @@ a Pod or Container. Security context settings include, but are not limited to:
* [Linux 权能](https://linux-audit.com/linux-capabilities-hardening-linux-binaries-by-removing-setuid/):
  为进程赋予 root 用户的部分特权而非全部特权。
<!--
* [AppArmor](/docs/tutorials/security/apparmor/):
  Use program profiles to restrict the capabilities of individual programs.
* [Seccomp](/docs/tutorials/security/seccomp/): Filter a process's system calls.
* `allowPrivilegeEscalation`: Controls whether a process can gain more privileges than
  its parent process. This bool directly controls whether the
  [`no_new_privs`](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt)
  flag gets set on the container process.
  `allowPrivilegeEscalation` is always true
  when the container:

  - is run as privileged, or
  - has `CAP_SYS_ADMIN`

* readOnlyRootFilesystem: Mounts the container's root filesystem as read-only.
-->
* [AppArmor](/zh/docs/tutorials/security/apparmor/):使用程序配置来限制个别程序的权能。
* [Seccomp](/zh/docs/tutorials/security/seccomp/):过滤进程的系统调用。
* `allowPrivilegeEscalation`:控制进程是否可以获得超出其父进程的特权。
  此布尔值直接控制是否为容器进程设置
  [`no_new_privs`](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt) 标志。
  当容器满足以下条件之一时,`allowPrivilegeEscalation` 总是为 true:

  - 以特权模式运行,或者
  - 具有 `CAP_SYS_ADMIN` 权能

* readOnlyRootFilesystem:以只读方式加载容器的根文件系统。
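Pulling several of the fields above into one place, a minimal sketch of a hardened container `securityContext` (the values are illustrative, not universal recommendations):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: security-context-sketch      # illustrative name
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "sleep 1h"]
    securityContext:
      allowPrivilegeEscalation: false   # sets no_new_privs on the container process
      readOnlyRootFilesystem: true      # mount the root filesystem read-only
      capabilities:
        drop: ["ALL"]                   # start from an empty capability set
```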
<!--
@@ -213,19 +224,24 @@ Run the following command:
id
```

<!--
The output is similar to this:
-->
输出类似于:

```none
uid=1000 gid=3000 groups=2000
```

<!--
From the output, you can see that `gid` is 3000 which is same as the `runAsGroup` field.
If the `runAsGroup` was omitted, the `gid` would remain as 0 (root) and the process will
be able to interact with files that are owned by the root (0) group and groups that have
the required group permissions for the root (0) group.

Exit your shell:
-->
从输出中你会看到 `gid` 值为 3000,也就是 `runAsGroup` 字段的值。
如果 `runAsGroup` 被忽略,则 `gid` 会取值 0(root),而进程就能够与 root
用户组所拥有以及要求 root 用户组访问权限的文件交互。
@@ -251,18 +267,21 @@ slowing Pod startup. You can use the `fsGroupChangePolicy` field inside a `secur
to control the way that Kubernetes checks and manages ownership and permissions
for a volume.
-->
默认情况下,Kubernetes 在挂载一个卷时,会递归地更改每个卷中的内容的属主和访问权限,
使之与 Pod 的 `securityContext` 中指定的 `fsGroup` 匹配。
对于较大的数据卷,检查和变更属主与访问权限可能会花费很长时间,降低 Pod 启动速度。
你可以在 `securityContext` 中使用 `fsGroupChangePolicy` 字段来控制 Kubernetes
检查和管理卷属主和访问权限的方式。

<!--
**fsGroupChangePolicy** - `fsGroupChangePolicy` defines behavior for changing ownership
and permission of the volume before being exposed inside a Pod.
This field only applies to volume types that support `fsGroup` controlled ownership and permissions.
This field has two possible values:

* _OnRootMismatch_: Only change permissions and ownership if permission and ownership of
  root directory does not match with expected permissions of the volume.
  This could help shorten the time it takes to change ownership and permission of a volume.
* _Always_: Always change permission and ownership of the volume when volume is mounted.

For example:
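A minimal Pod-level sketch consistent with the description above (values illustrative):

```yaml
securityContext:
  runAsUser: 1000
  runAsGroup: 3000
  fsGroup: 2000
  fsGroupChangePolicy: "OnRootMismatch"   # skip the recursive ownership change when the root already matches
```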
@@ -293,7 +312,7 @@ This field has no effect on ephemeral volume types such as
and [`emptydir`](/docs/concepts/storage/volumes/#emptydir).
-->
{{< note >}}
此字段对于 [`secret`](/zh/docs/concepts/storage/volumes/#secret)、
[`configMap`](/zh/docs/concepts/storage/volumes/#configmap)
和 [`emptydir`](/zh/docs/concepts/storage/volumes/#emptydir)
这类临时性存储无效。
@@ -316,23 +335,24 @@ ownership and permission change, `fsGroupChangePolicy` does not take effect, and
as specified by CSI, the driver is expected to mount the volume with the
provided `fsGroup`, resulting in a volume that is readable/writable by the
`fsGroup`.
-->
如果你部署了一个[容器存储接口 (CSI)](https://github.com/container-storage-interface/spec/blob/master/spec.md)
驱动,而该驱动支持 `VOLUME_MOUNT_GROUP` `NodeServiceCapability`,
在 `securityContext` 中指定 `fsGroup` 来设置文件所有权和权限的过程将由 CSI
驱动而不是 Kubernetes 来执行,前提是 Kubernetes 的 `DelegateFSGroupToCSIDriver`
特性门控已启用。在这种情况下,由于 Kubernetes 不执行任何所有权和权限更改,
`fsGroupChangePolicy` 不会生效,并且按照 CSI 的规定,CSI 驱动应该使用所指定的
`fsGroup` 来挂载卷,从而生成一个对 `fsGroup` 可读/可写的卷。

<!--
Please refer to the [KEP](https://github.com/gnufied/enhancements/blob/master/keps/sig-storage/2317-fsgroup-on-mount/README.md)
and the description of the `VolumeCapability.MountVolume.volume_mount_group`
field in the [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume)
for more information.
-->
更多的信息请参考 [KEP](https://github.com/gnufied/enhancements/blob/master/keps/sig-storage/2317-fsgroup-on-mount/README.md)
和 [CSI 规范](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume)
中的字段 `VolumeCapability.MountVolume.volume_mount_group` 的描述。

<!--
## Set the security context for a Container
@@ -352,9 +372,8 @@ and the Container have a `securityContext` field:
若要为 Container 设置安全性配置,可以在 Container 清单中包含 `securityContext`
字段。`securityContext` 字段的取值是一个
[SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core)
对象。你为 Container 设置的安全性配置仅适用于该容器本身,并且所指定的设置在与
Pod 层面设置的内容发生重叠时,会重载后者。Container 层面的设置不会影响到 Pod 的卷。

下面是一个 Pod 的配置文件,其中包含一个 Container。Pod 和 Container 都有
`securityContext` 字段:

@@ -402,7 +421,7 @@ The output shows that the processes are running as user 2000. This is the value
of `runAsUser` specified for the Container. It overrides the value 1000 that is
specified for the Pod.
-->
输出显示进程以用户 2000 运行。该值是在 Container 的 `runAsUser` 中设置的。
该设置值重载了 Pod 层面所设置的值 1000。

```
@@ -434,12 +453,12 @@ Here is configuration file that does not add or remove any Container capabilitie
-->
## 为 Container 设置权能 {#set-capabilities-for-a-container}

使用 [Linux 权能](https://man7.org/linux/man-pages/man7/capabilities.7.html),
你可以赋予进程 root 用户所拥有的某些特权,但不必赋予其全部特权。
要为 Container 添加或移除 Linux 权能,可以在 Container 清单的 `securityContext`
节包含 `capabilities` 字段。

首先,看一下不包含 `capabilities` 字段的时候会发生什么。
下面是一个配置文件,其中没有添加或移除容器的权能:

{{< codenew file="pods/security/security-context-3.yaml" >}}

@@ -598,12 +617,15 @@ for definitions of the capability constants.
了解权能常数的定义。

<!--
Linux capability constants have the form `CAP_XXX`.
But when you list capabilities in your Container manifest, you must
omit the `CAP_` portion of the constant.
For example, to add `CAP_SYS_TIME`, include `SYS_TIME` in your list of capabilities.
-->
{{< note >}}
Linux 权能常数定义的形式为 `CAP_XXX`。但是你在 Container 清单中列举权能时,
要将权能名称中的 `CAP_` 部分去掉。例如,要添加 `CAP_SYS_TIME`,
可在权能列表中添加 `SYS_TIME`。
{{< /note >}}
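To make the note concrete, a short sketch that adds two capabilities to a container (the chosen names are just examples):

```yaml
securityContext:
  capabilities:
    add: ["NET_ADMIN", "SYS_TIME"]   # CAP_NET_ADMIN and CAP_SYS_TIME, listed without the CAP_ prefix
```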
<!--
@@ -623,18 +645,18 @@ flag).
Here is an example that sets the Seccomp profile to the node's container runtime
default profile:
-->
## 为容器设置 Seccomp 配置

若要为容器设置 Seccomp 配置(Profile),可在你的 Pod 或 Container 清单的
`securityContext` 节中包含 `seccompProfile` 字段。该字段是一个
[SeccompProfile](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#seccompprofile-v1-core)
对象,包含 `type` 和 `localhostProfile` 属性。
`type` 的合法选项包括 `RuntimeDefault`、`Unconfined` 和 `Localhost`。
`localhostProfile` 只能在 `type: Localhost` 配置下才可以设置。
该字段标明节点上预先设定的配置的路径,路径是相对于 kubelet 所配置的
Seccomp 配置路径(使用 `--root-dir` 设置)而言的。

下面是一个例子,设置容器使用节点上容器运行时的默认配置作为 Seccomp 配置:

```yaml
...
@@ -704,15 +726,15 @@ Pod 的安全上下文适用于 Pod 中的容器,也适用于 Pod 所挂载的

<!--
* `fsGroup`: Volumes that support ownership management are modified to be owned
  and writable by the GID specified in `fsGroup`. See the
  [Ownership Management design document](https://git.k8s.io/community/contributors/design-proposals/storage/volume-ownership-management.md)
  for more details.

* `seLinuxOptions`: Volumes that support SELinux labeling are relabeled to be accessible
  by the label specified under `seLinuxOptions`. Usually you only
  need to set the `level` section. This sets the
  [Multi-Category Security (MCS)](https://selinuxproject.org/page/NB_MLS)
  label given to all Containers in the Pod as well as the Volumes.
-->
* `fsGroup`:支持属主管理的卷会被修改,将其属主变更为 `fsGroup` 所指定的 GID,
  并且对该 GID 可写。进一步的细节可参阅
@@ -763,9 +785,9 @@ kubectl delete pod security-context-demo-4
-->
* [PodSecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritycontext-v1-core) API 定义
* [SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core) API 定义
* [使用最新的安全性增强来调优 Docker(英文)](https://github.com/containerd/containerd/blob/main/docs/cri/config.md)
* [安全上下文的设计文档(英文)](https://git.k8s.io/community/contributors/design-proposals/auth/security_context.md)
* [属主管理的设计文档(英文)](https://git.k8s.io/community/contributors/design-proposals/storage/volume-ownership-management.md)
* [Pod 安全策略](/zh/docs/concepts/policy/pod-security-policy/)
* [AllowPrivilegeEscalation 的设计文档(英文)](https://git.k8s.io/community/contributors/design-proposals/auth/no-new-privs.md)
@@ -46,8 +46,7 @@ with your new arguments.
如果在配置文件中设置了容器启动时要执行的命令及其参数,那么容器镜像中自带的命令与参数将会被覆盖而不再执行。
如果配置文件中只是设置了参数,却没有设置其对应的命令,那么容器镜像中自带的命令会使用该新参数作为其执行时的参数。

<!--
The `command` field corresponds to `entrypoint` in some container runtimes.
-->
{{< note >}}
在有些容器运行时中,`command` 字段对应 `entrypoint`,请参阅下面的
[说明事项](#notes)。

@@ -161,73 +160,6 @@ command: ["/bin/sh"]
args: ["-c", "while true; do echo hello; sleep 10;done"]
```

<!--
## Notes

This table summarizes the field names used by Docker and Kubernetes.

| Description                         | Docker field name | Kubernetes field name |
|-------------------------------------|-------------------|-----------------------|
| The command run by the container    | Entrypoint        | command               |
| The arguments passed to the command | Cmd               | args                  |
-->
## 说明事项 {#notes}

下表给出了 Docker 与 Kubernetes 中对应的字段名称。

| 描述           | Docker 字段名称 | Kubernetes 字段名称 |
|----------------|-----------------|---------------------|
| 容器执行的命令 | Entrypoint      | command             |
| 传给命令的参数 | Cmd             | args                |

<!--
When you override the default Entrypoint and Cmd, these rules apply:

* If you do not supply `command` or `args` for a Container, the defaults defined
  in the Docker image are used.
* If you supply a `command` but no `args` for a Container, only the supplied
  `command` is used. The default EntryPoint and the default Cmd defined in the Docker
  image are ignored.
* If you supply only `args` for a Container, the default Entrypoint defined in
  the Docker image is run with the `args` that you supplied.
* If you supply a `command` and `args`, the default Entrypoint and the default
  Cmd defined in the Docker image are ignored. Your `command` is run with your
  `args`.
-->
如果要覆盖默认的 Entrypoint 与 Cmd,需要遵循如下规则(列表之后给出一个配置示例):

* 如果在容器配置中没有设置 `command` 或者 `args`,那么将使用 Docker 镜像自带的命令及其参数。
* 如果在容器配置中只设置了 `command` 但是没有设置 `args`,那么容器启动时只会执行该命令,
  Docker 镜像中自带的命令及其参数会被忽略。
* 如果在容器配置中只设置了 `args`,那么 Docker 镜像中自带的命令会使用该新参数作为其执行时的参数。
* 如果在容器配置中同时设置了 `command` 与 `args`,那么 Docker 镜像中自带的命令及其参数会被忽略。
  容器启动时只会执行配置中设置的命令,并使用配置中设置的参数作为命令的参数。
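As a quick illustration of the third rule above, a minimal sketch (the image is a hypothetical one whose Entrypoint is `/ep-1`, matching the example table below):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: args-only-demo                          # illustrative name
spec:
  containers:
  - name: demo
    image: registry.example.com/ep-demo:1.0     # hypothetical image with Entrypoint [/ep-1]
    args: ["zoo", "boo"]                        # only args set: the image Entrypoint runs as /ep-1 zoo boo
  restartPolicy: Never
```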
<!--
Here are some examples:

| Image Entrypoint | Image Cmd   | Container command | Container args | Command run      |
|------------------|-------------|-------------------|----------------|------------------|
| `[/ep-1]`        | `[foo bar]` | <not set>         | <not set>      | `[ep-1 foo bar]` |
| `[/ep-1]`        | `[foo bar]` | `[/ep-2]`         | <not set>      | `[ep-2]`         |
| `[/ep-1]`        | `[foo bar]` | <not set>         | `[zoo boo]`    | `[ep-1 zoo boo]` |
| `[/ep-1]`        | `[foo bar]` | `[/ep-2]`         | `[zoo boo]`    | `[ep-2 zoo boo]` |
-->
下面是一些例子:

| 镜像 Entrypoint | 镜像 Cmd    | 容器 command | 容器 args   | 命令执行         |
|-----------------|-------------|--------------|-------------|------------------|
| `[/ep-1]`       | `[foo bar]` | <not set>    | <not set>   | `[ep-1 foo bar]` |
| `[/ep-1]`       | `[foo bar]` | `[/ep-2]`    | <not set>   | `[ep-2]`         |
| `[/ep-1]`       | `[foo bar]` | <not set>    | `[zoo boo]` | `[ep-1 zoo boo]` |
| `[/ep-1]`       | `[foo bar]` | `[/ep-2]`    | `[zoo boo]` | `[ep-2 zoo boo]` |

## {{% heading "whatsnext" %}}
@@ -30,6 +30,7 @@ Cron jobs can also schedule individual tasks for a specific time, such as if you
-->

在 Kubernetes v1.21 版本中,CronJob 被提升为通用版本(GA)。
如果你使用的是旧版本的 Kubernetes,请参考你正在使用的 Kubernetes 版本的文档,这样你就能看到准确的信息。
旧的 Kubernetes 版本不支持 `batch/v1` CronJob API。

你可以利用 [CronJobs](/zh/docs/concepts/workloads/controllers/cron-jobs) 执行基于时间调度的任务。
这些自动化任务和 Linux 或者 Unix 系统的 [Cron](https://en.wikipedia.org/wiki/Cron) 任务类似。

CronJobs 在创建周期性以及重复性的任务时很有帮助,例如执行备份操作或者发送邮件。
CronJobs 也可以在特定时间调度单个任务,例如你想调度低活跃周期的任务。

@@ -43,6 +44,7 @@ For more limitations, see [CronJobs](/docs/concepts/workloads/controllers/cron-j
CronJobs 有一些限制和特点。
例如,在特定状况下,同一个 CronJob 可以创建多个任务。
因此,任务应该是幂等的。

查看更多限制,请参考 [CronJobs](/zh/docs/concepts/workloads/controllers/cron-jobs)。

## {{% heading "prerequisites" %}}

@@ -134,16 +136,14 @@ hello */1 * * * * False 0 50s 75s
```

<!--
You should see that the cron job `hello` successfully scheduled a job at the time specified in `LAST SCHEDULE`. There are currently 0 active jobs, meaning that the job has completed or failed.

Now, find the pods that the last scheduled job created and view the standard output of one of the pods.
-->
你应该能看到 `hello` CronJob 在 `LAST SCHEDULE` 声明的时间点成功地调度了一次任务。
有 0 个活跃的任务意味着任务执行完毕或者执行失败。

现在,找到最后一次调度任务创建的 Pod 并查看一个 Pod 的标准输出。

<!--
The job name and pod name are different.

@@ -165,6 +165,10 @@ Show pod log:

```shell
kubectl logs $pods
```

<!--
The output is similar to this:
-->
输出与此类似:

```
Fri Feb 22 11:02:09 UTC 2019

@@ -359,6 +363,6 @@ By default, they are set to 3 and 1 respectively. Setting a limit to `0` corres

`.spec.successfulJobsHistoryLimit` 和 `.spec.failedJobsHistoryLimit` 是可选的。
这两个字段指定应保留多少已完成和失败的任务。
默认设置为 3 和 1。限制设置为 `0` 代表相应类型的任务完成后不会保留。
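Connecting the history-limit fields above to a manifest, a minimal CronJob sketch (the schedule and image are illustrative):

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"          # run every minute
  successfulJobsHistoryLimit: 3    # keep the last 3 successful Jobs (the default)
  failedJobsHistoryLimit: 1        # keep the last failed Job (the default)
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            command: ["sh", "-c", "date; echo Hello from the Kubernetes cluster"]
          restartPolicy: OnFailure
```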
@@ -279,13 +279,13 @@ First, copy and paste the following template of a Job object, into a file called
首先,复制下面的 Job 对象模板到一个名为 `job.yaml.jinja2` 的文件。

```liquid
{% set params = [{ "name": "apple", "url": "http://dbpedia.org/resource/Apple", },
  { "name": "banana", "url": "http://dbpedia.org/resource/Banana", },
  { "name": "cherry", "url": "http://dbpedia.org/resource/Cherry" }]
%}
{% for p in params %}
{% set name = p["name"] %}
{% set url = p["url"] %}
---
apiVersion: batch/v1
kind: Job

@@ -305,7 +305,7 @@ spec:
        image: busybox
        command: ["sh", "-c", "echo Processing URL {{ url }} && sleep 5"]
      restartPolicy: Never
{% endfor %}
```
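The template still has to be rendered before `kubectl` can consume it; one way (assuming the Python `jinja2` package is installed, though any Jinja2 renderer works) is:

```shell
# Render the Jinja2 template to plain YAML and create all generated Jobs.
alias render_template='python -c "from jinja2 import Template; import sys; print(Template(sys.stdin.read()).render())"'
cat job.yaml.jinja2 | render_template | kubectl apply -f -
```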
<!--
@@ -66,13 +66,15 @@ You may want to set
(default to 1),
[`.spec.minReadySeconds`](/docs/concepts/workloads/controllers/deployment/#min-ready-seconds)
(default to 0) and
[`.spec.updateStrategy.rollingUpdate.maxSurge`](/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#DaemonSetSpec)
(a beta feature and defaults to 0) as well.
-->
你可能想设置
[`.spec.updateStrategy.rollingUpdate.maxUnavailable`](/zh/docs/concepts/workloads/controllers/deployment/#max-unavailable)(默认为 1)、
[`.spec.minReadySeconds`](/zh/docs/concepts/workloads/controllers/deployment/#min-ready-seconds)(默认为 0)和
[`.spec.updateStrategy.rollingUpdate.maxSurge`](/zh/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#DaemonSetSpec)
(一种 Beta 阶段的特性,默认为 0)。
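For reference, these fields live under the DaemonSet spec; a minimal sketch (the numbers are illustrative):

```yaml
spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1   # default: at most one node's daemon Pod is down at a time
      maxSurge: 0         # beta field; defaults to 0
  minReadySeconds: 0      # default: a Pod counts as available immediately
```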
<!--
### Creating a DaemonSet with `RollingUpdate` update strategy
@ -254,7 +254,7 @@ Patch your Deployment:
|
||||||
对 Deployment 执行 patch 操作:
|
对 Deployment 执行 patch 操作:
|
||||||
|
|
||||||
```
|
```
|
||||||
kubectl patch deployment patch-demo --patch "$(cat patch-file-containers.yaml)"
|
kubectl patch deployment patch-demo --patch-file patch-file-tolerations.yaml"
|
||||||
```
|
```
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
|
@ -368,7 +368,7 @@ In your patch command, set `type` to `merge`:
|
||||||
在 patch 命令中,将 `type` 设置为 `merge`:
|
在 patch 命令中,将 `type` 设置为 `merge`:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
kubectl patch deployment patch-demo --type merge --patch "$(cat patch-file-2.yaml)"
|
kubectl patch deployment patch-demo --type merge --patch-file patch-file-2.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
|
@@ -450,14 +450,9 @@ spec:
<!-- Patch your Deployment: -->
Patch your Deployment:

-{{< tabs name="kubectl_retainkeys_example" >}}
-{{< tab name="Bash" codelang="bash" >}}
-kubectl patch deployment retainkeys-demo --type merge --patch "$(cat patch-file-no-retainkeys.yaml)"
-{{< /tab >}}
-{{< tab name="PowerShell" codelang="posh" >}}
-kubectl patch deployment retainkeys-demo --type merge --patch $(Get-Content patch-file-no-retainkeys.yaml -Raw)
-{{< /tab >}}
-{{< /tabs >}}
+```shell
+kubectl patch deployment patch-demo --patch-file patch-file.yaml
+```

<!--
In the output, you can see that it is not possible to set `type` as `Recreate` when a value is defined for `spec.strategy.rollingUpdate`:

@@ -497,14 +492,9 @@ Patch your Deployment again with this new patch:

Patch your Deployment again with this new patch:

-{{< tabs name="kubectl_retainkeys2_example" >}}
-{{< tab name="Bash" codelang="bash" >}}
-kubectl patch deployment retainkeys-demo --type merge --patch "$(cat patch-file-retainkeys.yaml)"
-{{< /tab >}}
-{{< tab name="PowerShell" codelang="posh" >}}
-kubectl patch deployment retainkeys-demo --type merge --patch $(Get-Content patch-file-retainkeys.yaml -Raw)
-{{< /tab >}}
-{{< /tabs >}}
+```shell
+kubectl patch deployment retainkeys-demo --type merge --patch-file patch-file-no-retainkeys.yaml
+```

<!-- Examine the content of the Deployment: -->
Examine the content of the Deployment:
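For context on what these examples patch: the strategic-merge-patch directive `$retainKeys` tells the API server to keep only the listed keys and clear the rest. A hedged sketch of a patch file in the shape this tutorial uses (the exact file contents are not shown in the diff):

```shell
# Sketch of a retainKeys patch: switching the rollout strategy to Recreate
# while clearing the now-invalid rollingUpdate block.
cat > patch-file-retainkeys.yaml <<'EOF'
spec:
  strategy:
    $retainKeys:
    - type
    type: Recreate
EOF

kubectl patch deployment retainkeys-demo --patch-file patch-file-retainkeys.yaml
```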
@@ -625,10 +615,10 @@ The following commands are equivalent:
The following commands are equivalent:

```shell
-kubectl patch deployment patch-demo --patch "$(cat patch-file.yaml)"
+kubectl patch deployment patch-demo --patch-file patch-file.yaml
kubectl patch deployment patch-demo --patch 'spec:\n template:\n spec:\n containers:\n - name: patch-demo-ctr-2\n image: redis'

-kubectl patch deployment patch-demo --patch "$(cat patch-file.json)"
+kubectl patch deployment patch-demo --patch-file patch-file.json
kubectl patch deployment patch-demo --patch '{"spec": {"template": {"spec": {"containers": [{"name": "patch-demo-ctr-2","image": "redis"}]}}}}'
```

@@ -681,6 +671,3 @@ Strategic merge patch is not supported for custom resources.
* [Imperative Management of Kubernetes Objects Using Configuration Files](/zh/docs/tasks/manage-kubernetes-objects/imperative-config)
* [Declarative Management of Kubernetes Objects Using Configuration Files](/zh/docs/tasks/manage-kubernetes-objects/declarative-config/)

@@ -106,17 +106,23 @@ The following methods exist for installing kubectl on Windows:
   ```

<!--
-1. Add the binary in to your `PATH`.
+1. Append or prepend the kubectl binary folder to your `PATH` environment variable.

1. Test to ensure the version of `kubectl` is the same as downloaded:
+
+   Or use this for detailed view of version:
-->
-1. Add the path of the binary to your `PATH`.
+1. Append or prepend the kubectl binary folder to your `PATH` environment variable.

1. Test to ensure the version of `kubectl` is the same as downloaded:

   ```cmd
   kubectl version --client
   ```

+   Or use the following command to see detailed information about the version:
+
+   ```cmd
+   kubectl version --client --output=yaml
+   ```

<!--
[Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/#kubernetes) adds its own version of `kubectl` to `PATH`.
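The added step exposes the structured version report. A small sketch of the two checks (any shell; recent kubectl releases support `--output`):

```shell
# Terse client version:
kubectl version --client

# Full structured detail (gitVersion, goVersion, platform, and so on):
kubectl version --client --output=yaml
```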
@@ -289,11 +295,11 @@ kubectl provides autocompletion support for Bash, Zsh, Fish, and PowerShell, which can
   ```

<!--
-1. Add the binary in to your `PATH`.
+1. Append or prepend the kubectl binary folder to your `PATH` environment variable.

1. Verify plugin is successfully installed
-->
-1. Add the binary to your `PATH` environment variable.
+1. Append or prepend the kubectl binary folder to your `PATH` environment variable.

1. Verify the plugin is successfully installed
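On this page the "verify plugin" step usually refers to the `kubectl-convert` plugin; if that is the plugin in question, verification is just invoking it through kubectl:

```shell
# If the plugin binary (e.g. kubectl-convert.exe) is on PATH, kubectl
# discovers it as a subcommand; a successful help call confirms the install.
kubectl convert --help
```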
@@ -23,11 +23,10 @@ each of which has a sequence of steps.
Before walking through each tutorial, you may want to bookmark the
[Standardized Glossary](/docs/reference/glossary/) page for later references.
-->
-This section of the Kubernetes documentation contains tutorials. A tutorial shows how to accomplish a goal that is larger than a single
-[task](/zh/docs/tasks/).
+This section of the Kubernetes documentation contains tutorials.
+A tutorial shows how to accomplish a goal that is larger than a single [task](/zh/docs/tasks/).
Typically a tutorial has several sections, each of which has a sequence of steps. Before walking through each tutorial,
-you (您) may want to bookmark the [Standardized Glossary](/zh/docs/reference/glossary/) page for later reference.
+you (你) may want to bookmark the [Standardized Glossary](/zh/docs/reference/glossary/) page for later reference.

<!-- body -->
<!--
@@ -41,11 +40,9 @@ This section of the Kubernetes documentation contains tutorials. A tutorial shows how to accomplish
-->
## Basics {#basics}

-* [Kubernetes Basics](/zh/docs/tutorials/Kubernetes-Basics/) is an in-depth
-  interactive tutorial that helps you (您) understand the Kubernetes system and try out some basic Kubernetes features.
+* [Kubernetes Basics](/zh/docs/tutorials/Kubernetes-Basics/)
+  is an in-depth interactive tutorial that helps you (你) understand the Kubernetes system and try out some basic Kubernetes features.

-* [Introduction to Kubernetes (edx)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#)
+* [Introduction to Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#)

* [Hello Minikube](/zh/docs/tutorials/hello-minikube/)

<!--
@@ -58,8 +55,7 @@ This section of the Kubernetes documentation contains tutorials.
## Configuration {#configuration}

* [Example: Configuring a Java Microservice](/zh/docs/tutorials/configuration/configure-java-microservice/)
-* [Configuring Redis using a ConfigMap (使用一个 ConfigMap 配置 Redis)](/zh/docs/tutorials/configuration/configure-redis-using-configmap/)
+* [Configuring Redis using a ConfigMap (使用 ConfigMap 配置 Redis)](/zh/docs/tutorials/configuration/configure-redis-using-configmap/)

<!--
## Stateless Applications
@@ -71,10 +67,8 @@ This section of the Kubernetes documentation contains tutorials.
## Stateless Applications {#stateless-applications}

* [Exposing an External IP Address to Access an Application in a Cluster](/zh/docs/tutorials/stateless-application/expose-external-ip-address/)

* [Example: Deploying PHP Guestbook application with Redis](/zh/docs/tutorials/stateless-application/guestbook/)

<!--
## Stateful Applications
@@ -89,26 +83,10 @@ This section of the Kubernetes documentation contains tutorials.
## Stateful Applications {#stateful-applications}

* [StatefulSet Basics](/zh/docs/tutorials/stateful-application/basic-stateful-set/)

* [Example: WordPress and MySQL with Persistent Volumes](/zh/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/)

* [Example: Deploying Cassandra with a StatefulSet](/zh/docs/tutorials/stateful-application/cassandra/)

* [Running ZooKeeper, a CP Distributed System](/zh/docs/tutorials/stateful-application/zookeeper/)

-<!--
-## Clusters
-
-* [AppArmor](/docs/tutorials/clusters/apparmor/)
-
-* [seccomp](/docs/tutorials/clusters/seccomp/)
--->
-## Clusters {#clusters}
-
-* [AppArmor](/zh/docs/tutorials/clusters/apparmor/)
-
-* [seccomp](/zh/docs/tutorials/clusters/seccomp/)

<!--
## Services

@@ -123,11 +101,15 @@ This section of the Kubernetes documentation contains tutorials.

* [Apply Pod Security Standards at Cluster level](/docs/tutorials/security/cluster-level-pss/)
* [Apply Pod Security Standards at Namespace level](/docs/tutorials/security/ns-level-pss/)
+* [AppArmor](/zh/docs/tutorials/security/apparmor/)
+* [seccomp](/zh/docs/tutorials/security/seccomp/)
-->
## Security {#security}

* [Apply Pod Security Standards at the cluster level](/zh/docs/tutorials/security/cluster-level-pss/)
* [Apply Pod Security Standards at the namespace level](/zh/docs/tutorials/security/ns-level-pss/)
+* [AppArmor](/zh/docs/tutorials/security/apparmor/)
+* [seccomp](/zh/docs/tutorials/security/seccomp/)

## {{% heading "whatsnext" %}}
@@ -136,6 +118,6 @@ If you would like to write a tutorial, see
[Content Page Types](/docs/contribute/style/page-content-types/)
for information about the tutorial page.
-->
-If you (您) would like to write a tutorial, see [Content Page Types](/zh/docs/contribute/style/page-content-types/)
+If you (你) would like to write a tutorial, see [Content Page Types](/zh/docs/contribute/style/page-content-types/)
for information about the tutorial page type.

@@ -244,10 +244,10 @@ Pod runs a Container based on the provided Docker image.

<!--
For more information about `kubectl` commands, see the
-[kubectl overview](/docs/user-guide/kubectl-overview/).
+[kubectl overview](/docs/reference/kubectl/).
-->
{{< note >}}
-For more information about `kubectl` commands, see the [kubectl overview](/zh/docs/reference/kubectl/overview/).
+For more information about `kubectl` commands, see the [kubectl overview](/zh/docs/reference/kubectl/).
{{< /note >}}

<!--
@@ -153,7 +153,7 @@ command=GET
## Source IP for Services with `Type=NodePort`

-Packets sent to Services with [Type=NodePort](/zh/docs/user-guide/services/#type-nodeport) are, as of Kubernetes 1.5, source NAT'd by default. You can test this by creating a `NodePort` Service:
+Packets sent to Services with [Type=NodePort](/zh/docs/user-guide/services/#nodeport) are, as of Kubernetes 1.5, source NAT'd by default. You can test this by creating a `NodePort` Service:

```console
kubectl expose deployment source-ip-app --name=nodeport --port=80 --target-port=8080 --type=NodePort
```
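Following the pattern of the surrounding tutorial, one way to observe the NAT'd source address after exposing the Service; this assumes the echo-server image used earlier on that page, which reports `client_address` (node addresses and the allocated port depend on your cluster):

```shell
# Look up the allocated NodePort and every node's internal IP, then curl
# the app on each node and print the client address it saw.
NODEPORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services nodeport)
NODES=$(kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}')
for node in $NODES; do
  curl --connect-timeout 1 -s "$node:$NODEPORT" | grep -i client_address
done
```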
@@ -0,0 +1,10 @@
+content/en/blog/_posts/2016-05-00-Coreosfest2016-Kubernetes-Community.md
+content/en/blog/_posts/2016-10-00-Dynamic-Provisioning-And-Storage-In-Kubernetes.md
+content/en/blog/_posts/2017-02-00-Postgresql-Clusters-Kubernetes-Statefulsets.md
+content/en/blog/_posts/2018-06-28-Airflow-Kubernetes-Operator.md
+content/en/blog/_posts/2018-10-02-network-bootable-farm-with-ltsp.md
+content/en/blog/_posts/2019-03-19-kubeedge-k8s-based-edge-intro.md
+content/en/blog/_posts/2020-05-06-third-party-dual-sourced-content.md
+content/en/blog/_posts/2020-05-21-wsl2-dockerdesktop-k8s.md
+content/en/blog/_posts/2020-07-27-kubernetes-1-17-release-interview.md

@@ -15,14 +15,14 @@
# limitations under the License.
##########
# This script verifies mispellings in location. Today it only supports
# verifying English locale but can be modified in a future to support
-# also other locales.
+# other locales also
# You need to run this script inside the root directory of "website" git repo.
#
# Syntax: verify-spelling.sh LOCALE
# Example: verify-spelling.sh en
# If no locale is passed, it will assume "en"
#
# Requirements:
# - go v1.14 or superior version

@@ -34,6 +34,8 @@ set -o pipefail

TOOL_VERSION="v0.3.4"

+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+
LANGUAGE="${1:-en}"
# cd to the root path
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)"
@@ -60,10 +62,15 @@ cd "${ROOT}"
RES=0
echo "Checking spelling..."
ERROR_LOG="${TMP_DIR}/errors.log"
-git ls-files | grep content/${LANGUAGE} | xargs misspell > "${ERROR_LOG}"
+# NOTE we usually don't correct old blog articles, so we ignore them in
+# this file.
+skipping_file="${KUBE_ROOT}/scripts/.spelling_failures"
+failing_packages=$(sed "s| | -e |g" "${skipping_file}")
+git ls-files -z | grep --null-data "^content/${LANGUAGE}" | grep --null-data -v -e "${failing_packages}" | xargs -0 -r misspell > "${ERROR_LOG}"
if [[ -s "${ERROR_LOG}" ]]; then
  sed 's/^/error: /' "${ERROR_LOG}" # add 'error' to each line to highlight in e2e status
-  echo "Found spelling errors!"
+  echo "Found spelling errors!" >&2
  RES=1
fi
exit "${RES}"
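The `sed "s| | -e |g"` line deserves a note: it turns the skip-list entries into a chain of `-e` patterns for `grep -v`. A quick illustration of the transformation:

```shell
# Spaces between entries become ` -e ` separators:
printf '%s' 'a.md b.md c.md' | sed "s| | -e |g"
# prints: a.md -e b.md -e c.md
#
# With one path per line (as in .spelling_failures), grep still treats the
# newlines inside the single -e argument as separating multiple patterns,
# so every listed blog post is filtered out of the stream fed to misspell.
```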
@@ -245,6 +245,7 @@
/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ /docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/ 301
/docs/tasks/access-kubernetes-api/setup-extension-api-server/ /docs/tasks/extend-kubernetes/setup-extension-api-server/ 301

+/docs/tasks/administer-cluster/access-cluster-services/ /docs/tasks/access-application-cluster/access-cluster-services/ 301
/docs/tasks/administer-cluster/apply-resource-quota-limit/ /docs/tasks/administer-cluster/quota-api-object/ 301
/docs/tasks/administer-cluster/assign-pods-nodes/ /docs/tasks/configure-pod-container/assign-pods-nodes/ 301
/docs/tasks/administer-cluster/calico-network-policy/ /docs/tasks/administer-cluster/network-policy-provider/calico-network-policy/ 301
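Once deployed, a redirect rule like the added one can be spot-checked from the command line; a sketch, not part of the site's test tooling:

```shell
# Expect an HTTP 301 whose Location header points at the new task path.
curl -sI https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-services/ | head -n 5
```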