diff --git a/Dockerfile b/Dockerfile index 93c73d218c..9e9a6d65b0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,29 +4,42 @@ # change is that the Hugo version is now an overridable argument rather than a fixed # environment variable. -FROM golang:1.15-alpine +FROM golang:1.16-alpine LABEL maintainer="Luc Perkins " RUN apk add --no-cache \ curl \ - git \ - openssh-client \ - rsync \ + gcc \ + g++ \ + musl-dev \ build-base \ - libc6-compat \ - npm && \ - npm install -D autoprefixer postcss-cli + libc6-compat ARG HUGO_VERSION +RUN mkdir $HOME/src && \ + cd $HOME/src && \ + curl -L https://github.com/gohugoio/hugo/archive/refs/tags/v${HUGO_VERSION}.tar.gz | tar -xz && \ + cd "hugo-${HUGO_VERSION}" && \ + go install --tags extended + +FROM golang:1.16-alpine + +RUN apk add --no-cache \ + git \ + openssh-client \ + rsync \ + npm && \ + npm install -D autoprefixer postcss-cli + RUN mkdir -p /usr/local/src && \ cd /usr/local/src && \ - curl -L https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_extended_${HUGO_VERSION}_Linux-64bit.tar.gz | tar -xz && \ - mv hugo /usr/local/bin/hugo && \ addgroup -Sg 1000 hugo && \ adduser -Sg hugo -u 1000 -h /src hugo +COPY --from=0 /go/bin/hugo /usr/local/bin/hugo + WORKDIR /src USER hugo:hugo diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index b1a887ba33..6c8efc4bd8 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -31,6 +31,7 @@ aliases: - savitharaghunathan - sftim - tengqm + - zacharysarah sig-docs-en-reviews: # PR reviews for English content - bradtopol - celestehorgan @@ -44,6 +45,7 @@ aliases: - sftim - shannonxtreme - tengqm + - zacharysarah sig-docs-es-owners: # Admins for Spanish content - raelga - electrocucaracha diff --git a/README.md b/README.md index 87dd03e7eb..005452ef06 100644 --- a/README.md +++ b/README.md @@ -167,6 +167,14 @@ For more information about contributing to the Kubernetes documentation, see: - [Documentation Style Guide](https://kubernetes.io/docs/contribute/style/style-guide/) - [Localizing Kubernetes Documentation](https://kubernetes.io/docs/contribute/localization/) +### New contributor ambassadors + +If you need help at any point when contributing, the [New Contributor Ambassadors](https://kubernetes.io/docs/contribute/advanced/#serve-as-a-new-contributor-ambassador) are a good point of contact. These are SIG Docs approvers whose responsibilities include mentoring new contributors and helping them through their first few pull requests. The best place to contact the New Contributors Ambassadors would be on the [Kubernetes Slack](https://slack.k8s.io/). Current New Contributors Ambassadors for SIG Docs: + +| Name | Slack | GitHub | +| -------------------------- | -------------------------- | -------------------------- | +| Arsh Sharma | @arsh | @RinkiyaKeDad | + ## Localization `README.md`'s | Language | Language | diff --git a/content/en/blog/_posts/2015-04-00-Faster-Than-Speeding-Latte.md b/content/en/blog/_posts/2015-04-00-Faster-Than-Speeding-Latte.md index fd6118b1c5..90a11d2582 100644 --- a/content/en/blog/_posts/2015-04-00-Faster-Than-Speeding-Latte.md +++ b/content/en/blog/_posts/2015-04-00-Faster-Than-Speeding-Latte.md @@ -1,8 +1,11 @@ --- -title: " Faster than a speeding Latte " +title: "Faster than a speeding Latte" date: 2015-04-06 slug: faster-than-speeding-latte url: /blog/2015/04/Faster-Than-Speeding-Latte +evergreen: true --- + Check out Brendan Burns racing Kubernetes. 
-[![Check out Brendan Burns racing Kubernetes](https://img.youtube.com/vi/7vZ9dRKRMyc/0.jpg)](https://www.youtube.com/watch?v=?7vZ9dRKRMyc) + +{{< youtube id="7vZ9dRKRMyc" title="Latte vs. Kubernetes setup - which is faster?">}} diff --git a/content/en/blog/_posts/2019-02-11-runc-CVE-2019-5736.md b/content/en/blog/_posts/2019-02-11-runc-CVE-2019-5736.md index 84482daf79..027cc2e9bf 100644 --- a/content/en/blog/_posts/2019-02-11-runc-CVE-2019-5736.md +++ b/content/en/blog/_posts/2019-02-11-runc-CVE-2019-5736.md @@ -1,17 +1,20 @@ --- title: Runc and CVE-2019-5736 date: 2019-02-11 +evergreen: false # mentions PodSecurityPolicy --- +Authors: Kubernetes Product Security Committee + This morning [a container escape vulnerability in runc was announced](https://www.openwall.com/lists/oss-security/2019/02/11/2). We wanted to provide some guidance to Kubernetes users to ensure everyone is safe and secure. -## What Is Runc? +## What is runc? Very briefly, runc is the low-level tool which does the heavy lifting of spawning a Linux container. Other tools like Docker, Containerd, and CRI-O sit on top of runc to deal with things like data formatting and serialization, but runc is at the heart of all of these systems. Kubernetes in turn sits on top of those tools, and so while no part of Kubernetes itself is vulnerable, most Kubernetes installations are using runc under the hood. -### What Is The Vulnerability? +### What is the vulnerability? While full details are still embargoed to give people time to patch, the rough version is that when running a process as root (UID 0) inside a container, that process can exploit a bug in runc to gain root privileges on the host running the container. This then allows them unlimited access to the server as well as any other containers on that server. @@ -19,13 +22,14 @@ If the process inside the container is either trusted (something you know is not The most common source of risk is attacker-controlled container images, such as unvetted images from public repositories. -### What Should I Do? +### What should I do? As with all security issues, the two main options are to mitigate the vulnerability or upgrade your version of runc to one that includes the fix. As the exploit requires UID 0 within the container, a direct mitigation is to ensure all your containers are running as a non-0 user. This can be set within the container image, or via your pod specification: ```yaml +--- apiVersion: v1 kind: Pod metadata: @@ -39,6 +43,7 @@ spec: This can also be enforced globally using a PodSecurityPolicy: ```yaml +--- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: @@ -89,7 +94,7 @@ We don't have specific confirmation that Docker for Mac and Docker for Windows a If you are unable to upgrade Docker, the Rancher team has provided backports of the fix for many older versions at [github.com/rancher/runc-cve](https://github.com/rancher/runc-cve). -## Getting More Information +## Getting more information If you have any further questions about how this vulnerability impacts Kubernetes, please join us at [discuss.kubernetes.io](https://discuss.kubernetes.io/).
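The manifests in the hunks above are truncated at the hunk boundaries. As a complete, hedged sketch of the pod-level mitigation the post describes (the name, UID, and image are illustrative, not taken from the original post):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: run-as-nonroot-example   # illustrative name
spec:
  securityContext:
    runAsUser: 1000      # any non-zero UID removes the UID 0 precondition for the exploit
    runAsNonRoot: true   # the kubelet refuses to start containers that would run as root
  containers:
  - name: app
    image: registry.example.com/my-app:1.0   # placeholder image
```

Combining `runAsNonRoot` with a non-zero `runAsUser` gives some defense in depth: even if an image later changes its default user, the Pod still will not run as UID 0.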
diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/index.md b/content/en/blog/_posts/2021-04-22-gateway-api/index.md index d9c798a5b1..c22d45cdbb 100644 --- a/content/en/blog/_posts/2021-04-22-gateway-api/index.md +++ b/content/en/blog/_posts/2021-04-22-gateway-api/index.md @@ -30,15 +30,15 @@ This led to design principles that allow the Gateway API to improve upon Ingress The Gateway API introduces a few new resource types: -- **[GatewayClasses](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.GatewayClass)** are cluster-scoped resources that act as templates to explicitly define behavior for Gateways derived from them. This is similar in concept to StorageClasses, but for networking data-planes. -- **[Gateways](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.Gateway)** are the deployed instances of GatewayClasses. They are the logical representation of the data-plane which performs routing, which may be in-cluster proxies, hardware LBs, or cloud LBs. -- **Routes** are not a single resource, but represent many different protocol-specific Route resources. The [HTTPRoute](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.HTTPRoute) has matching, filtering, and routing rules that get applied to Gateways that can process HTTP and HTTPS traffic. Similarly, there are [TCPRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.TCPRoute), [UDPRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.UDPRoute), and [TLSRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.TLSRoute) which also have protocol-specific semantics. This model also allows the Gateway API to incrementally expand its protocol support in the future. +- **[GatewayClasses](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.GatewayClass)** are cluster-scoped resources that act as templates to explicitly define behavior for Gateways derived from them. This is similar in concept to StorageClasses, but for networking data-planes. +- **[Gateways](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.Gateway)** are the deployed instances of GatewayClasses. They are the logical representation of the data-plane which performs routing, which may be in-cluster proxies, hardware LBs, or cloud LBs. +- **Routes** are not a single resource, but represent many different protocol-specific Route resources. The [HTTPRoute](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.HTTPRoute) has matching, filtering, and routing rules that get applied to Gateways that can process HTTP and HTTPS traffic. Similarly, there are [TCPRoutes](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.TCPRoute), [UDPRoutes](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.UDPRoute), and [TLSRoutes](https://gateway-api.sigs.k8s.io/v1alpha1/references/spec/#networking.x-k8s.io/v1alpha1.TLSRoute) which also have protocol-specific semantics. This model also allows the Gateway API to incrementally expand its protocol support in the future. 
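To make the resource model concrete before the diagram below, here is a rough, hedged sketch of a Gateway and an HTTPRoute using the `networking.x-k8s.io/v1alpha1` API this post covers; treat the class name, labels, and Service name as placeholders, and check the v1alpha1 spec for the authoritative field names:

```yaml
apiVersion: networking.x-k8s.io/v1alpha1
kind: Gateway
metadata:
  name: example-gateway
spec:
  gatewayClassName: example-class    # supplied by whichever Gateway controller you install
  listeners:
  - protocol: HTTP
    port: 80
    routes:
      kind: HTTPRoute
      selector:
        matchLabels:
          gateway: example-gateway   # bind any HTTPRoute carrying this label
---
apiVersion: networking.x-k8s.io/v1alpha1
kind: HTTPRoute
metadata:
  name: example-route
  labels:
    gateway: example-gateway         # matches the Gateway's route selector above
spec:
  rules:
  - matches:
    - path:
        type: Prefix
        value: /
    forwardTo:
    - serviceName: example-service   # placeholder backend Service
      port: 8080
```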
![The resources of the Gateway API](gateway-api-resources.png) ### Gateway Controller Implementations -The good news is that although Gateway is in [Alpha](https://github.com/kubernetes-sigs/gateway-api/releases), there are already several [Gateway controller implementations](https://gateway-api.sigs.k8s.io/references/implementations/) that you can run. Since it’s a standardized spec, the following example could be run on any of them and should function the exact same way. Check out [getting started](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see how to install and use one of these Gateway controllers. +The good news is that although Gateway is in [Alpha](https://github.com/kubernetes-sigs/gateway-api/releases), there are already several [Gateway controller implementations](https://gateway-api.sigs.k8s.io/implementations/) that you can run. Since it’s a standardized spec, the following example could be run on any of them and should function the exact same way. Check out [getting started](https://gateway-api.sigs.k8s.io/v1alpha1/guides/getting-started/) to see how to install and use one of these Gateway controllers. ## Getting Hands-on with the Gateway API @@ -134,7 +134,7 @@ spec: So we have two HTTPRoutes matching and routing traffic to different Services. You might be wondering, where are these Services accessible? Through which networks or IPs are they exposed? -How Routes are exposed to clients is governed by [Route binding](https://gateway-api.sigs.k8s.io/concepts/api-overview/#route-binding), which describes how Routes and Gateways create a bidirectional relationship between each other. When Routes are bound to a Gateway it means their collective routing rules are configured on the underlying load balancers or proxies and the Routes are accessible through the Gateway. Thus, a Gateway is a logical representation of a networking data plane that can be configured through Routes. +How Routes are exposed to clients is governed by [Route binding](https://gateway-api.sigs.k8s.io/concepts/api-overview/#route-resources), which describes how Routes and Gateways create a bidirectional relationship between each other. When Routes are bound to a Gateway it means their collective routing rules are configured on the underlying load balancers or proxies and the Routes are accessible through the Gateway. Thus, a Gateway is a logical representation of a networking data plane that can be configured through Routes. ![How Routes bind with Gateways](route-binding.png ) @@ -192,6 +192,6 @@ When you put it all together, you have a single load balancing infrastructure th There are many resources to check out to learn more. -* Check out the [user guides](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see what use-cases can be addressed. -* Try out one of the [existing Gateway controllers ](https://gateway-api.sigs.k8s.io/references/implementations/) +* Check out the [user guides](https://gateway-api.sigs.k8s.io/v1alpha1/guides/getting-started/) to see what use-cases can be addressed. +* Try out one of the [existing Gateway controllers ](https://gateway-api.sigs.k8s.io/implementations/) * Or [get involved](https://gateway-api.sigs.k8s.io/contributing/community/) and help design and influence the future of Kubernetes service networking! 
diff --git a/content/en/blog/_posts/2022-03-15-meet-our-contributors-APAC-AU-NZ-region-01.md b/content/en/blog/_posts/2022-03-15-meet-our-contributors-APAC-AU-NZ-region-01.md new file mode 100644 index 0000000000..5a8a4a2989 --- /dev/null +++ b/content/en/blog/_posts/2022-03-15-meet-our-contributors-APAC-AU-NZ-region-01.md @@ -0,0 +1,78 @@ +--- +layout: blog +title: "Meet Our Contributors - APAC (Aus-NZ region)" +date: 2022-03-16T12:00:00+0000 +slug: meet-our-contributors-au-nz-ep-02 +canonicalUrl: https://www.kubernetes.dev/blog/2022/03/14/meet-our-contributors-au-nz-ep-02/ +--- + +**Authors & Interviewers:** [Anubhav Vardhan](https://github.com/anubha-v-ardhan), [Atharva Shinde](https://github.com/Atharva-Shinde), [Avinesh Tripathi](https://github.com/AvineshTripathi), [Brad McCoy](https://github.com/bradmccoydev), [Debabrata Panigrahi](https://github.com/Debanitrkl), [Jayesh Srivastava](https://github.com/jayesh-srivastava), [Kunal Verma](https://github.com/verma-kunal), [Pranshu Srivastava](https://github.com/PranshuSrivastava), [Priyanka Saggu](https://github.com/Priyankasaggu11929/), [Purneswar Prasad](https://github.com/PurneswarPrasad), [Vedant Kakde](https://github.com/vedant-kakde) + +--- + +Good day, everyone 👋 + +Welcome back to the second episode of the "Meet Our Contributors" blog post series for APAC. + +This post will feature four outstanding contributors from the Australia and New Zealand regions, who have played diverse leadership and community roles in the upstream Kubernetes project. + +So, without further ado, let's get straight to the blog. + +## [Caleb Woodbine](https://github.com/BobyMCbobs) + +Caleb Woodbine is currently a member of the ii.nz organisation. + +He began contributing to the Kubernetes project in 2018 as a member of the Kubernetes Conformance working group. His experience was positive, and he benefited from early guidance from [Hippie Hacker](https://github.com/hh), a fellow contributor from New Zealand. + +He has made major contributions to the Kubernetes project since then through the `SIG k8s-infra` and `k8s-conformance` working groups. + +Caleb is also a co-organizer of the [CloudNative NZ](https://www.meetup.com/cloudnative-nz/) community events, which aim to expand the reach of the Kubernetes project throughout New Zealand in order to encourage technical education and improved employment opportunities. + +> _There need to be more outreach in APAC and the educators and universities must pick up Kubernetes, as they are very slow and about 8+ years out of date. NZ tends to rather pay overseas than educate locals on the latest cloud tech Locally._ + +## [Dylan Graham](https://github.com/DylanGraham) + +Dylan Graham is a cloud engineer from Adelaide, Australia. He has been contributing to the upstream Kubernetes project since 2018. + +He stated that being a part of such a large-scale project was initially overwhelming, but that the community's friendliness and openness assisted him in getting through it. + +He began by contributing to the project documentation and is now mostly focused on community support for the APAC region. + +He believes that consistent attendance at community/project meetings, taking on project tasks, and seeking community guidance as needed can help new aspiring developers become effective contributors. + +> _The feeling of being a part of a large community is really special.
I've met some amazing people, even some before the pandemic in real life :)_ + +## [Hippie Hacker](https://github.com/hh) + +Hippie has worked for the CNCF.io as a Strategic Initiatives contractor from New Zealand for almost 5+ years. He is an active contributor to k8s-infra, API conformance testing, Cloud provider conformance submissions, and apisnoop.cncf.io domains of the upstream Kubernetes & CNCF projects. + +He recounts their early involvement with the Kubernetes project, which began roughly 5 years ago when their firm, ii.nz, demonstrated [network booting from a Raspberry Pi using PXE and running Gitlab in-cluster to install Kubernetes on servers](https://ii.nz/post/bringing-the-cloud-to-your-community/). + +He describes their own contributing experience as someone who, at first, tried to do all of the hard lifting on their own, but eventually saw the benefit of group contributions which reduced burnout and task division which allowed folks to keep moving forward on their own momentum. + +He recommends that new contributors use pair programming. + +> _The cross pollination of approaches and two pairs of eyes on the same work can often yield a much more amplified effect than a PR comment / approval alone can afford._ + +## [Nick Young](https://github.com/youngnick) + +Nick Young works at VMware as a technical lead for Contour, a CNCF ingress controller. He was active with the upstream Kubernetes project from the beginning, and eventually became the chair of the LTS working group, where he advocated user concerns. He is currently the SIG Network Gateway API subproject's maintainer. + +His contribution path was notable in that he began working on major areas of the Kubernetes project early on, skewing his trajectory. + +He asserts the best thing a new contributor can do is to "start contributing". Naturally, if it is relevant to their employment, that is excellent; however, investing non-work time in contributing can pay off in the long run in terms of work. He believes that new contributors, particularly those who are currently Kubernetes users, should be encouraged to participate in higher-level project discussions. + +> _Just being active and contributing will get you a long way. Once you've been active for a while, you'll find that you're able to answer questions, which will mean you're asked questions, and before you know it you are an expert._ + + + + +--- + +If you have any recommendations/suggestions for who we should interview next, please let us know in #sig-contribex. Your suggestions would be much appreciated. We're thrilled to have additional folks assisting us in reaching out to even more wonderful individuals of the community. + + +We'll see you all in the next one. Everyone, till then, have a happy contributing! 👋 + + + diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index 6946a6d706..049e7cc190 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -33,9 +33,9 @@ There are two main ways to have Nodes added to the {{< glossary_tooltip text="AP 1. The kubelet on a node self-registers to the control plane 2. You (or another human user) manually add a Node object -After you create a Node {{< glossary_tooltip text="object" term_id="object" >}}, or the kubelet on a node self-registers, the -control plane checks whether the new Node object is valid. 
For example, if you -try to create a Node from the following JSON manifest: +After you create a Node {{< glossary_tooltip text="object" term_id="object" >}}, +or the kubelet on a node self-registers, the control plane checks whether the new Node object is +valid. For example, if you try to create a Node from the following JSON manifest: ```json { @@ -85,19 +85,23 @@ register itself with the API server. This is the preferred pattern, used by mos For self-registration, the kubelet is started with the following options: - - `--kubeconfig` - Path to credentials to authenticate itself to the API server. - - `--cloud-provider` - How to talk to a {{< glossary_tooltip text="cloud provider" term_id="cloud-provider" >}} to read metadata about itself. - - `--register-node` - Automatically register with the API server. - - `--register-with-taints` - Register the node with the given list of {{< glossary_tooltip text="taints" term_id="taint" >}} (comma separated `=:`). +- `--kubeconfig` - Path to credentials to authenticate itself to the API server. +- `--cloud-provider` - How to talk to a {{< glossary_tooltip text="cloud provider" term_id="cloud-provider" >}} + to read metadata about itself. +- `--register-node` - Automatically register with the API server. +- `--register-with-taints` - Register the node with the given list of + {{< glossary_tooltip text="taints" term_id="taint" >}} (comma separated `=:`). - No-op if `register-node` is false. - - `--node-ip` - IP address of the node. - - `--node-labels` - {{< glossary_tooltip text="Labels" term_id="label" >}} to add when registering the node in the cluster (see label restrictions enforced by the [NodeRestriction admission plugin](/docs/reference/access-authn-authz/admission-controllers/#noderestriction)). - - `--node-status-update-frequency` - Specifies how often kubelet posts its node status to the API server. + No-op if `register-node` is false. +- `--node-ip` - IP address of the node. +- `--node-labels` - {{< glossary_tooltip text="Labels" term_id="label" >}} to add when registering the node + in the cluster (see label restrictions enforced by the + [NodeRestriction admission plugin](/docs/reference/access-authn-authz/admission-controllers/#noderestriction)). +- `--node-status-update-frequency` - Specifies how often kubelet posts its node status to the API server. When the [Node authorization mode](/docs/reference/access-authn-authz/node/) and -[NodeRestriction admission plugin](/docs/reference/access-authn-authz/admission-controllers/#noderestriction) are enabled, -kubelets are only authorized to create/modify their own Node resource. +[NodeRestriction admission plugin](/docs/reference/access-authn-authz/admission-controllers/#noderestriction) +are enabled, kubelets are only authorized to create/modify their own Node resource. {{< note >}} As mentioned in the [Node name uniqueness](#node-name-uniqueness) section, @@ -168,8 +172,10 @@ Each section of the output is described below. The usage of these fields varies depending on your cloud provider or bare metal configuration. -* HostName: The hostname as reported by the node's kernel. Can be overridden via the kubelet `--hostname-override` parameter. -* ExternalIP: Typically the IP address of the node that is externally routable (available from outside the cluster). +* HostName: The hostname as reported by the node's kernel. Can be overridden via the kubelet + `--hostname-override` parameter. 
+* ExternalIP: Typically the IP address of the node that is externally routable (available from + outside the cluster). * InternalIP: Typically the IP address of the node that is routable only within the cluster. @@ -289,7 +295,6 @@ and for updating their related Leases. updates to the Node's `.status`. If the Lease update fails, the kubelet retries, using exponential backoff that starts at 200 milliseconds and capped at 7 seconds. - ## Node controller The node {{< glossary_tooltip text="controller" term_id="controller" >}} is a @@ -306,6 +311,7 @@ controller deletes the node from its list of nodes. The third is monitoring the nodes' health. The node controller is responsible for: + - In the case that a node becomes unreachable, updating the NodeReady condition of within the Node's `.status`. In this case the node controller sets the NodeReady condition to `ConditionUnknown`. @@ -327,6 +333,7 @@ The node eviction behavior changes when a node in a given availability zone becomes unhealthy. The node controller checks what percentage of nodes in the zone are unhealthy (NodeReady condition is `ConditionUnknown` or `ConditionFalse`) at the same time: + - If the fraction of unhealthy nodes is at least `--unhealthy-zone-threshold` (default 0.55), then the eviction rate is reduced. - If the cluster is small (i.e. has less than or equal to @@ -391,7 +398,9 @@ for more information. The kubelet attempts to detect node system shutdown and terminates pods running on the node. -Kubelet ensures that pods follow the normal [pod termination process](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) during the node shutdown. +Kubelet ensures that pods follow the normal +[pod termination process](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) +during the node shutdown. The Graceful node shutdown feature depends on systemd since it takes advantage of [systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/) to @@ -404,18 +413,26 @@ enabled by default in 1.21. Note that by default, both configuration options described below, `shutdownGracePeriod` and `shutdownGracePeriodCriticalPods` are set to zero, thus not activating Graceful node shutdown functionality. -To activate the feature, the two kubelet config settings should be configured appropriately and set to non-zero values. +To activate the feature, the two kubelet config settings should be configured appropriately and +set to non-zero values. During a graceful shutdown, kubelet terminates pods in two phases: 1. Terminate regular pods running on the node. -2. Terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) running on the node. +2. Terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) + running on the node. + +Graceful node shutdown feature is configured with two +[`KubeletConfiguration`](/docs/tasks/administer-cluster/kubelet-config-file/) options: -Graceful node shutdown feature is configured with two [`KubeletConfiguration`](/docs/tasks/administer-cluster/kubelet-config-file/) options: * `shutdownGracePeriod`: - * Specifies the total duration that the node should delay the shutdown by. This is the total grace period for pod termination for both regular and [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical). + * Specifies the total duration that the node should delay the shutdown by. 
This is the total + grace period for pod termination for both regular and + [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical). * `shutdownGracePeriodCriticalPods`: - * Specifies the duration used to terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) during a node shutdown. This value should be less than `shutdownGracePeriod`. + * Specifies the duration used to terminate + [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) + during a node shutdown. This value should be less than `shutdownGracePeriod`. For example, if `shutdownGracePeriod=30s`, and `shutdownGracePeriodCriticalPods=10s`, kubelet will delay the node shutdown by @@ -443,8 +460,8 @@ To provide more flexibility during graceful node shutdown around the ordering of pods during shutdown, graceful node shutdown honors the PriorityClass for Pods, provided that you enabled this feature in your cluster. The feature allows cluster administrators to explicitly define the ordering of pods -during graceful node shutdown based on [priority -classes](docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass). +during graceful node shutdown based on +[priority classes](/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass). The [Graceful Node Shutdown](#graceful-node-shutdown) feature, as described above, shuts down pods in two phases, non-critical pods, followed by critical @@ -457,8 +474,8 @@ graceful node shutdown in multiple phases, each phase shutting down a particular priority class of pods. The kubelet can be configured with the exact phases and shutdown time per phase. -Assuming the following custom pod [priority -classes](docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass) +Assuming the following custom pod +[priority classes](/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass) in a cluster, |Pod priority class name|Pod priority class value| @@ -492,7 +509,7 @@ shutdownGracePeriodByPodPriority: shutdownGracePeriodSeconds: 60 ``` -The above table implies that any pod with priority value >= 100000 will get +The above table implies that any pod with `priority` value >= 100000 will get just 10 seconds to stop, any pod with value >= 10000 and < 100000 will get 180 seconds to stop, any pod with value >= 1000 and < 10000 will get 120 seconds to stop. Finally, all other pods will get 60 seconds to stop. @@ -507,8 +524,8 @@ example, you could instead use these settings: | 0 |60 seconds | -In the above case, the pods with custom-class-b will go into the same bucket -as custom-class-c for shutdown. +In the above case, the pods with `custom-class-b` will go into the same bucket +as `custom-class-c` for shutdown. If there are no pods in a particular range, then the kubelet does not wait for pods in that priority range. Instead, the kubelet immediately skips to the @@ -577,3 +594,4 @@ see [KEP-2400](https://github.com/kubernetes/enhancements/issues/2400) and its * Read the [Node](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) section of the architecture design document. * Read about [taints and tolerations](/docs/concepts/scheduling-eviction/taint-and-toleration/).
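To tie the graceful node shutdown settings above together, here is a hedged sketch of a kubelet configuration file that enables the feature with the 30s/10s split used in the example earlier (the values are illustrative; tune them to your workloads):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Total time the node shutdown is delayed; regular pods get the first
# 20 seconds (shutdownGracePeriod - shutdownGracePeriodCriticalPods) to stop.
shutdownGracePeriod: "30s"
# The final portion of the delay, reserved for critical pods.
shutdownGracePeriodCriticalPods: "10s"
```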
+ diff --git a/content/en/docs/concepts/configuration/secret.md b/content/en/docs/concepts/configuration/secret.md index 82e8a5737d..d9611439a4 100644 --- a/content/en/docs/concepts/configuration/secret.md +++ b/content/en/docs/concepts/configuration/secret.md @@ -6,7 +6,8 @@ content_type: concept feature: title: Secret and configuration management description: > - Deploy and update secrets and application configuration without rebuilding your image and without exposing secrets in your stack configuration. + Deploy and update secrets and application configuration without rebuilding your image + and without exposing secrets in your stack configuration. weight: 30 --- @@ -22,8 +23,8 @@ application code. Because Secrets can be created independently of the Pods that use them, there is less risk of the Secret (and its data) being exposed during the workflow of creating, viewing, and editing Pods. Kubernetes, and applications that run in -your cluster, can also take additional precautions with Secrets, such as -avoiding writing confidential data to nonvolatile storage. +your cluster, can also take additional precautions with Secrets, such as avoiding +writing secret data to nonvolatile storage. Secrets are similar to {{< glossary_tooltip text="ConfigMaps" term_id="configmap" >}} but are specifically intended to hold confidential data. @@ -35,19 +36,21 @@ Additionally, anyone who is authorized to create a Pod in a namespace can use th In order to safely use Secrets, take at least the following steps: 1. [Enable Encryption at Rest](/docs/tasks/administer-cluster/encrypt-data/) for Secrets. -2. Enable or configure [RBAC rules](/docs/reference/access-authn-authz/authorization/) that - restrict reading data in Secrets (including via indirect means). -3. Where appropriate, also use mechanisms such as RBAC to limit which principals are allowed to create new Secrets or replace existing ones. +1. [Enable or configure RBAC rules](/docs/reference/access-authn-authz/authorization/) that + restrict reading and writing the Secret. Be aware that secrets can be obtained + implicitly by anyone with the permission to create a Pod. +1. Where appropriate, also use mechanisms such as RBAC to limit which principals are allowed + to create new Secrets or replace existing ones. {{< /caution >}} +See [Information security for Secrets](#information-security-for-secrets) for more details. + -## Overview of Secrets - -To use a Secret, a Pod needs to reference the Secret. -A Secret can be used with a Pod in three ways: +## Uses for Secrets +There are three main ways for a Pod to use a Secret: - As [files](#using-secrets-as-files-from-a-pod) in a {{< glossary_tooltip text="volume" term_id="volume" >}} mounted on one or more of its containers. @@ -58,8 +61,50 @@ The Kubernetes control plane also uses Secrets; for example, [bootstrap token Secrets](#bootstrap-token-secrets) are a mechanism to help automate node registration. +### Alternatives to Secrets + +Rather than using a Secret to protect confidential data, you can pick from alternatives. + +Here are some of your options: + +- if your cloud-native component needs to authenticate to another application that you + know is running within the same Kubernetes cluster, you can use a + [ServiceAccount](/docs/reference/access-authn-authz/authentication/#service-account-tokens) + and its tokens to identify your client. +- there are third-party tools that you can run, either within or outside your cluster, + that provide secrets management. 
For example, a service that Pods access over HTTPS, + that reveals a secret if the client correctly authenticates (for example, with a ServiceAccount + token). +- for authentication, you can implement a custom signer for X.509 certificates, and use + [CertificateSigningRequests](/docs/reference/access-authn-authz/certificate-signing-requests/) + to let that custom signer issue certificates to Pods that need them. +- you can use a [device plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) + to expose node-local encryption hardware to a specific Pod. For example, you can schedule + trusted Pods onto nodes that provide a Trusted Platform Module, configured out-of-band. + +You can also combine two or more of those options, including the option to use Secret objects themselves. + +For example: implement (or deploy) an {{< glossary_tooltip text="operator" term_id="operator-pattern" >}} +that fetches short-lived session tokens from an external service, and then creates Secrets based +on those short-lived session tokens. Pods running in your cluster can make use of the session tokens, +and operator ensures they are valid. This separation means that you can run Pods that are unaware of +the exact mechanisms for issuing and refreshing those session tokens. + +## Working with Secrets + +### Creating a Secret + +There are several options to create a Secret: + +- [create Secret using `kubectl` command](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) +- [create Secret from config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) +- [create Secret using kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) + +#### Constraints on Secret names and data {#restriction-names-data} + The name of a Secret object must be a valid [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + You can specify the `data` and/or the `stringData` field when creating a configuration file for a Secret. The `data` and the `stringData` fields are optional. The values for all keys in the `data` field have to be base64-encoded strings. @@ -72,21 +117,732 @@ merged into the `data` field. If a key appears in both the `data` and the `stringData` field, the value specified in the `stringData` field takes precedence. +#### Size limit {#restriction-data-size} + +Individual secrets are limited to 1MiB in size. This is to discourage creation +of very large secrets that could exhaust the API server and kubelet memory. +However, creation of many smaller secrets could also exhaust memory. You can +use a [resource quota](/docs/concepts/policy/resource-quotas/) to limit the +number of Secrets (or other resources) in a namespace. + +### Editing a Secret + +You can edit an existing Secret using kubectl: + +```shell +kubectl edit secrets mysecret +``` + +This opens your default editor and allows you to update the base64 encoded Secret +values in the `data` field; for example: + +```yaml +# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file, it will be +# reopened with the relevant failures. +# +apiVersion: v1 +data: + username: YWRtaW4= + password: MWYyZDFlMmU2N2Rm +kind: Secret +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: { ... 
} + creationTimestamp: 2020-01-22T18:41:56Z + name: mysecret + namespace: default + resourceVersion: "164619" + uid: cfee02d6-c137-11e5-8d73-42010af00002 +type: Opaque +``` + +That example manifest defines a Secret with two keys in the `data` field: `username` and `password`. +The values are Base64 strings in the manifest; however, when you use the Secret with a Pod +then the kubelet provides the _decoded_ data to the Pod and its containers. + +You can package many keys and values into one Secret, or use many Secrets, whichever is convenient. + +### Using a Secret + +Secrets can be mounted as data volumes or exposed as +{{< glossary_tooltip text="environment variables" term_id="container-env-variables" >}} +to be used by a container in a Pod. Secrets can also be used by other parts of the +system, without being directly exposed to the Pod. For example, Secrets can hold +credentials that other parts of the system should use to interact with external +systems on your behalf. + +Secret volume sources are validated to ensure that the specified object +reference actually points to an object of type Secret. Therefore, a Secret +needs to be created before any Pods that depend on it. + +If the Secret cannot be fetched (perhaps because it does not exist, or +due to a temporary lack of connection to the API server) the kubelet +periodically retries running that Pod. The kubelet also reports an Event +for that Pod, including details of the problem fetching the Secret. + +#### Optional Secrets {#restriction-secret-must-exist} + +When you define a container environment variable based on a Secret, +you can mark it as _optional_. The default is for the Secret to be +required. + +None of a Pod's containers will start until all non-optional Secrets are +available. + +If a Pod references a specific key in a Secret and that Secret does exist, but +is missing the named key, the Pod fails during startup. + +### Using Secrets as files from a Pod {#using-secrets-as-files-from-a-pod} + +If you want to access data from a Secret in a Pod, one way to do that is to +have Kubernetes make the value of that Secret be available as a file inside +the filesystem of one or more of the Pod's containers. + +To configure that, you: + +1. Create a secret or use an existing one. Multiple Pods can reference the same secret. +1. Modify your Pod definition to add a volume under `.spec.volumes[]`. Name the volume anything, and have a `.spec.volumes[].secret.secretName` field equal to the name of the Secret object. +1. Add a `.spec.containers[].volumeMounts[]` to each container that needs the secret. Specify `.spec.containers[].volumeMounts[].readOnly = true` and `.spec.containers[].volumeMounts[].mountPath` to an unused directory name where you would like the secrets to appear. +1. Modify your image or command line so that the program looks for files in that directory. Each key in the secret `data` map becomes the filename under `mountPath`. + +This is an example of a Pod that mounts a Secret named `mysecret` in a volume: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: mypod +spec: + containers: + - name: mypod + image: redis + volumeMounts: + - name: foo + mountPath: "/etc/foo" + readOnly: true + volumes: + - name: foo + secret: + secretName: mysecret + optional: false # default setting; "mysecret" must exist +``` + +Each Secret you want to use needs to be referred to in `.spec.volumes`. 
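For example, here is a hedged sketch (the sidecar container and its image are placeholders) of a Pod in which two containers consume the same `mysecret` volume:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: mypod-with-sidecar      # illustrative name
spec:
  containers:
  - name: mypod
    image: redis
    volumeMounts:               # each container declares its own mount...
    - name: foo
      mountPath: "/etc/foo"
      readOnly: true
  - name: sidecar
    image: busybox              # placeholder sidecar image
    command: ["sleep", "3600"]
    volumeMounts:
    - name: foo
      mountPath: "/etc/foo"
      readOnly: true
  volumes:                      # ...but the Secret volume is declared only once
  - name: foo
    secret:
      secretName: mysecret
```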
+ +If there are multiple containers in the Pod, then each container needs its +own `volumeMounts` block, but only one `.spec.volumes` is needed per Secret. + +{{< note >}} +Versions of Kubernetes before v1.22 automatically created credentials for accessing +the Kubernetes API. This older mechanism was based on creating token Secrets that +could then be mounted into running Pods. +In more recent versions, including Kubernetes v{{< skew currentVersion >}}, API credentials +are obtained directly by using the [TokenRequest](/docs/reference/kubernetes-api/authentication-resources/token-request-v1/) API, +and are mounted into Pods using a [projected volume](/docs/reference/access-authn-authz/service-accounts-admin/#bound-service-account-token-volume). +The tokens obtained using this method have bounded lifetimes, and are automatically +invalidated when the Pod they are mounted into is deleted. + +You can still [manually create](/docs/tasks/configure-pod-container/configure-service-account/#manually-create-a-service-account-api-token) +a service account token Secret; for example, if you need a token that never expires. +However, using the [TokenRequest](/docs/reference/kubernetes-api/authentication-resources/token-request-v1/) +subresource to obtain a token to access the API is recommended instead. +{{< /note >}} + +#### Projection of Secret keys to specific paths + +You can also control the paths within the volume where Secret keys are projected. +You can use the `.spec.volumes[].secret.items` field to change the target path of each key: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: mypod +spec: + containers: + - name: mypod + image: redis + volumeMounts: + - name: foo + mountPath: "/etc/foo" + readOnly: true + volumes: + - name: foo + secret: + secretName: mysecret + items: + - key: username + path: my-group/my-username +``` + +What will happen: + +* the `username` key from `mysecret` is available to the container at the path + `/etc/foo/my-group/my-username` instead of at `/etc/foo/username`. +* the `password` key from that Secret object is not projected. + +If `.spec.volumes[].secret.items` is used, only keys specified in `items` are projected. +To consume all keys from the Secret, all of them must be listed in the `items` field. + +If you list keys explicitly, then all listed keys must exist in the corresponding Secret. +Otherwise, the volume is not created. + +#### Secret files permissions + +You can set the POSIX file access permission bits for a single Secret key. +If you don't specify any permissions, `0644` is used by default. +You can also set a default mode for the entire Secret volume and override per key if needed. + +For example, you can specify a default mode like this: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: mypod +spec: + containers: + - name: mypod + image: redis + volumeMounts: + - name: foo + mountPath: "/etc/foo" + volumes: + - name: foo + secret: + secretName: mysecret + defaultMode: 0400 +``` + +The secret is mounted on `/etc/foo`; all the files created by the +secret volume mount have permission `0400`. + + +{{< note >}} +If you're defining a Pod or a Pod template using JSON, beware that the JSON +specification doesn't support octal notation. You can use the decimal value +for the `defaultMode` (for example, 0400 in octal is 256 in decimal) instead. +If you're writing YAML, you can write the `defaultMode` in octal. 
+{{< /note >}} + +#### Consuming Secret values from volumes + +Inside the container that mounts a secret volume, the secret keys appear as +files. The secret values are base64 decoded and stored inside these files. + +This is the result of commands executed inside the container from the example above: + +```shell +ls /etc/foo/ +``` + +The output is similar to: + +``` +username +password +``` + +```shell +cat /etc/foo/username +``` + +The output is similar to: + +``` +admin +``` + +```shell +cat /etc/foo/password +``` + +The output is similar to: + +``` +1f2d1e2e67df +``` + +The program in a container is responsible for reading the secret data from these +files, as needed. + +#### Mounted Secrets are updated automatically + +When a volume contains data from a Secret, and that Secret is updated, Kubernetes tracks +this and updates the data in the volume, using an eventually-consistent approach. + +{{< note >}} +A container using a Secret as a +[subPath](/docs/concepts/storage/volumes#using-subpath) volume mount does not receive +automated Secret updates. +{{< /note >}} + +The kubelet keeps a cache of the current keys and values for the Secrets that are used in +volumes for pods on that node. +You can configure the way that the kubelet detects changes from the cached values. The `configMapAndSecretChangeDetectionStrategy` field in +the [kubelet configuration](/docs/reference/config-api/kubelet-config.v1beta1/) controls which strategy the kubelet uses. The default strategy is `Watch`. + +Updates to Secrets can be either propagated by an API watch mechanism (the default), based on +a cache with a defined time-to-live, or polled from the cluster API server on each kubelet +synchronisation loop. + +As a result, the total delay from the moment when the Secret is updated to the moment +when new keys are projected to the Pod can be as long as the kubelet sync period + cache +propagation delay, where the cache propagation delay depends on the chosen cache type +(following the same order listed in the previous paragraph, these are: +watch propagation delay, the configured cache TTL, or zero for direct polling). + +### Using Secrets as environment variables + +To use a Secret in an {{< glossary_tooltip text="environment variable" term_id="container-env-variables" >}} +in a Pod: + +1. Create a Secret (or use an existing one). Multiple Pods can reference the same Secret. +1. Modify your Pod definition in each container that you wish to consume the value of a secret + key to add an environment variable for each secret key you wish to consume. The environment + variable that consumes the secret key should populate the secret's name and key in `env[].valueFrom.secretKeyRef`. +1. Modify your image and/or command line so that the program looks for values in the specified + environment variables. 
+ +This is an example of a Pod that uses a Secret via environment variables: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: secret-env-pod +spec: + containers: + - name: mycontainer + image: redis + env: + - name: SECRET_USERNAME + valueFrom: + secretKeyRef: + name: mysecret + key: username + optional: false # same as default; "mysecret" must exist + # and include a key named "username" + - name: SECRET_PASSWORD + valueFrom: + secretKeyRef: + name: mysecret + key: password + optional: false # same as default; "mysecret" must exist + # and include a key named "password" + restartPolicy: Never +``` + + +#### Invalid environment variables {#restriction-env-from-invalid} + +Secrets used to populate environment variables by the `envFrom` field that have keys +that are considered invalid environment variable names will have those keys +skipped. The Pod is allowed to start. + +If you define a Pod with an invalid variable name, the failed Pod startup includes +an event with the reason set to `InvalidVariableNames` and a message that lists the +skipped invalid keys. The following example shows a Pod that refers to a Secret +named `mysecret`, where `mysecret` contains 2 invalid keys: `1badkey` and `2alsobad`. + +```shell +kubectl get events +``` + +The output is similar to: + +``` +LASTSEEN FIRSTSEEN COUNT NAME KIND SUBOBJECT TYPE REASON +0s 0s 1 dapi-test-pod Pod Warning InvalidEnvironmentVariableNames kubelet, 127.0.0.1 Keys [1badkey, 2alsobad] from the EnvFrom secret default/mysecret were skipped since they are considered invalid environment variable names. +``` + + +#### Consuming Secret values from environment variables + +Inside a container that consumes a Secret using environment variables, the secret keys appear +as normal environment variables. The values of those variables are the base64 decoded values +of the secret data. + +This is the result of commands executed inside the container from the example above: + +```shell +echo "$SECRET_USERNAME" +``` + +The output is similar to: + +``` +admin +``` + +```shell +echo "$SECRET_PASSWORD" +``` + +The output is similar to: + +``` +1f2d1e2e67df +``` + +{{< note >}} +If a container already consumes a Secret in an environment variable, +a Secret update will not be seen by the container unless it is +restarted. There are third party solutions for triggering restarts when +secrets change. +{{< /note >}} + +### Container image pull secrets {#using-imagepullsecrets} + +If you want to fetch container images from a private repository, you need a way for +the kubelet on each node to authenticate to that repository. You can configure +_image pull secrets_ to make this possible. These secrets are configured at the Pod +level. + +The `imagePullSecrets` field for a Pod is a list of references to Secrets in the same namespace +as the Pod. +You can use an `imagePullSecrets` to pass image registry access credentials to +the kubelet. The kubelet uses this information to pull a private image on behalf of your Pod. +See `PodSpec` in the [Pod API reference](/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec) +for more information about the `imagePullSecrets` field. + +#### Using imagePullSecrets + +The `imagePullSecrets` field is a list of references to secrets in the same namespace. +You can use an `imagePullSecrets` to pass a secret that contains a Docker (or other) image registry +password to the kubelet. The kubelet uses this information to pull a private image on behalf of your Pod. 
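As a hedged sketch (the registry path and Secret name are placeholders, and the referenced `kubernetes.io/dockerconfigjson` Secret is assumed to already exist in the same namespace), a Pod that pulls from a private registry references the Secret like this:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: private-image-pod      # illustrative name
spec:
  containers:
  - name: app
    image: registry.example.com/my-team/my-app:1.0   # private image (placeholder)
  imagePullSecrets:
  - name: regcred              # Secret holding the registry credentials
```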
+See the [PodSpec API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core) for more information about the `imagePullSecrets` field. + +##### Manually specifying an imagePullSecret + +You can learn how to specify `imagePullSecrets` from the [container images](/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) +documentation. + +##### Arranging for imagePullSecrets to be automatically attached + +You can manually create `imagePullSecrets`, and reference these from +a ServiceAccount. Any Pods created with that ServiceAccount +or created with that ServiceAccount by default, will get their `imagePullSecrets` +field set to that of the service account. +See [Add ImagePullSecrets to a service account](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) + for a detailed explanation of that process. + +### Using Secrets with static Pods {#restriction-static-pod} + +You cannot use ConfigMaps or Secrets with +{{< glossary_tooltip text="static Pods" term_id="static-pod" >}}. + +## Use cases + +### Use case: As container environment variables + +Create a secret +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +data: + USER_NAME: YWRtaW4= + PASSWORD: MWYyZDFlMmU2N2Rm +``` + +Create the Secret: +```shell +kubectl apply -f mysecret.yaml +``` + +Use `envFrom` to define all of the Secret's data as container environment variables. The key from the Secret becomes the environment variable name in the Pod. + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: secret-test-pod +spec: + containers: + - name: test-container + image: k8s.gcr.io/busybox + command: [ "/bin/sh", "-c", "env" ] + envFrom: + - secretRef: + name: mysecret + restartPolicy: Never +``` + +### Use case: Pod with SSH keys + +Create a Secret containing some SSH keys: + +```shell +kubectl create secret generic ssh-key-secret --from-file=ssh-privatekey=/path/to/.ssh/id_rsa --from-file=ssh-publickey=/path/to/.ssh/id_rsa.pub +``` + +The output is similar to: + +``` +secret "ssh-key-secret" created +``` + +You can also create a `kustomization.yaml` with a `secretGenerator` field containing ssh keys. + +{{< caution >}} +Think carefully before sending your own SSH keys: other users of the cluster may have access +to the Secret. + +You could instead create an SSH private key representing a service identity that you want to be +accessible to all the users with whom you share the Kubernetes cluster, and that you can revoke +if the credentials are compromised. +{{< /caution >}} + +Now you can create a Pod which references the secret with the SSH key and +consumes it in a volume: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: secret-test-pod + labels: + name: secret-test +spec: + volumes: + - name: secret-volume + secret: + secretName: ssh-key-secret + containers: + - name: ssh-test-container + image: mySshImage + volumeMounts: + - name: secret-volume + readOnly: true + mountPath: "/etc/secret-volume" +``` + +When the container's command runs, the pieces of the key will be available in: + +``` +/etc/secret-volume/ssh-publickey +/etc/secret-volume/ssh-privatekey +``` + +The container is then free to use the secret data to establish an SSH connection. + +### Use case: Pods with prod / test credentials + +This example illustrates a Pod which consumes a secret containing production +credentials and another Pod which consumes a secret with test environment +credentials. 
+ +You can create a `kustomization.yaml` with a `secretGenerator` field or run +`kubectl create secret`. + +```shell +kubectl create secret generic prod-db-secret --from-literal=username=produser --from-literal=password=Y4nys7f11 +``` + +The output is similar to: + +``` +secret "prod-db-secret" created +``` + +You can also create a secret for test environment credentials. + +```shell +kubectl create secret generic test-db-secret --from-literal=username=testuser --from-literal=password=iluvtests +``` + +The output is similar to: + +``` +secret "test-db-secret" created +``` + +{{< note >}} +Special characters such as `$`, `\`, `*`, `=`, and `!` will be interpreted by your [shell](https://en.wikipedia.org/wiki/Shell_(computing)) and require escaping. + +In most shells, the easiest way to escape the password is to surround it with single quotes (`'`). +For example, if your actual password is `S!B\*d$zDsb=`, you should execute the command this way: + +```shell +kubectl create secret generic dev-db-secret --from-literal=username=devuser --from-literal=password='S!B\*d$zDsb=' +``` + +You do not need to escape special characters in passwords from files (`--from-file`). +{{< /note >}} + +Now make the Pods: + +```shell +cat <<EOF > pod.yaml +apiVersion: v1 +kind: List +items: +- kind: Pod + apiVersion: v1 + metadata: + name: prod-db-client-pod + labels: + name: prod-db-client + spec: + volumes: + - name: secret-volume + secret: + secretName: prod-db-secret + containers: + - name: db-client-container + image: myClientImage + volumeMounts: + - name: secret-volume + readOnly: true + mountPath: "/etc/secret-volume" +- kind: Pod + apiVersion: v1 + metadata: + name: test-db-client-pod + labels: + name: test-db-client + spec: + volumes: + - name: secret-volume + secret: + secretName: test-db-secret + containers: + - name: db-client-container + image: myClientImage + volumeMounts: + - name: secret-volume + readOnly: true + mountPath: "/etc/secret-volume" +EOF +``` + +Add the pods to the same `kustomization.yaml`: + +```shell +cat <<EOF >> kustomization.yaml +resources: +- pod.yaml +EOF +``` + +Apply all those objects on the API server by running: + +```shell +kubectl apply -k . +``` + +Both containers will have the following files present on their filesystems with the values +for each container's environment: + +``` +/etc/secret-volume/username +/etc/secret-volume/password +``` + +Note how the specs for the two Pods differ only in one field; this facilitates +creating Pods with different capabilities from a common Pod template. + +You could further simplify the base Pod specification by using two service accounts: + +1. `prod-user` with the `prod-db-secret` +1. `test-user` with the `test-db-secret` + +The Pod specification is shortened to: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: prod-db-client-pod + labels: + name: prod-db-client +spec: + serviceAccount: prod-user + containers: + - name: db-client-container + image: myClientImage +``` + +### Use case: dotfiles in a secret volume + +You can make your data "hidden" by defining a key that begins with a dot. +This key represents a dotfile or "hidden" file.
For example, when the following secret +is mounted into a volume, `secret-volume`: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: dotfile-secret +data: + .secret-file: dmFsdWUtMg0KDQo= +--- +apiVersion: v1 +kind: Pod +metadata: + name: secret-dotfiles-pod +spec: + volumes: + - name: secret-volume + secret: + secretName: dotfile-secret + containers: + - name: dotfile-test-container + image: k8s.gcr.io/busybox + command: + - ls + - "-l" + - "/etc/secret-volume" + volumeMounts: + - name: secret-volume + readOnly: true + mountPath: "/etc/secret-volume" +``` + +The volume will contain a single file, called `.secret-file`, and +the `dotfile-test-container` will have this file present at the path +`/etc/secret-volume/.secret-file`. + +{{< note >}} +Files beginning with dot characters are hidden from the output of `ls -l`; +you must use `ls -la` to see them when listing directory contents. +{{< /note >}} + +### Use case: Secret visible to one container in a Pod + +Consider a program that needs to handle HTTP requests, do some complex business +logic, and then sign some messages with an HMAC. Because it has complex +application logic, there might be an unnoticed remote file reading exploit in +the server, which could expose the private key to an attacker. + +This could be divided into two processes in two containers: a frontend container +which handles user interaction and business logic, but which cannot see the +private key; and a signer container that can see the private key, and responds +to simple signing requests from the frontend (for example, over localhost networking). + +With this partitioned approach, an attacker now has to trick the application +server into doing something rather arbitrary, which may be harder than getting +it to read a file. + ## Types of Secret {#secret-types} When creating a Secret, you can specify its type using the `type` field of -a Secret resource, or certain equivalent `kubectl` command line flags (if available). -The `type` of a Secret is used to facilitate programmatic handling of different -kinds of confidential data. +the [Secret](/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1/) +resource, or certain equivalent `kubectl` command line flags (if available). +The Secret type is used to facilitate programmatic handling of the Secret data. -Kubernetes provides several builtin types for some common usage scenarios. +Kubernetes provides several built-in types for some common usage scenarios. These types vary in terms of the validations performed and the constraints Kubernetes imposes on them. -| Builtin Type | Usage | +| Built-in Type | Usage | |--------------|-------| | `Opaque` | arbitrary user-defined data | -| `kubernetes.io/service-account-token` | service account token | +| `kubernetes.io/service-account-token` | ServiceAccount token | | `kubernetes.io/dockercfg` | serialized `~/.dockercfg` file | | `kubernetes.io/dockerconfigjson` | serialized `~/.docker/config.json` file | | `kubernetes.io/basic-auth` | credentials for basic authentication | @@ -95,11 +851,16 @@ Kubernetes imposes on them. | `bootstrap.kubernetes.io/token` | bootstrap token data | You can define and use your own Secret type by assigning a non-empty string as the -`type` value for a Secret object. An empty string is treated as an `Opaque` type. +`type` value for a Secret object (an empty string is treated as an `Opaque` type). + Kubernetes doesn't impose any constraints on the type name. 
However, if you -are using one of the builtin types, you must meet all the requirements defined +are using one of the built-in types, you must meet all the requirements defined for that type. +If you are defining a type of secret that's for public use, follow the convention +and structure the secret type to have your domain name before the name, separated +by a `/`. For example: `cloud-hosting.example.net/cloud-api-credentials`. + ### Opaque secrets `Opaque` is the default Secret type if omitted from a Secret configuration file. @@ -120,16 +881,20 @@ empty-secret Opaque 0 2m6s ``` The `DATA` column shows the number of data items stored in the Secret. -In this case, `0` means we have created an empty Secret. +In this case, `0` means you have created an empty Secret. -### Service account token Secrets +### Service account token Secrets A `kubernetes.io/service-account-token` type of Secret is used to store a -token that identifies a service account. When using this Secret type, you need -to ensure that the `kubernetes.io/service-account.name` annotation is set to an -existing service account name. A Kubernetes controller fills in some other -fields such as the `kubernetes.io/service-account.uid` annotation and the -`token` key in the `data` field set to actual token content. +token that identifies a +{{< glossary_tooltip text="service account" term_id="service-account" >}}. +When using this Secret type, you need to ensure that the +`kubernetes.io/service-account.name` annotation is set to an existing +service account name. A Kubernetes +{{< glossary_tooltip text="controller" term_id="controller" >}} fills in some +other fields such as the `kubernetes.io/service-account.uid` annotation, and the +`token` key in the `data` field, which is set to contain an authentication +token. The following example configuration declares a service account token Secret: @@ -146,31 +911,25 @@ data: extra: YmFyCg== ``` +When creating a `Pod`, Kubernetes automatically finds or creates a service account +Secret and then automatically modifies your Pod to use this Secret. The service account +token Secret contains credentials for accessing the Kubernetes API. + +The automatic creation and use of API credentials can be disabled or +overridden if desired. However, if all you need to do is securely access the +API server, this is the recommended workflow. + See the [ServiceAccount](/docs/tasks/configure-pod-container/configure-service-account/) -documentation for more information on how service accounts work. +documentation for more information on how service accounts work. You can also check the `automountServiceAccountToken` field and the `serviceAccountName` field of the [`Pod`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) for information on referencing service account from Pods. -{{< note >}} -Automatic creation of API credentials in secrets to mount into running pods -is no longer used in v1.22 and newer versions. Instead, API credentials are -obtained directly by using the [TokenRequest](/docs/reference/kubernetes-api/authentication-resources/token-request-v1/) API, -and are mounted into Pods using a [projected volume](/docs/reference/access-authn-authz/service-accounts-admin/#bound-service-account-token-volume). -The tokens obtained using this method have bounded lifetimes, and are automatically -invalidated when the Pod they are mounted into is deleted. 
- -Service account token secrets can still be [created manually](/docs/tasks/configure-pod-container/configure-service-account/#manually-create-a-service-account-api-token) -if you need a token that never expires. -However, using the [TokenRequest](/docs/reference/kubernetes-api/authentication-resources/token-request-v1/) -subresource to obtain a token to access the API is recommended instead. -{{< /note >}} - ### Docker config Secrets You can use one of the following `type` values to create a Secret to -store the credentials for accessing a Docker registry for images. +store the credentials for accessing a container image registry: - `kubernetes.io/dockercfg` - `kubernetes.io/dockerconfigjson` @@ -207,59 +966,64 @@ If you do not want to perform the base64 encoding, you can choose to use the {{< /note >}} When you create these types of Secrets using a manifest, the API -server checks whether the expected key does exists in the `data` field, and +server checks whether the expected key exists in the `data` field, and it verifies if the value provided can be parsed as a valid JSON. The API server doesn't validate if the JSON actually is a Docker config file. When you do not have a Docker config file, or you want to use `kubectl` -to create a Docker registry Secret, you can do: +to create a Secret for accessing a container registry, you can do: ```shell kubectl create secret docker-registry secret-tiger-docker \ + --docker-email=tiger@acme.example \ --docker-username=tiger \ - --docker-password=pass113 \ - --docker-email=tiger@acme.com \ + --docker-password=pass1234 \ --docker-server=my-registry.example:5000 ``` -This command creates a Secret of type `kubernetes.io/dockerconfigjson`. -If you dump the `.dockerconfigjson` content from the `data` field, you will -get the following JSON content which is a valid Docker configuration created -on the fly: +That command creates a Secret of type `kubernetes.io/dockerconfigjson`. +If you dump the `.data.dockercfgjson` field from that new Secret and then +decode it from base64: + +```shell +kubectl get secret secret-tiger-docker -o jsonpath='{.data.*}' | base64 -d +``` + +then the output is equivalent to this JSON document (which is also a valid +Docker configuration file): ```json { - "apiVersion": "v1", - "data": { - ".dockerconfigjson": "eyJhdXRocyI6eyJteS1yZWdpc3RyeTo1MDAwIjp7InVzZXJuYW1lIjoidGlnZXIiLCJwYXNzd29yZCI6InBhc3MxMTMiLCJlbWFpbCI6InRpZ2VyQGFjbWUuY29tIiwiYXV0aCI6ImRHbG5aWEk2Y0dGemN6RXhNdz09In19fQ==" - }, - "kind": "Secret", - "metadata": { - "creationTimestamp": "2021-07-01T07:30:59Z", - "name": "secret-tiger-docker", - "namespace": "default", - "resourceVersion": "566718", - "uid": "e15c1d7b-9071-4100-8681-f3a7a2ce89ca" - }, - "type": "kubernetes.io/dockerconfigjson" + "auths": { + "my-registry.example:5000": { + "username": "tiger", + "password": "pass1234", + "email": "tiger@acme.example", + "auth": "dGlnZXI6cGFzczEyMzQ=" + } + } } - ``` +{{< note >}} +The `auth` value there is base64 encoded; it is obscured but not secret. +Anyone who can read that Secret can learn the registry access bearer token. +{{< /note >}} + ### Basic authentication Secret The `kubernetes.io/basic-auth` type is provided for storing credentials needed for basic authentication. When using this Secret type, the `data` field of the Secret must contain one of the following two keys: -- `username`: the user name for authentication; -- `password`: the password or token for authentication. 
+- `username`: the user name for authentication +- `password`: the password or token for authentication Both values for the above two keys are base64 encoded strings. You can, of course, provide the clear text content using the `stringData` for Secret creation. -The following YAML is an example config for a basic authentication Secret: +The following manifest is an example of a basic authentication Secret: ```yaml apiVersion: v1 @@ -268,15 +1032,17 @@ metadata: name: secret-basic-auth type: kubernetes.io/basic-auth stringData: - username: admin - password: t0p-Secret + username: admin # required field for kubernetes.io/basic-auth + password: t0p-Secret # required field for kubernetes.io/basic-auth ``` -The basic authentication Secret type is provided only for user's convenience. -You can create an `Opaque` for credentials used for basic authentication. -However, using the builtin Secret type helps unify the formats of your credentials -and the API server does verify if the required keys are provided in a Secret -configuration. +The basic authentication Secret type is provided only for convenience. +You can create an `Opaque` type for credentials used for basic authentication. +However, using the defined and public Secret type (`kubernetes.io/basic-auth`) helps other +people to understand the purpose of your Secret, and sets a convention for what key names +to expect. +The Kubernetes API verifies that the required keys are set for a Secret +of this type. ### SSH authentication secrets @@ -285,7 +1051,8 @@ SSH authentication. When using this Secret type, you will have to specify a `ssh-privatekey` key-value pair in the `data` (or `stringData`) field as the SSH credential to use. -The following YAML is an example config for a SSH authentication Secret: +The following manifest is an example of a Secret used for SSH public/private +key authentication: ```yaml apiVersion: v1 @@ -300,8 +1067,10 @@ data: ``` The SSH authentication Secret type is provided only for user's convenience. -You can create an `Opaque` for credentials used for SSH authentication. -However, using the builtin Secret type helps unify the formats of your credentials +You could instead create an `Opaque` type Secret for credentials used for SSH authentication. +However, using the defined and public Secret type (`kubernetes.io/ssh-auth`) helps other +people to understand the purpose of your Secret, and sets a convention for what key names +to expect. and the API server does verify if the required keys are provided in a Secret configuration. @@ -315,9 +1084,11 @@ ConfigMap. ### TLS secrets Kubernetes provides a builtin Secret type `kubernetes.io/tls` for storing -a certificate and its associated key that are typically used for TLS . This -data is primarily used with TLS termination of the Ingress resource, but may -be used with other resources or directly by a workload. +a certificate and its associated key that are typically used for TLS. + +One common use for TLS secrets is to configure encryption in transit for +an [Ingress](/docs/concepts/services-networking/ingress/), but you can also use it +with other resources or directly in your workload. When using this type of Secret, the `tls.key` and the `tls.crt` key must be provided in the `data` (or `stringData`) field of the Secret configuration, although the API server doesn't actually validate the values for each key. @@ -352,20 +1123,28 @@ kubectl create secret tls my-tls-secret \ --key=path/to/key/file ``` -The public/private key pair must exist beforehand. 
The public key certificate -for `--cert` must be .PEM encoded (Base64-encoded DER format), and match the -given private key for `--key`. -The private key must be in what is commonly called PEM private key format, -unencrypted. In both cases, the initial and the last lines from PEM (for -example, `--------BEGIN CERTIFICATE-----` and `-------END CERTIFICATE----` for -a certificate) are *not* included. +The public/private key pair must exist before hand. The public key certificate +for `--cert` must be DER format as per +[Section 5.1 of RFC 7468](https://datatracker.ietf.org/doc/html/rfc7468#section-5.1), +and must match the given private key for `--key` (PKCS #8 in DER format; +[Section 11 of RFC 7468](https://datatracker.ietf.org/doc/html/rfc7468#section-11)). + +{{< note >}} +A kubernetes.io/tls Secret stores the Base64-encoded DER data for keys and +certificates. If you're familiar with PEM format for private keys and for certificates, +the base64 data are the same as that format except that you omit +the initial and the last lines that are used in PEM. + +For example, for a certificate, you do **not** include `--------BEGIN CERTIFICATE-----` +and `-------END CERTIFICATE----`. +{{< /note >}} ### Bootstrap token Secrets A bootstrap token Secret can be created by explicitly specifying the Secret `type` to `bootstrap.kubernetes.io/token`. This type of Secret is designed for tokens used during the node bootstrap process. It stores tokens used to sign -well known ConfigMaps. +well-known ConfigMaps. A bootstrap token Secret is usually created in the `kube-system` namespace and named in the form `bootstrap-token-` where `` is a 6 character @@ -427,367 +1206,23 @@ stringData: usage-bootstrap-signing: "true" ``` -## Creating a Secret - -There are several options to create a Secret: - -- [create Secret using `kubectl` command](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) -- [create Secret from config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) -- [create Secret using kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) - -## Editing a Secret - -An existing Secret may be edited with the following command: - -```shell -kubectl edit secrets mysecret -``` - -This will open the default configured editor and allow for updating the base64 encoded Secret values in the `data` field: - -```yaml -# Please edit the object below. Lines beginning with a '#' will be ignored, -# and an empty file will abort the edit. If an error occurs while saving this file will be -# reopened with the relevant failures. -# -apiVersion: v1 -data: - username: YWRtaW4= - password: MWYyZDFlMmU2N2Rm -kind: Secret -metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: { ... } - creationTimestamp: 2016-01-22T18:41:56Z - name: mysecret - namespace: default - resourceVersion: "164619" - uid: cfee02d6-c137-11e5-8d73-42010af00002 -type: Opaque -``` - -## Using Secrets - -Secrets can be mounted as data volumes or exposed as -{{< glossary_tooltip text="environment variables" term_id="container-env-variables" >}} -to be used by a container in a Pod. Secrets can also be used by other parts of the -system, without being directly exposed to the Pod. For example, Secrets can hold -credentials that other parts of the system should use to interact with external -systems on your behalf. - -### Using Secrets as files from a Pod - -To consume a Secret in a volume in a Pod: - -1. Create a secret or use an existing one. Multiple Pods can reference the same secret. -1. 
Modify your Pod definition to add a volume under `.spec.volumes[]`. Name the volume anything, and have a `.spec.volumes[].secret.secretName` field equal to the name of the Secret object. -1. Add a `.spec.containers[].volumeMounts[]` to each container that needs the secret. Specify `.spec.containers[].volumeMounts[].readOnly = true` and `.spec.containers[].volumeMounts[].mountPath` to an unused directory name where you would like the secrets to appear. -1. Modify your image or command line so that the program looks for files in that directory. Each key in the secret `data` map becomes the filename under `mountPath`. - -This is an example of a Pod that mounts a Secret in a volume: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: mypod -spec: - containers: - - name: mypod - image: redis - volumeMounts: - - name: foo - mountPath: "/etc/foo" - readOnly: true - volumes: - - name: foo - secret: - secretName: mysecret -``` - -Each Secret you want to use needs to be referred to in `.spec.volumes`. - -If there are multiple containers in the Pod, then each container needs its -own `volumeMounts` block, but only one `.spec.volumes` is needed per Secret. - -You can package many files into one secret, or use many secrets, whichever is convenient. - -#### Projection of Secret keys to specific paths - -You can also control the paths within the volume where Secret keys are projected. -You can use the `.spec.volumes[].secret.items` field to change the target path of each key: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: mypod -spec: - containers: - - name: mypod - image: redis - volumeMounts: - - name: foo - mountPath: "/etc/foo" - readOnly: true - volumes: - - name: foo - secret: - secretName: mysecret - items: - - key: username - path: my-group/my-username -``` - -What will happen: - -* `username` secret is stored under `/etc/foo/my-group/my-username` file instead of `/etc/foo/username`. -* `password` secret is not projected. - -If `.spec.volumes[].secret.items` is used, only keys specified in `items` are projected. -To consume all keys from the secret, all of them must be listed in the `items` field. -All listed keys must exist in the corresponding secret. Otherwise, the volume is not created. - -#### Secret files permissions - -You can set the file access permission bits for a single Secret key. -If you don't specify any permissions, `0644` is used by default. -You can also set a default mode for the entire Secret volume and override per key if needed. - -For example, you can specify a default mode like this: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: mypod -spec: - containers: - - name: mypod - image: redis - volumeMounts: - - name: foo - mountPath: "/etc/foo" - volumes: - - name: foo - secret: - secretName: mysecret - defaultMode: 0400 -``` - -Then, the secret will be mounted on `/etc/foo` and all the files created by the -secret volume mount will have permission `0400`. - -Note that the JSON spec doesn't support octal notation, so use the value 256 for -0400 permissions. If you use YAML instead of JSON for the Pod, you can use octal -notation to specify permissions in a more natural way. - -Note if you `kubectl exec` into the Pod, you need to follow the symlink to find -the expected file mode. For example, - -Check the secrets file mode on the pod. 
-``` -kubectl exec mypod -it sh - -cd /etc/foo -ls -l -``` - -The output is similar to this: -``` -total 0 -lrwxrwxrwx 1 root root 15 May 18 00:18 password -> ..data/password -lrwxrwxrwx 1 root root 15 May 18 00:18 username -> ..data/username -``` - -Follow the symlink to find the correct file mode. - -``` -cd /etc/foo/..data -ls -l -``` - -The output is similar to this: -``` -total 8 --r-------- 1 root root 12 May 18 00:18 password --r-------- 1 root root 5 May 18 00:18 username -``` - -You can also use mapping, as in the previous example, and specify different -permissions for different files like this: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: mypod -spec: - containers: - - name: mypod - image: redis - volumeMounts: - - name: foo - mountPath: "/etc/foo" - volumes: - - name: foo - secret: - secretName: mysecret - items: - - key: username - path: my-group/my-username - mode: 0777 -``` - -In this case, the file resulting in `/etc/foo/my-group/my-username` will have -permission value of `0777`. If you use JSON, owing to JSON limitations, you -must specify the mode in decimal notation, `511`. - -Note that this permission value might be displayed in decimal notation if you -read it later. - -#### Consuming Secret values from volumes - -Inside the container that mounts a secret volume, the secret keys appear as -files and the secret values are base64 decoded and stored inside these files. -This is the result of commands executed inside the container from the example above: - -```shell -ls /etc/foo/ -``` - -The output is similar to: - -``` -username -password -``` - -```shell -cat /etc/foo/username -``` - -The output is similar to: - -``` -admin -``` - -```shell -cat /etc/foo/password -``` - -The output is similar to: - -``` -1f2d1e2e67df -``` - -The program in a container is responsible for reading the secrets from the -files. - -#### Mounted Secrets are updated automatically - -When a secret currently consumed in a volume is updated, projected keys are eventually updated as well. -The kubelet checks whether the mounted secret is fresh on every periodic sync. -However, the kubelet uses its local cache for getting the current value of the Secret. -The type of the cache is configurable using the `ConfigMapAndSecretChangeDetectionStrategy` field in -the [KubeletConfiguration struct](/docs/reference/config-api/kubelet-config.v1beta1/). -A Secret can be either propagated by watch (default), ttl-based, or by redirecting -all requests directly to the API server. -As a result, the total delay from the moment when the Secret is updated to the moment -when new keys are projected to the Pod can be as long as the kubelet sync period + cache -propagation delay, where the cache propagation delay depends on the chosen cache type -(it equals to watch propagation delay, ttl of cache, or zero correspondingly). - -{{< note >}} -A container using a Secret as a -[subPath](/docs/concepts/storage/volumes#using-subpath) volume mount will not receive -Secret updates. -{{< /note >}} - -### Using Secrets as environment variables - -To use a secret in an {{< glossary_tooltip text="environment variable" term_id="container-env-variables" >}} -in a Pod: - -1. Create a secret or use an existing one. Multiple Pods can reference the same secret. -1. Modify your Pod definition in each container that you wish to consume the value of a secret key to add an environment variable for each secret key you wish to consume. 
The environment variable that consumes the secret key should populate the secret's name and key in `env[].valueFrom.secretKeyRef`. -1. Modify your image and/or command line so that the program looks for values in the specified environment variables. - -This is an example of a Pod that uses secrets from environment variables: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: secret-env-pod -spec: - containers: - - name: mycontainer - image: redis - env: - - name: SECRET_USERNAME - valueFrom: - secretKeyRef: - name: mysecret - key: username - - name: SECRET_PASSWORD - valueFrom: - secretKeyRef: - name: mysecret - key: password - restartPolicy: Never -``` - -#### Consuming Secret Values from environment variables - -Inside a container that consumes a secret in the environment variables, the secret keys appear as -normal environment variables containing the base64 decoded values of the secret data. -This is the result of commands executed inside the container from the example above: - -```shell -echo $SECRET_USERNAME -``` - -The output is similar to: - -``` -admin -``` - -```shell -echo $SECRET_PASSWORD -``` - -The output is similar to: - -``` -1f2d1e2e67df -``` - -#### Environment variables are not updated after a secret update - -If a container already consumes a Secret in an environment variable, a Secret update will not be seen by the container unless it is restarted. -There are third party solutions for triggering restarts when secrets change. ## Immutable Secrets {#secret-immutable} {{< feature-state for_k8s_version="v1.21" state="stable" >}} -The Kubernetes feature _Immutable Secrets and ConfigMaps_ provides an option to set -individual Secrets and ConfigMaps as immutable. For clusters that extensively use Secrets -(at least tens of thousands of unique Secret to Pod mounts), preventing changes to their -data has the following advantages: +Kubernetes lets you mark specific Secrets (and ConfigMaps) as _immutable_. +Preventing changes to the data of an existing Secret has the following benefits: - protects you from accidental (or unwanted) updates that could cause applications outages -- improves performance of your cluster by significantly reducing load on kube-apiserver, by -closing watches for secrets marked as immutable. +- (for clusters that extensively use Secrets - at least tens of thousands of unique Secret + to Pod mounts), switching to immutable Secrets improves the performance of your cluster + by significantly reducing load on kube-apiserver. The kubelet does not need to maintain + a [watch] on any Secrets that are marked as immutable. -This feature is controlled by the `ImmutableEphemeralVolumes` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/), -which is enabled by default since v1.19. You can create an immutable -Secret by setting the `immutable` field to `true`. For example, +### Marking a Secret as immutable {#secret-immutable-create} + +You can create an immutable Secret by setting the `immutable` field to `true`. For example, ```yaml apiVersion: v1 kind: Secret @@ -798,6 +1233,8 @@ data: immutable: true ``` +You can also update any existing mutable Secret to make it immutable. + {{< note >}} Once a Secret or ConfigMap is marked as immutable, it is _not_ possible to revert this change nor to mutate the contents of the `data` field. You can only delete and recreate the Secret. @@ -805,388 +1242,10 @@ Existing Pods maintain a mount point to the deleted Secret - it is recommended t these pods. 
{{< /note >}} -### Using imagePullSecrets +## Information security for Secrets -The `imagePullSecrets` field is a list of references to secrets in the same namespace. -You can use an `imagePullSecrets` to pass a secret that contains a Docker (or other) image registry -password to the kubelet. The kubelet uses this information to pull a private image on behalf of your Pod. -See the [PodSpec API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core) for more information about the `imagePullSecrets` field. - -#### Manually specifying an imagePullSecret - -You can learn how to specify `ImagePullSecrets` from the [container images documentation](/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod). - -### Arranging for imagePullSecrets to be automatically attached - -You can manually create `imagePullSecrets`, and reference it from -a ServiceAccount. Any Pods created with that ServiceAccount -or created with that ServiceAccount by default, will get their `imagePullSecrets` -field set to that of the service account. -See [Add ImagePullSecrets to a service account](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) - for a detailed explanation of that process. - -## Details - -### Restrictions - -Secret volume sources are validated to ensure that the specified object -reference actually points to an object of type Secret. Therefore, a secret -needs to be created before any Pods that depend on it. - -Secret resources reside in a {{< glossary_tooltip text="namespace" term_id="namespace" >}}. -Secrets can only be referenced by Pods in that same namespace. - -Individual secrets are limited to 1MiB in size. This is to discourage creation -of very large secrets which would exhaust the API server and kubelet memory. -However, creation of many smaller secrets could also exhaust memory. More -comprehensive limits on memory usage due to secrets is a planned feature. - -The kubelet only supports the use of secrets for Pods where the secrets -are obtained from the API server. -This includes any Pods created using `kubectl`, or indirectly via a replication -controller. It does not include Pods created as a result of the kubelet -`--manifest-url` flag, its `--config` flag, or its REST API (these are -not common ways to create Pods). -The `spec` of a {{< glossary_tooltip text="static Pod" term_id="static-pod" >}} cannot refer to a Secret -or any other API objects. - - -Secrets must be created before they are consumed in Pods as environment -variables unless they are marked as optional. References to secrets that do -not exist will prevent the Pod from starting. - -References (`secretKeyRef` field) to keys that do not exist in a named Secret -will prevent the Pod from starting. - -Secrets used to populate environment variables by the `envFrom` field that have keys -that are considered invalid environment variable names will have those keys -skipped. The Pod will be allowed to start. There will be an event whose -reason is `InvalidVariableNames` and the message will contain the list of -invalid keys that were skipped. The example shows a pod which refers to the -default/mysecret that contains 2 invalid keys: `1badkey` and `2alsobad`. 
- -```shell -kubectl get events -``` - -The output is similar to: - -``` -LASTSEEN FIRSTSEEN COUNT NAME KIND SUBOBJECT TYPE REASON -0s 0s 1 dapi-test-pod Pod Warning InvalidEnvironmentVariableNames kubelet, 127.0.0.1 Keys [1badkey, 2alsobad] from the EnvFrom secret default/mysecret were skipped since they are considered invalid environment variable names. -``` - -### Secret and Pod lifetime interaction - -When a Pod is created by calling the Kubernetes API, there is no check if a referenced -secret exists. Once a Pod is scheduled, the kubelet will try to fetch the -secret value. If the secret cannot be fetched because it does not exist or -because of a temporary lack of connection to the API server, the kubelet will -periodically retry. It will report an event about the Pod explaining the -reason it is not started yet. Once the secret is fetched, the kubelet will -create and mount a volume containing it. None of the Pod's containers will -start until all the Pod's volumes are mounted. - -## Use cases - -### Use-Case: As container environment variables - -Create a secret - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: mysecret -type: Opaque -data: - USER_NAME: YWRtaW4= - PASSWORD: MWYyZDFlMmU2N2Rm -``` - -Create the Secret: - -```shell -kubectl apply -f mysecret.yaml -``` - -Use `envFrom` to define all of the Secret's data as container environment variables. The key from the Secret becomes the environment variable name in the Pod. - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: secret-test-pod -spec: - containers: - - name: test-container - image: k8s.gcr.io/busybox - command: [ "/bin/sh", "-c", "env" ] - envFrom: - - secretRef: - name: mysecret - restartPolicy: Never -``` - -### Use-Case: Pod with ssh keys - -Create a secret containing some ssh keys: - -```shell -kubectl create secret generic ssh-key-secret --from-file=ssh-privatekey=/path/to/.ssh/id_rsa --from-file=ssh-publickey=/path/to/.ssh/id_rsa.pub -``` - -The output is similar to: - -``` -secret "ssh-key-secret" created -``` - -You can also create a `kustomization.yaml` with a `secretGenerator` field containing ssh keys. - -{{< caution >}} -Think carefully before sending your own ssh keys: other users of the cluster may have access to the secret. Use a service account which you want to be accessible to all the users with whom you share the Kubernetes cluster, and can revoke this account if the users are compromised. -{{< /caution >}} - -Now you can create a Pod which references the secret with the ssh key and -consumes it in a volume: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: secret-test-pod - labels: - name: secret-test -spec: - volumes: - - name: secret-volume - secret: - secretName: ssh-key-secret - containers: - - name: ssh-test-container - image: mySshImage - volumeMounts: - - name: secret-volume - readOnly: true - mountPath: "/etc/secret-volume" -``` - -When the container's command runs, the pieces of the key will be available in: - -``` -/etc/secret-volume/ssh-publickey -/etc/secret-volume/ssh-privatekey -``` - -The container is then free to use the secret data to establish an ssh connection. - -### Use-Case: Pods with prod / test credentials - -This example illustrates a Pod which consumes a secret containing production -credentials and another Pod which consumes a secret with test environment -credentials. - -You can create a `kustomization.yaml` with a `secretGenerator` field or run -`kubectl create secret`. 
- -```shell -kubectl create secret generic prod-db-secret --from-literal=username=produser --from-literal=password=Y4nys7f11 -``` - -The output is similar to: - -``` -secret "prod-db-secret" created -``` - -You can also create a secret for test environment credentials. - -```shell -kubectl create secret generic test-db-secret --from-literal=username=testuser --from-literal=password=iluvtests -``` - -The output is similar to: - -``` -secret "test-db-secret" created -``` - -{{< note >}} -Special characters such as `$`, `\`, `*`, `=`, and `!` will be interpreted by your [shell](https://en.wikipedia.org/wiki/Shell_(computing)) and require escaping. -In most shells, the easiest way to escape the password is to surround it with single quotes (`'`). -For example, if your actual password is `S!B\*d$zDsb=`, you should execute the command this way: - -```shell -kubectl create secret generic dev-db-secret --from-literal=username=devuser --from-literal=password='S!B\*d$zDsb=' -``` - -You do not need to escape special characters in passwords from files (`--from-file`). -{{< /note >}} - -Now make the Pods: - -```shell -cat < pod.yaml -apiVersion: v1 -kind: List -items: -- kind: Pod - apiVersion: v1 - metadata: - name: prod-db-client-pod - labels: - name: prod-db-client - spec: - volumes: - - name: secret-volume - secret: - secretName: prod-db-secret - containers: - - name: db-client-container - image: myClientImage - volumeMounts: - - name: secret-volume - readOnly: true - mountPath: "/etc/secret-volume" -- kind: Pod - apiVersion: v1 - metadata: - name: test-db-client-pod - labels: - name: test-db-client - spec: - volumes: - - name: secret-volume - secret: - secretName: test-db-secret - containers: - - name: db-client-container - image: myClientImage - volumeMounts: - - name: secret-volume - readOnly: true - mountPath: "/etc/secret-volume" -EOF -``` - -Add the pods to the same kustomization.yaml: - -```shell -cat <> kustomization.yaml -resources: -- pod.yaml -EOF -``` - -Apply all those objects on the API server by running: - -```shell -kubectl apply -k . -``` - -Both containers will have the following files present on their filesystems with the values for each container's environment: - -``` -/etc/secret-volume/username -/etc/secret-volume/password -``` - -Note how the specs for the two Pods differ only in one field; this facilitates -creating Pods with different capabilities from a common Pod template. - -You could further simplify the base Pod specification by using two service accounts: - -1. `prod-user` with the `prod-db-secret` -1. `test-user` with the `test-db-secret` - -The Pod specification is shortened to: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: prod-db-client-pod - labels: - name: prod-db-client -spec: - serviceAccount: prod-db-client - containers: - - name: db-client-container - image: myClientImage -``` - -### Use-case: dotfiles in a secret volume - -You can make your data "hidden" by defining a key that begins with a dot. -This key represents a dotfile or "hidden" file. 
For example, when the following secret -is mounted into a volume, `secret-volume`: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: dotfile-secret -data: - .secret-file: dmFsdWUtMg0KDQo= ---- -apiVersion: v1 -kind: Pod -metadata: - name: secret-dotfiles-pod -spec: - volumes: - - name: secret-volume - secret: - secretName: dotfile-secret - containers: - - name: dotfile-test-container - image: k8s.gcr.io/busybox - command: - - ls - - "-l" - - "/etc/secret-volume" - volumeMounts: - - name: secret-volume - readOnly: true - mountPath: "/etc/secret-volume" -``` - -The volume will contain a single file, called `.secret-file`, and -the `dotfile-test-container` will have this file present at the path -`/etc/secret-volume/.secret-file`. - -{{< note >}} -Files beginning with dot characters are hidden from the output of `ls -l`; -you must use `ls -la` to see them when listing directory contents. -{{< /note >}} - -### Use-case: Secret visible to one container in a Pod - -Consider a program that needs to handle HTTP requests, do some complex business -logic, and then sign some messages with an HMAC. Because it has complex -application logic, there might be an unnoticed remote file reading exploit in -the server, which could expose the private key to an attacker. - -This could be divided into two processes in two containers: a frontend container -which handles user interaction and business logic, but which cannot see the -private key; and a signer container that can see the private key, and responds -to simple signing requests from the frontend (for example, over localhost networking). - -With this partitioned approach, an attacker now has to trick the application -server into doing something rather arbitrary, which may be harder than getting -it to read a file. - - - -## Best practices - -### Clients that use the Secret API - -When deploying applications that interact with the Secret API, you should -limit access using [authorization policies]( -/docs/reference/access-authn-authz/authorization/) such as [RBAC]( -/docs/reference/access-authn-authz/rbac/). +Although ConfigMap and Secret work similarly, Kubernetes applies some additional +protection for Secret objects. Secrets often hold values that span a spectrum of importance, many of which can cause escalations within Kubernetes (e.g. service account tokens) and to @@ -1194,78 +1253,78 @@ external systems. Even if an individual app can reason about the power of the Secrets it expects to interact with, other apps within the same namespace can render those assumptions invalid. -For these reasons `watch` and `list` requests for secrets within a namespace are -extremely powerful capabilities and should be avoided, since listing secrets allows -the clients to inspect the values of all secrets that are in that namespace. The ability to -`watch` and `list` all secrets in a cluster should be reserved for only the most -privileged, system-level components. +A Secret is only sent to a node if a Pod on that node requires it. +For mounting secrets into Pods, the kubelet stores a copy of the data into a `tmpfs` +so that the confidential data is not written to durable storage. +Once the Pod that depends on the Secret is deleted, the kubelet deletes its local copy +of the confidential data from the Secret. -Applications that need to access the Secret API should perform `get` requests on -the secrets they need. 
This lets administrators restrict access to all secrets -while [white-listing access to individual instances](/docs/reference/access-authn-authz/rbac/#referring-to-resources) that -the app needs. +There may be several containers in a Pod. By default, containers you define +only have access to the default ServiceAccount and its related Secret. +You must explicitly define environment variables or map a volume into a +container in order to provide access to any other Secret. -For improved performance over a looping `get`, clients can design resources that -reference a secret then `watch` the resource, re-requesting the secret when the -reference changes. Additionally, a ["bulk watch" API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/bulk_watch.md) -to let clients `watch` individual resources has also been proposed, and will likely -be available in future releases of Kubernetes. +There may be Secrets for several Pods on the same node. However, only the +Secrets that a Pod requests are potentially visible within its containers. +Therefore, one Pod does not have access to the Secrets of another Pod. -## Security properties +{{< warning >}} +Any privileged containers on a node are liable to have access to all Secrets used +on that node. +{{< /warning >}} -### Protections -Because secrets can be created independently of the Pods that use -them, there is less risk of the secret being exposed during the workflow of -creating, viewing, and editing Pods. The system can also take additional -precautions with Secrets, such as avoiding writing them to disk where -possible. +### Security recommendations for developers -A secret is only sent to a node if a Pod on that node requires it. -The kubelet stores the secret into a `tmpfs` so that the secret is not written -to disk storage. Once the Pod that depends on the secret is deleted, the kubelet -will delete its local copy of the secret data as well. +- Applications still need to protect the value of confidential information after reading it + from an environment variable or volume. For example, your application must avoid logging + the secret data in the clear or transmitting it to an untrusted party. +- If you are defining multiple containers in a Pod, and only one of those + containers needs access to a Secret, define the volume mount or environment + variable configuration so that the other containers do not have access to that + Secret. +- If you configure a Secret through a {{< glossary_tooltip text="manifest" term_id="manifest" >}}, + with the secret data encoded as base64, sharing this file or checking it in to a + source repository means the secret is available to everyone who can read the manifest. + Base64 encoding is _not_ an encryption method, it provides no additional confidentiality + over plain text. +- When deploying applications that interact with the Secret API, you should + limit access using + [authorization policies](/docs/reference/access-authn-authz/authorization/) such as + [RBAC]( /docs/reference/access-authn-authz/rbac/). +- In the Kubernetes API, `watch` and `list` requests for Secrets within a namespace + are extremely powerful capabilities. Avoid granting this access where feasible, since + listing Secrets allows the clients to inspect the values of every Secret in that + namespace. -There may be secrets for several Pods on the same node. However, only the -secrets that a Pod requests are potentially visible within its containers. 
-Therefore, one Pod does not have access to the secrets of another Pod. +### Security recommendations for cluster administrators -There may be several containers in a Pod. However, each container in a Pod has -to request the secret volume in its `volumeMounts` for it to be visible within -the container. This can be used to construct useful [security partitions at the -Pod level](#use-case-secret-visible-to-one-container-in-a-pod). +{{< caution >}} +A user who can create a Pod that uses a Secret can also see the value of that Secret. Even +if cluster policies do not allow a user to read the Secret directly, the same user could +have access to run a Pod that then exposes the Secret. +{{< /caution >}} -On most Kubernetes distributions, communication between users -and the API server, and from the API server to the kubelets, is protected by SSL/TLS. -Secrets are protected when transmitted over these channels. - -{{< feature-state for_k8s_version="v1.13" state="beta" >}} - -You can enable [encryption at rest](/docs/tasks/administer-cluster/encrypt-data/) -for secret data, so that the secrets are not stored in the clear into {{< glossary_tooltip term_id="etcd" >}}. - -### Risks - - - In the API server, secret data is stored in {{< glossary_tooltip term_id="etcd" >}}; - therefore: - - Administrators should enable encryption at rest for cluster data (requires v1.13 or later). - - Administrators should limit access to etcd to admin users. - - Administrators may want to wipe/shred disks used by etcd when no longer in use. - - If running etcd in a cluster, administrators should make sure to use SSL/TLS - for etcd peer-to-peer communication. - - If you configure the secret through a manifest (JSON or YAML) file which has - the secret data encoded as base64, sharing this file or checking it in to a - source repository means the secret is compromised. Base64 encoding is _not_ an - encryption method and is considered the same as plain text. - - Applications still need to protect the value of secret after reading it from the volume, - such as not accidentally logging it or transmitting it to an untrusted party. - - A user who can create a Pod that uses a secret can also see the value of that secret. Even - if the API server policy does not allow that user to read the Secret, the user could - run a Pod which exposes the secret. +- Reserve the ability to `watch` or `list` all secrets in a cluster (using the Kubernetes + API), so that only the most privileged, system-level components can perform this action. +- When deploying applications that interact with the Secret API, you should + limit access using + [authorization policies](/docs/reference/access-authn-authz/authorization/) such as + [RBAC]( /docs/reference/access-authn-authz/rbac/). +- In the API server, objects (including Secrets) are persisted into + {{< glossary_tooltip term_id="etcd" >}}; therefore: + - only allow cluster admistrators to access etcd (this includes read-only access); + - enable [encryption at rest](/docs/tasks/administer-cluster/encrypt-data/) + for Secret objects, so that the data of these Secrets are not stored in the clear + into {{< glossary_tooltip term_id="etcd" >}}; + - consider wiping / shredding the durable storage used by etcd once it is + no longer in use; + - if there are multiple etcd instances, make sure that etcd is + using SSL/TLS for communication between etcd peers. 
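
The RBAC guidance in the lists above can be made concrete with a namespaced Role that grants `get` on only the Secrets an application actually needs. This is a minimal sketch; the namespace, Secret name, and ServiceAccount name are placeholders, not values taken from this page:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: read-app-credentials
  namespace: my-namespace             # placeholder namespace
rules:
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["app-credentials"]  # only this named Secret
  verbs: ["get"]                      # deliberately no "list" or "watch"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: read-app-credentials
  namespace: my-namespace
subjects:
- kind: ServiceAccount
  name: my-app                        # placeholder ServiceAccount
  namespace: my-namespace
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: read-app-credentials
```

Leaving `list` and `watch` out of the granted verbs follows the recommendation to reserve those capabilities for system-level components.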
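
The etcd-related items can likewise be implemented with an `EncryptionConfiguration` file passed to the kube-apiserver through its `--encryption-provider-config` flag. This is only a sketch; it assumes you have already generated a 32-byte key and can distribute it securely to every control plane node (see the encryption at rest task linked above for the full procedure):

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
- resources:
  - secrets
  providers:
  - aescbc:
      keys:
      - name: key1
        secret: <base64-encoded 32-byte key>  # placeholder; generate and protect this key
  - identity: {}  # fallback so Secrets written before encryption was enabled stay readable
```

Enabling this only affects Secrets as they are written; the encryption at rest task describes how to rewrite existing Secrets so that they are stored in encrypted form as well.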
## {{% heading "whatsnext" %}} -- Learn how to [manage Secret using `kubectl`](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) -- Learn how to [manage Secret using config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) -- Learn how to [manage Secret using kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) +- Learn how to [manage Secrets using `kubectl`](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) +- Learn how to [manage Secrets using config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) +- Learn how to [manage Secrets using kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/) - Read the [API reference](/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1/) for `Secret` diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index fa07048b52..94f667964d 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -88,13 +88,13 @@ has all the information needed to configure a load balancer or proxy server. Mos contains a list of rules matched against all incoming requests. Ingress resource only supports rules for directing HTTP(S) traffic. -If the `ingressClassName` is omitted, a [default Ingress class](#default-ingress-class) +If the `ingressClassName` is omitted, a [default Ingress class](#default-ingress-class) should be defined. -There are some ingress controllers, that work without the definition of a -default `IngressClass`. For example, the Ingress-NGINX controller can be -configured with a [flag](https://kubernetes.github.io/ingress-nginx/#what-is-the-flag-watch-ingress-without-class) -`--watch-ingress-without-class`. It is [recommended](https://kubernetes.github.io/ingress-nginx/#i-have-only-one-instance-of-the-ingresss-nginx-controller-in-my-cluster-what-should-i-do) though, to specify the +There are some ingress controllers, that work without the definition of a +default `IngressClass`. For example, the Ingress-NGINX controller can be +configured with a [flag](https://kubernetes.github.io/ingress-nginx/#what-is-the-flag-watch-ingress-without-class) +`--watch-ingress-without-class`. It is [recommended](https://kubernetes.github.io/ingress-nginx/#i-have-only-one-instance-of-the-ingresss-nginx-controller-in-my-cluster-what-should-i-do) though, to specify the default `IngressClass` as shown [below](#default-ingress-class). ### Ingress rules @@ -118,8 +118,14 @@ match a path in the spec. ### DefaultBackend {#default-backend} -An Ingress with no rules sends all traffic to a single default backend. The `defaultBackend` is conventionally a configuration option -of the [Ingress controller](/docs/concepts/services-networking/ingress-controllers) and is not specified in your Ingress resources. +An Ingress with no rules sends all traffic to a single default backend and `.spec.defaultBackend` +is the backend that should handle requests in that case. +The `defaultBackend` is conventionally a configuration option of the +[Ingress controller](/docs/concepts/services-networking/ingress-controllers) and +is not specified in your Ingress resources. +If no `.spec.rules` are specified, `.spec.defaultBackend` must be specified. 
+If `defaultBackend` is not set, the handling of requests that do not match any of the rules will be up to the +ingress controller (consult the documentation for your ingress controller to find out how it handles this case). If none of the hosts or paths match the HTTP request in the Ingress objects, the traffic is routed to your default backend. diff --git a/content/en/docs/reference/access-authn-authz/authentication.md b/content/en/docs/reference/access-authn-authz/authentication.md index 30e1725859..68e8b89869 100644 --- a/content/en/docs/reference/access-authn-authz/authentication.md +++ b/content/en/docs/reference/access-authn-authz/authentication.md @@ -733,7 +733,7 @@ The following HTTP headers can be used to performing an impersonation request: * `Impersonate-User`: The username to act as. * `Impersonate-Group`: A group name to act as. Can be provided multiple times to set multiple groups. Optional. Requires "Impersonate-User". -* `Impersonate-Extra-( extra name )`: A dynamic header used to associate extra fields with the user. Optional. Requires "Impersonate-User". In order to be preserved consistently, `( extra name )` should be lower-case, and any characters which aren't [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6) MUST be utf8 and [percent-encoded](https://tools.ietf.org/html/rfc3986#section-2.1). +* `Impersonate-Extra-( extra name )`: A dynamic header used to associate extra fields with the user. Optional. Requires "Impersonate-User". In order to be preserved consistently, `( extra name )` must be lower-case, and any characters which aren't [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6) MUST be utf8 and [percent-encoded](https://tools.ietf.org/html/rfc3986#section-2.1). * `Impersonate-Uid`: A unique identifier that represents the user being impersonated. Optional. Requires "Impersonate-User". Kubernetes does not impose any format requirements on this string. {{< note >}} diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet.md b/content/en/docs/reference/command-line-tools-reference/kubelet.md index dc23747c22..555414a6eb 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet.md @@ -163,14 +163,14 @@ kubelet [flags] --cloud-config string -The path to the cloud provider configuration file. Empty string for no configuration file. (DEPRECATED: will be removed in 1.23, in favor of removing cloud providers code from Kubelet.) +The path to the cloud provider configuration file. Empty string for no configuration file. (DEPRECATED: will be removed in 1.24 or later, in favor of removing cloud providers code from kubelet.) --cloud-provider string -The provider for cloud services. Set to empty string for running with no cloud provider. If set, the cloud provider determines the name of the node (consult cloud provider documentation to determine if and how the hostname is used). (DEPRECATED: will be removed in 1.23, in favor of removing cloud provider code from Kubelet.) +The provider for cloud services. Set to empty string for running with no cloud provider. If set, the cloud provider determines the name of the node (consult cloud provider documentation to determine if and how the hostname is used). (DEPRECATED: will be removed in 1.24 or later, in favor of removing cloud provider code from Kubelet.) 
@@ -297,7 +297,7 @@ kubelet [flags] --dynamic-config-dir string -The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The DynamicKubeletConfig feature gate must be enabled to pass this flag. (DEPRECATED: Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA. It is planned to be removed from Kubernetes in the version 1.23. Please use alternative ways to update kubelet configuration.) +The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The DynamicKubeletConfig feature gate must be enabled to pass this flag. (DEPRECATED: Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA. It is planned to be removed from Kubernetes in the version 1.24 or later. Please use alternative ways to update kubelet configuration.) @@ -395,21 +395,21 @@ kubelet [flags] --experimental-allocatable-ignore-eviction     Default: false -When set to true, hard eviction thresholds will be ignored while calculating node allocatable. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. (DEPRECATED: will be removed in 1.23) +When set to true, hard eviction thresholds will be ignored while calculating node allocatable. See https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/ for more details. (DEPRECATED: will be removed in 1.24 or later) --experimental-check-node-capabilities-before-mount -[Experimental] if set to true, the kubelet will check the underlying node for required components (binaries, etc.) before performing the mount (DEPRECATED: will be removed in 1.23, in favor of using CSI.) +[Experimental] if set to true, the kubelet will check the underlying node for required components (binaries, etc.) before performing the mount (DEPRECATED: will be removed in 1.24 or later, in favor of using CSI.) --experimental-kernel-memcg-notification -Use kernelMemcgNotification configuration, this flag will be removed in 1.23. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) +Use kernelMemcgNotification configuration, this flag will be removed in 1.24 or later. (DEPRECATED: This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.) @@ -423,7 +423,7 @@ kubelet [flags] --experimental-mounter-path string     Default: mount -[Experimental] Path of mounter binary. Leave empty to use the default mount. (DEPRECATED: will be removed in 1.23, in favor of using CSI.) +[Experimental] Path of mounter binary. Leave empty to use the default mount. (DEPRECATED: will be removed in 1.24 or later, in favor of using CSI.) 
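
Several of the deprecation notices above point away from command-line flags and toward the kubelet configuration file. As a sketch of what that looks like, the settings live in a `KubeletConfiguration` object that the kubelet reads via its `--config` flag; the field values here are illustrative only, not recommendations:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Configuration-file equivalents of settings that deprecated flags used to carry.
kernelMemcgNotification: true   # replaces --experimental-kernel-memcg-notification
evictionHard:
  memory.available: "200Mi"     # illustrative hard eviction threshold
```

Starting the kubelet with `--config=/path/to/kubelet-config.yaml` applies these values; any flags still passed on the command line override the corresponding fields from the file.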
diff --git a/content/en/docs/reference/labels-annotations-taints/_index.md b/content/en/docs/reference/labels-annotations-taints/_index.md index 9250d07b8e..522937ba8d 100644 --- a/content/en/docs/reference/labels-annotations-taints/_index.md +++ b/content/en/docs/reference/labels-annotations-taints/_index.md @@ -478,7 +478,7 @@ the settings you specify apply to all containers in that Pod. ### container.seccomp.security.alpha.kubernetes.io/[NAME] {#container-seccomp-security-alpha-kubernetes-io} This annotation has been deprecated since Kubernetes v1.19 and will become non-functional in v1.25. -The tutorial [Restrict a Container's Syscalls with seccomp](/docs/tutorials/clusters/seccomp/) takes +The tutorial [Restrict a Container's Syscalls with seccomp](/docs/tutorials/security/seccomp/) takes you through the steps you follow to apply a seccomp profile to a Pod or to one of its containers. That tutorial covers the supported mechanism for configuring seccomp in Kubernetes, based on setting `securityContext` within the Pod's `.spec`. diff --git a/content/en/docs/tasks/administer-cluster/access-cluster-services.md b/content/en/docs/tasks/access-application-cluster/access-cluster-services.md similarity index 100% rename from content/en/docs/tasks/administer-cluster/access-cluster-services.md rename to content/en/docs/tasks/access-application-cluster/access-cluster-services.md diff --git a/content/en/docs/tasks/access-application-cluster/access-cluster.md b/content/en/docs/tasks/access-application-cluster/access-cluster.md index 3bd994f80b..8e89e12a59 100644 --- a/content/en/docs/tasks/access-application-cluster/access-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/access-cluster.md @@ -237,7 +237,9 @@ In each case, the credentials of the pod are used to communicate securely with t ## Accessing services running on the cluster -The previous section describes how to connect to the Kubernetes API server. For information about connecting to other services running on a Kubernetes cluster, see [Access Cluster Services.](/docs/tasks/administer-cluster/access-cluster-services/) +The previous section describes how to connect to the Kubernetes API server. +For information about connecting to other services running on a Kubernetes cluster, see +[Access Cluster Services](/docs/tasks/access-application-cluster/access-cluster-services/). ## Requesting redirects diff --git a/content/en/docs/tasks/administer-cluster/coredns.md b/content/en/docs/tasks/administer-cluster/coredns.md index 54163058c8..43a75275b8 100644 --- a/content/en/docs/tasks/administer-cluster/coredns.md +++ b/content/en/docs/tasks/administer-cluster/coredns.md @@ -19,11 +19,14 @@ This page describes the CoreDNS upgrade process and how to install CoreDNS inste ## About CoreDNS -[CoreDNS](https://coredns.io) is a flexible, extensible DNS server that can serve as the Kubernetes cluster DNS. -Like Kubernetes, the CoreDNS project is hosted by the {{< glossary_tooltip text="CNCF" term_id="cncf" >}}. +[CoreDNS](https://coredns.io) is a flexible, extensible DNS server +that can serve as the Kubernetes cluster DNS. +Like Kubernetes, the CoreDNS project is hosted by the +{{< glossary_tooltip text="CNCF" term_id="cncf" >}}. -You can use CoreDNS instead of kube-dns in your cluster by replacing kube-dns in an existing -deployment, or by using tools like kubeadm that will deploy and upgrade the cluster for you. 
+You can use CoreDNS instead of kube-dns in your cluster by replacing +kube-dns in an existing deployment, or by using tools like kubeadm +that will deploy and upgrade the cluster for you. ## Installing CoreDNS @@ -34,51 +37,44 @@ For manual deployment or replacement of kube-dns, see the documentation at the ### Upgrading an existing cluster with kubeadm -In Kubernetes version 1.10 and later, you can also move to CoreDNS when you use `kubeadm` to upgrade -a cluster that is using `kube-dns`. In this case, `kubeadm` will generate the CoreDNS configuration +In Kubernetes version 1.21, kubeadm removed its support for `kube-dns` as a DNS application. +For `kubeadm` v{{< skew currentVersion >}}, the only supported cluster DNS application +is CoreDNS. + +You can move to CoreDNS when you use `kubeadm` to upgrade a cluster that is +using `kube-dns`. In this case, `kubeadm` generates the CoreDNS configuration ("Corefile") based upon the `kube-dns` ConfigMap, preserving configurations for stub domains, and upstream name server. -If you are moving from kube-dns to CoreDNS, make sure to set the `CoreDNS` feature gate to `true` -during an upgrade. For example, here is what a `v1.11.0` upgrade would look like: -``` -kubeadm upgrade apply v1.11.0 --feature-gates=CoreDNS=true -``` - -In Kubernetes version 1.13 and later the `CoreDNS` feature gate is removed and CoreDNS -is used by default. - -In versions prior to 1.11 the Corefile will be **overwritten** by the one created during upgrade. -**You should save your existing ConfigMap if you have customized it.** You may re-apply your -customizations after the new ConfigMap is up and running. - -If you are running CoreDNS in Kubernetes version 1.11 and later, during upgrade, -your existing Corefile will be retained. - -In Kubernetes version 1.21, support for `kube-dns` is removed from kubeadm. - ## Upgrading CoreDNS -CoreDNS is available in Kubernetes since v1.9. -You can check the version of CoreDNS shipped with Kubernetes and the changes made to CoreDNS [here](https://github.com/coredns/deployment/blob/master/kubernetes/CoreDNS-k8s_version.md). +You can check the version of CoreDNS that kubeadm installs for each version of +Kubernetes in the page +[CoreDNS version in Kubernetes](https://github.com/coredns/deployment/blob/master/kubernetes/CoreDNS-k8s_version.md). + +CoreDNS can be upgraded manually in case you want to only upgrade CoreDNS +or use your own custom image. +There is a helpful [guideline and walkthrough](https://github.com/coredns/deployment/blob/master/kubernetes/Upgrading_CoreDNS.md) +available to ensure a smooth upgrade. +Make sure the existing CoreDNS configuration ("Corefile") is retained when +upgrading your cluster. + +If you are upgrading your cluster using the `kubeadm` tool, `kubeadm` +can take care of retaining the existing CoreDNS configuration automatically. -CoreDNS can be upgraded manually in case you want to only upgrade CoreDNS or use your own custom image. -There is a helpful [guideline and walkthrough](https://github.com/coredns/deployment/blob/master/kubernetes/Upgrading_CoreDNS.md) available to ensure a smooth upgrade. ## Tuning CoreDNS -When resource utilisation is a concern, it may be useful to tune the configuration of CoreDNS. For more details, check out the +When resource utilisation is a concern, it may be useful to tune the +configuration of CoreDNS. For more details, check out the [documentation on scaling CoreDNS](https://github.com/coredns/deployment/blob/master/kubernetes/Scaling_CoreDNS.md). 
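As a quick orientation for the upgrade and tuning steps described above, the sketch below shows one way to inspect the running CoreDNS configuration and version. It assumes a kubeadm-style cluster where CoreDNS runs in the `kube-system` namespace with a ConfigMap and Deployment both named `coredns`; adjust the names if your cluster differs.

```shell
# Show the current Corefile (the CoreDNS configuration discussed above).
kubectl -n kube-system get configmap coredns -o yaml

# Show which CoreDNS image (and therefore version) the cluster is running.
kubectl -n kube-system get deployment coredns \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```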
- - ## {{% heading "whatsnext" %}} - You can configure [CoreDNS](https://coredns.io) to support many more use cases than -kube-dns by modifying the `Corefile`. For more information, see the -[CoreDNS site](https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/). - - - +kube-dns does by modifying the CoreDNS configuration ("Corefile"). +For more information, see the [documentation](https://coredns.io/plugins/kubernetes/) +for the `kubernetes` CoreDNS plugin, or read the +[Custom DNS Entries for Kubernetes](https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/). +in the CoreDNS blog. diff --git a/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md b/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md index ded295635b..9f46360a5c 100644 --- a/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md +++ b/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md @@ -50,7 +50,7 @@ plugins: # Array of authenticated usernames to exempt. usernames: [] # Array of runtime class names to exempt. - runtimeClassNames: [] + runtimeClasses: [] # Array of namespaces to exempt. namespaces: [] ``` diff --git a/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md b/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md index 579adcb736..d50b3e91a5 100644 --- a/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md +++ b/content/en/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md @@ -6,20 +6,15 @@ weight: 40 -This page shows how a Pod can use a DownwardAPIVolumeFile to expose information -about itself to Containers running in the Pod. A DownwardAPIVolumeFile can expose -Pod fields and Container fields. - - - +This page shows how a Pod can use a +[`DownwardAPIVolumeFile`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core) +to expose information about itself to Containers running in the Pod. +A `DownwardAPIVolumeFile` can expose Pod fields and Container fields. ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - ## The Downward API @@ -27,10 +22,10 @@ Pod fields and Container fields. There are two ways to expose Pod and Container fields to a running Container: * [Environment variables](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/#the-downward-api) -* Volume Files +* Volume files Together, these two ways of exposing Pod and Container fields are called the -*Downward API*. +"Downward API". 
## Store Pod fields

@@ -60,13 +55,13 @@ Create the Pod:
kubectl apply -f https://k8s.io/examples/pods/inject/dapi-volume.yaml
```

-Verify that the Container in the Pod is running:
+Verify that the container in the Pod is running:

```shell
kubectl get pods
```

-View the Container's logs:
+View the container's logs:

```shell
kubectl logs kubernetes-downwardapi-volume-example
@@ -83,7 +78,7 @@ build="two"
builder="john-doe"
```

-Get a shell into the Container that is running in your Pod:
+Get a shell into the container that is running in your Pod:

```shell
kubectl exec -it kubernetes-downwardapi-volume-example -- sh
@@ -136,8 +131,7 @@ total 8

Using symbolic links enables dynamic atomic refresh of the metadata; updates are
written to a new temporary directory, and the `..data` symlink is updated
-atomically using
-[rename(2)](http://man7.org/linux/man-pages/man2/rename.2.html).
+atomically using [rename(2)](http://man7.org/linux/man-pages/man2/rename.2.html).

{{< note >}}
A container using Downward API as a
@@ -153,17 +147,19 @@ Exit the shell:

## Store Container fields

-The preceding exercise, you stored Pod fields in a DownwardAPIVolumeFile.
+In the preceding exercise, you stored Pod fields in a
+[`DownwardAPIVolumeFile`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core).
In this next exercise, you store Container fields. Here is the configuration
file for a Pod that has one Container:

{{< codenew file="pods/inject/dapi-volume-resources.yaml" >}}

-In the configuration file, you can see that the Pod has a `downwardAPI` Volume,
-and the Container mounts the Volume at `/etc/podinfo`.
+In the configuration file, you can see that the Pod has a
+[`downwardAPI` volume](/docs/concepts/storage/volumes/#downwardapi),
+and the Container mounts the volume at `/etc/podinfo`.

Look at the `items` array under `downwardAPI`. Each element of the array is a
-DownwardAPIVolumeFile.
+[`DownwardAPIVolumeFile`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core).

The first element specifies that in the Container named `client-container`,
the value of the `limits.cpu` field in the format specified by `1m` should be
@@ -176,7 +172,7 @@ Create the Pod:
kubectl apply -f https://k8s.io/examples/pods/inject/dapi-volume-resources.yaml
```

-Get a shell into the Container that is running in your Pod:
+Get a shell into the container that is running in your Pod:

```shell
kubectl exec -it kubernetes-downwardapi-volume-example-2 -- sh
@@ -187,46 +183,56 @@ In your shell, view the `cpu_limit` file:

```shell
/# cat /etc/podinfo/cpu_limit
```
+
You can use similar commands to view the `cpu_request`, `mem_limit`
and `mem_request` files.
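The manifest referenced by the `codenew` shortcode above is not included in this diff. As a rough sketch only (the image, command, resource values and the second item are illustrative assumptions; only the Pod name, container name and mount path follow the surrounding text), a container-field `downwardAPI` volume typically looks something like this:

```yaml
# Illustrative sketch; the canonical file is pods/inject/dapi-volume-resources.yaml
apiVersion: v1
kind: Pod
metadata:
  name: kubernetes-downwardapi-volume-example-2
spec:
  containers:
    - name: client-container
      image: busybox:1.28            # image choice is an assumption
      command: ["sh", "-c", "sleep 3600"]
      resources:
        requests:
          cpu: 125m
          memory: 32Mi
        limits:
          cpu: 250m
          memory: 64Mi
      volumeMounts:
        - name: podinfo
          mountPath: /etc/podinfo
  volumes:
    - name: podinfo
      downwardAPI:
        items:
          # Exposes the container's CPU limit as /etc/podinfo/cpu_limit, in millicores.
          - path: "cpu_limit"
            resourceFieldRef:
              containerName: client-container
              resource: limits.cpu
              divisor: 1m
          # Exposes the container's CPU request as /etc/podinfo/cpu_request.
          - path: "cpu_request"
            resourceFieldRef:
              containerName: client-container
              resource: requests.cpu
              divisor: 1m
```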
- - + ## Capabilities of the Downward API The following information is available to containers through environment variables and `downwardAPI` volumes: * Information available via `fieldRef`: + * `metadata.name` - the pod's name * `metadata.namespace` - the pod's namespace * `metadata.uid` - the pod's UID - * `metadata.labels['']` - the value of the pod's label `` (for example, `metadata.labels['mylabel']`) - * `metadata.annotations['']` - the value of the pod's annotation `` (for example, `metadata.annotations['myannotation']`) + * `metadata.labels['']` - the value of the pod's label `` + (for example, `metadata.labels['mylabel']`) + * `metadata.annotations['']` - the value of the pod's annotation `` + (for example, `metadata.annotations['myannotation']`) + * Information available via `resourceFieldRef`: + * A Container's CPU limit * A Container's CPU request * A Container's memory limit * A Container's memory request - * A Container's hugepages limit (providing that the `DownwardAPIHugePages` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled) - * A Container's hugepages request (providing that the `DownwardAPIHugePages` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled) + * A Container's hugepages limit (provided that the `DownwardAPIHugePages` + [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled) + * A Container's hugepages request (provided that the `DownwardAPIHugePages` + [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled) * A Container's ephemeral-storage limit * A Container's ephemeral-storage request In addition, the following information is available through `downwardAPI` volume `fieldRef`: -* `metadata.labels` - all of the pod's labels, formatted as `label-key="escaped-label-value"` with one label per line -* `metadata.annotations` - all of the pod's annotations, formatted as `annotation-key="escaped-annotation-value"` with one annotation per line +* `metadata.labels` - all of the pod's labels, formatted as `label-key="escaped-label-value"` + with one label per line +* `metadata.annotations` - all of the pod's annotations, formatted as + `annotation-key="escaped-annotation-value"` with one annotation per line The following information is available through environment variables: * `status.podIP` - the pod's IP address -* `spec.serviceAccountName` - the pod's service account name, available since v1.4.0-alpha.3 -* `spec.nodeName` - the node's name, available since v1.4.0-alpha.3 -* `status.hostIP` - the node's IP, available since v1.7.0-alpha.1 +* `spec.serviceAccountName` - the pod's service account name +* `spec.nodeName` - the name of the node to which the scheduler always attempts to + schedule the pod +* `status.hostIP` - the IP of the node to which the Pod is assigned {{< note >}} If CPU and memory limits are not specified for a Container, the @@ -241,7 +247,7 @@ basis. For more information, see ## Motivation for the Downward API -It is sometimes useful for a Container to have information about itself, without +It is sometimes useful for a container to have information about itself, without being overly coupled to Kubernetes. The Downward API allows containers to consume information about themselves or the cluster without using the Kubernetes client or API server. @@ -252,19 +258,17 @@ application, but that is tedious and error prone, and it violates the goal of lo coupling. 
A better option would be to use the Pod's name as an identifier, and inject the Pod's name into the well-known environment variable. - - - ## {{% heading "whatsnext" %}} - -* [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core) -* [Volume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core) -* [DownwardAPIVolumeSource](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumesource-v1-core) -* [DownwardAPIVolumeFile](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core) -* [ResourceFieldSelector](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcefieldselector-v1-core) - - - - +* Check the [`PodSpec`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core) + API definition which defines the desired state of a Pod. +* Check the [`Volume`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#volume-v1-core) + API definition which defines a generic volume in a Pod for containers to access. +* Check the [`DownwardAPIVolumeSource`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumesource-v1-core) + API definition which defines a volume that contains Downward API information. +* Check the [`DownwardAPIVolumeFile`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#downwardapivolumefile-v1-core) + API definition which contains references to object or resource fields for + populating a file in the Downward API volume. +* Check the [`ResourceFieldSelector`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcefieldselector-v1-core) + API definition which specifies the container resources and their output format. diff --git a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md index f391b37e09..cca985705b 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -308,7 +308,7 @@ metadata: type: Opaque ``` -Like ConfigMaps, generated Secrets can be used in Deployments by refering to the name of the secretGenerator: +Like ConfigMaps, generated Secrets can be used in Deployments by referring to the name of the secretGenerator: ```shell # Create a password.txt file diff --git a/content/en/docs/tasks/tools/included/optional-kubectl-configs-zsh.md b/content/en/docs/tasks/tools/included/optional-kubectl-configs-zsh.md index b7d9044605..176bdeeeb1 100644 --- a/content/en/docs/tasks/tools/included/optional-kubectl-configs-zsh.md +++ b/content/en/docs/tasks/tools/included/optional-kubectl-configs-zsh.md @@ -12,16 +12,11 @@ To do so in all your shell sessions, add the following to your `~/.zshrc` file: source <(kubectl completion zsh) ``` -If you have an alias for kubectl, you can extend shell completion to work with that alias: - -```zsh -echo 'alias k=kubectl' >>~/.zshrc -echo 'compdef __start_kubectl k' >>~/.zshrc -``` +If you have an alias for kubectl, kubectl autocompletion will automatically work with it. After reloading your shell, kubectl autocompletion should be working. 
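To tie the zsh steps above together, a minimal `~/.zshrc` arrangement might look like the sketch below. The alias name `k` is only an example, and the `compinit` lines are the ones the note that follows tells you to add if completion is not initialised yet.

```zsh
# Sketch of a minimal ~/.zshrc for kubectl completion (assumes kubectl is on PATH).
autoload -Uz compinit
compinit
source <(kubectl completion zsh)   # enable kubectl completion
alias k=kubectl                    # completion also applies to the alias automatically
```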
-If you get an error like `complete:13: command not found: compdef`, then add the following to the beginning of your `~/.zshrc` file:
+If you get an error like `2: command not found: compdef`, then add the following to the beginning of your `~/.zshrc` file:

```zsh
autoload -Uz compinit
diff --git a/content/en/docs/tutorials/security/apparmor.md b/content/en/docs/tutorials/security/apparmor.md
index 992841e356..727b267608 100644
--- a/content/en/docs/tutorials/security/apparmor.md
+++ b/content/en/docs/tutorials/security/apparmor.md
@@ -382,27 +382,13 @@ If you do not want AppArmor to be available on your cluster, it can be disabled
```

When disabled, any Pod that includes an AppArmor profile will fail validation with a "Forbidden"
-error. Note that by default docker always enables the "docker-default" profile on non-privileged
-pods (if the AppArmor kernel module is enabled), and will continue to do so even if the feature-gate
-is disabled. The option to disable AppArmor will be removed when AppArmor graduates to general
+error.
+
+{{< note >}}
+Even if the Kubernetes feature is disabled, runtimes may still enforce the default profile. The
+option to disable the AppArmor feature will be removed when AppArmor graduates to general
availability (GA).
-
-### Upgrading to Kubernetes v1.4 with AppArmor
-
-No action is required with respect to AppArmor to upgrade your cluster to v1.4. However, if any
-existing pods had an AppArmor annotation, they will not go through validation (or PodSecurityPolicy
-admission). If permissive profiles are loaded on the nodes, a malicious user could pre-apply a
-permissive profile to escalate the pod privileges above the docker-default. If this is a concern, it
-is recommended to scrub the cluster of any pods containing an annotation with
-`apparmor.security.beta.kubernetes.io`.
-
-### Upgrade path to General Availability
-
-When AppArmor is ready to be graduated to general availability (GA), the options currently specified
-through annotations will be converted to fields. Supporting all the upgrade and downgrade paths
-through the transition is very nuanced, and will be explained in detail when the transition
-occurs. We will commit to supporting both fields and annotations for at least 2 releases, and will
-explicitly reject the annotations for at least 2 releases after that.
+{{< /note >}}

## Authoring Profiles

@@ -415,10 +401,6 @@ tools to help with that:
* [bane](https://github.com/jfrazelle/bane) is an AppArmor profile generator for Docker that uses a
  simplified profile language.

-It is recommended to run your application through Docker on a development workstation to generate
-the profiles, but there is nothing preventing running the tools on the Kubernetes node where your
-Pod is running.
-
To debug problems with AppArmor, you can check the system logs to see what, specifically, was
denied. AppArmor logs verbose messages to `dmesg`, and errors can usually be found in the system
logs or through `journalctl`. More information is provided in
@@ -441,9 +423,8 @@ Specifying the profile a container will run with:

- `runtime/default`: Refers to the default runtime profile.
  - Equivalent to not specifying a profile (without a PodSecurityPolicy default), except it still
    requires AppArmor to be enabled.
-  - For Docker, this resolves to the
-    [`docker-default`](https://docs.docker.com/engine/security/apparmor/) profile for non-privileged
-    containers, and unconfined (no profile) for privileged containers.
+  - In practice, many container runtimes use the same OCI default profile, defined here:
+    https://github.com/containers/common/blob/main/pkg/apparmor/apparmor_linux_template.go
- `localhost/<profile_ref>`: Refers to a profile loaded on the node (localhost) by name.
  - The possible profile names are detailed in the
    [core policy reference](https://gitlab.com/apparmor/apparmor/wikis/AppArmor_Core_Policy_Reference#profile-names-and-attachment-specifications).
@@ -474,5 +455,3 @@ Additional resources:

* [Quick guide to the AppArmor profile language](https://gitlab.com/apparmor/apparmor/wikis/QuickProfileLanguage)
* [AppArmor core policy reference](https://gitlab.com/apparmor/apparmor/wikis/Policy_Layout)
-
-
diff --git a/content/en/docs/tutorials/security/cluster-level-pss.md b/content/en/docs/tutorials/security/cluster-level-pss.md
index fb610f99a9..4da0502aca 100644
--- a/content/en/docs/tutorials/security/cluster-level-pss.md
+++ b/content/en/docs/tutorials/security/cluster-level-pss.md
@@ -12,7 +12,7 @@ Pod Security admission (PSA) is enabled by default in v1.23 and later, as it has
[graduated to beta](/blog/2021/12/09/pod-security-admission-beta/). Pod Security
is an admission controller that carries out checks against the Kubernetes
-[Pod Security Standards](docs/concepts/security/pod-security-standards/) when new pods are
+[Pod Security Standards](/docs/concepts/security/pod-security-standards/) when new pods are
created. This tutorial shows you how to enforce the `baseline` Pod Security
Standard at the cluster level which applies a standard configuration
to all namespaces in a cluster.
diff --git a/content/en/docs/tutorials/stateless-application/guestbook.md b/content/en/docs/tutorials/stateless-application/guestbook.md
index c31bcbc49e..78e19ad34a 100644
--- a/content/en/docs/tutorials/stateless-application/guestbook.md
+++ b/content/en/docs/tutorials/stateless-application/guestbook.md
@@ -243,7 +243,7 @@ pointing to. This IP address is accessible only within the cluster.

If you want guests to be able to access your guestbook, you must configure the
frontend Service to be externally visible, so a client can request the Service
-from outside the Kubernetes cluster. However a Kubernetes user you can use
+from outside the Kubernetes cluster. However, a Kubernetes user can use
`kubectl port-forward` to access the service even though it uses a
`ClusterIP`.

diff --git a/content/es/docs/concepts/workloads/pods/disruptions.md b/content/es/docs/concepts/workloads/pods/disruptions.md
new file mode 100644
index 0000000000..1cd146c001
--- /dev/null
+++ b/content/es/docs/concepts/workloads/pods/disruptions.md
@@ -0,0 +1,273 @@
+---
+reviewers:
+- electrocucaracha
+- raelga
+- gamba47
+title: Interrupciones
+content_type: concept
+weight: 60
+---
+
+
+Esta guía es para los dueños de aplicaciones que quieren crear
+aplicaciones con alta disponibilidad y que necesitan entender
+qué tipos de interrupciones pueden suceder en los Pods.
+
+También es para los administradores de clústers que quieren aplicar acciones
+automatizadas en sus clústers, como actualizar o autoescalar los clústers.
+
+
+
+## Interrupciones voluntarias e involuntarias
+
+Los Pods no desaparecen hasta que algo (una persona o un controlador) los destruye
+o hay problemas de hardware o software que son inevitables.
+
+Nosotros llamamos a esos casos inevitables *interrupciones involuntarias* de
+una aplicación.
+Algunos ejemplos:
+
+- Una falla en hardware de la máquina física del nodo
+- Un administrador del clúster borra una VM (instancia) por error
+- El proveedor de la nube o el hipervisor falla y hace desaparecer la VM
+- Un kernel panic
+- El nodo desaparece del clúster por un problema de red que lo separa del clúster
+- Una remoción del Pod porque el nodo [no tiene recursos suficientes](/docs/concepts/scheduling-eviction/node-pressure-eviction/).
+
+A excepción de la condición sin recursos suficientes, todas estas condiciones
+deben ser familiares para la mayoría de los usuarios, no son específicas
+de Kubernetes.
+
+Nosotros llamamos a los otros casos *interrupciones voluntarias*. Estas incluyen
+las acciones iniciadas por el dueño de la aplicación y aquellas iniciadas por el Administrador
+del Clúster. Las acciones típicas de los dueños de la aplicación incluyen:
+
+- borrar el Deployment u otro controlador que maneja el Pod
+- actualizar el Deployment del Pod que causa un reinicio
+- borrar un Pod (por ejemplo, por accidente)
+
+Las acciones del administrador del clúster incluyen:
+
+- [Drenar un nodo](/docs/tasks/administer-cluster/safely-drain-node/) para reparar o actualizar.
+- Drenar un nodo del clúster para reducir el clúster (aprenda acerca de
+  [Autoescalamiento de Clúster](https://github.com/kubernetes/autoscaler/#readme)).
+- Remover un Pod de un nodo para permitir que otra cosa pueda ingresar a ese nodo.
+
+Estas acciones pueden ser realizadas directamente por el administrador del clúster, por
+tareas automatizadas del administrador del clúster o por el proveedor del clúster.
+
+Consulte al administrador de su clúster, a su proveedor de la nube o a la documentación de su distribución
+para determinar si alguna de estas interrupciones voluntarias está habilitada en su clúster.
+Si ninguna se encuentra habilitada, puede omitir la creación del presupuesto de Interrupción de Pods.
+
+{{< caution >}}
+No todas las interrupciones voluntarias son consideradas por el presupuesto de interrupción de Pods. Por ejemplo,
+borrar directamente un Deployment o un Pod se salta el presupuesto de interrupción de Pods.
+{{< /caution >}}
+
+## Tratando con las interrupciones
+
+Estas son algunas de las maneras para mitigar las interrupciones involuntarias:
+
+- Asegurarse de que el Pod [solicite los recursos](/docs/tasks/configure-pod-container/assign-memory-resource) que necesita.
+- Replique su aplicación si usted necesita alta disponibilidad. (Aprenda sobre correr aplicaciones replicadas
+  [stateless](/docs/tasks/run-application/run-stateless-application-deployment/)
+  y [stateful](/docs/tasks/run-application/run-replicated-stateful-application/).)
+- Incluso, para una alta disponibilidad mayor cuando se corren aplicaciones replicadas,
+  propague las aplicaciones por varios racks (usando
+  [anti-affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity);
+  vea el boceto de ejemplo más abajo)
+  o usando zonas (si usa un [clúster multi-zona](/docs/setup/multiple-zones).)
+
+La frecuencia de las interrupciones voluntarias varía. En un clúster básico de Kubernetes, no hay
+interrupciones voluntarias automáticas (solo el usuario las genera). Sin embargo, su administrador del clúster o proveedor de alojamiento
+puede correr algún servicio adicional que pueda causar estas interrupciones voluntarias. Por ejemplo,
+desplegar una actualización de software en los nodos puede causar interrupciones. También, algunas implementaciones
+de clústers con autoescalamiento de nodos pueden causar interrupciones para desfragmentar o compactar los nodos.
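Como ilustración del punto sobre anti-affinity mencionado en la lista anterior, un fragmento de especificación de Pod podría verse como el siguiente boceto; la etiqueta `app: mi-app` y la clave de topología son solo suposiciones a modo de ejemplo y no forman parte del documento original:

```yaml
# Boceto ilustrativo: intenta repartir las réplicas con la misma etiqueta en nodos distintos.
spec:
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: mi-app              # etiqueta de ejemplo
          topologyKey: kubernetes.io/hostname
```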
+Su administrador de clúster o proveedor de alojamiento debe tener documentado cuál es el nivel de interrupciones
+voluntarias esperadas, si es que las hay. Ciertas opciones de configuración, como
+[usar PriorityClasses](/docs/concepts/scheduling-eviction/pod-priority-preemption/)
+en las especificaciones de su Pod, pueden también causar interrupciones voluntarias (o involuntarias).
+
+
+## Presupuesto de Interrupción de Pods
+
+{{< feature-state for_k8s_version="v1.21" state="stable" >}}
+
+Kubernetes ofrece características para ayudar a ejecutar aplicaciones con alta disponibilidad, incluso cuando usted
+introduce interrupciones voluntarias frecuentes.
+
+Como dueño de la aplicación, usted puede crear un presupuesto de interrupción de Pods (PDB por sus siglas en inglés) para cada aplicación.
+Un PDB limita el número de Pods de una aplicación replicada que están caídos de manera simultánea por
+interrupciones voluntarias. Por ejemplo, una aplicación basada en quórum puede
+asegurarse de que el número de réplicas corriendo nunca sea menor al
+número necesitado para obtener el quórum. Una web de tipo front end puede querer
+asegurarse de que el número de réplicas atendiendo al tráfico nunca caiga por debajo de un cierto
+porcentaje del total.
+
+Los administradores del clúster y proveedores de hosting pueden usar herramientas que
+respeten el presupuesto de interrupción de Pods utilizando la [API de Desalojo](/docs/tasks/administer-cluster/safely-drain-node/#eviction-api)
+en vez de directamente borrar Pods o Deployments.
+
+Por ejemplo, el subcomando `kubectl drain` le permite marcar un nodo en un modo fuera de
+servicio. Cuando se ejecuta `kubectl drain`, la herramienta trata de quitar todos los Pods en
+el nodo que se está dejando fuera de servicio. La petición de desalojo que `kubectl` solicita en
+su nombre puede ser temporalmente denegada, entonces la herramienta periódicamente reintenta todas las
+peticiones fallidas hasta que todos los Pods en el nodo afectado son terminados o hasta que el tiempo de espera,
+que puede ser configurado, es alcanzado.
+
+Un PDB especifica el número de réplicas que una aplicación puede tolerar, relativo a cuántas
+se pretende tener. Por ejemplo, un Deployment que tiene un `.spec.replicas: 5` se
+supone que tiene 5 Pods en cualquier momento. Si su PDB permite tener 4 a la vez,
+entonces la API de Desalojo va a permitir interrupciones voluntarias de un Pod (pero no de dos) a la vez.
+
+El grupo de Pods que comprende a la aplicación está especificado usando una etiqueta selectora, la misma
+que es usada por el controlador de la aplicación (deployment, stateful-set, etc.).
+
+El número de Pods "deseado" es calculado a partir del `.spec.replicas` del recurso Workload
+que maneja esos Pods. El plano de control descubre el recurso Workload al que pertenece el Pod
+examinando las `.metadata.ownerReferences` del Pod.
+
+Las [Interrupciones Involuntarias](#voluntary-and-involuntary-disruptions) no pueden ser prevenidas por los PDB; pero sí
+son contabilizadas en este presupuesto.
+
+Los Pods que son borrados o no están disponibles debido a una actualización continua de una aplicación forman parte del presupuesto de interrupciones, pero los recursos Workload (como los Deployments y StatefulSet)
+no están limitados por los PDBs cuando se hacen actualizaciones continuas. En cambio, la administración de fallas
+durante la actualización de la aplicación es configurada en la especificación para este recurso Workload específico.
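A modo de ilustración de lo descrito arriba, un PDB mínimo podría tener la siguiente forma; el nombre `ejemplo-pdb` y la etiqueta `app: ejemplo` son supuestos de ejemplo y no forman parte del documento original:

```yaml
# Boceto ilustrativo de un PodDisruptionBudget que exige al menos 2 Pods disponibles.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: ejemplo-pdb
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: ejemplo   # debe coincidir con la etiqueta selectora del controlador
```

Como alternativa, el campo `maxUnavailable` puede usarse en lugar de `minAvailable`.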
+
+Cuando un Pod es quitado usando la API de desalojo, este es
+[terminado](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) correctamente, haciendo honor al
+`terminationGracePeriodSeconds` configurado en su [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core).
+
+## Ejemplo de Presupuesto de Interrupción de Pods {#pdb-example}
+
+Considere un clúster con 3 nodos, `nodo-1` hasta `nodo-3`.
+El clúster está corriendo varias aplicaciones. Una de ellas tiene 3 réplicas, que llamaremos
+`pod-a`, `pod-b`, y `pod-c`. Otro Pod no relacionado y sin PDB, llamado `pod-x`, también se muestra.
+
+Inicialmente los pods están distribuidos de esta manera:
+
+
+| nodo-1 | nodo-2 | nodo-3 |
+|:--------------------:|:-------------------:|:------------------:|
+| pod-a *available* | pod-b *available* | pod-c *available* |
+| pod-x *available* | | |
+
+Los 3 Pods son parte de un Deployment, y colectivamente tienen un PDB que requiere
+que por lo menos 2 de los 3 Pods estén disponibles todo el tiempo.
+
+Por ejemplo, supongamos que el administrador del clúster quiere reiniciar para actualizar el kernel y arreglar un bug.
+El administrador del clúster primero intenta desocupar el `nodo-1` usando el comando `kubectl drain`.
+La herramienta intenta desalojar a los pods `pod-a` y `pod-x`. Esto tiene éxito inmediatamente.
+Ambos Pods van al estado `terminating` al mismo tiempo.
+Esto pone al clúster en el siguiente estado:
+
+| nodo-1 *draining* | nodo-2 | nodo-3 |
+|:--------------------:|:-------------------:|:------------------:|
+| pod-a *terminating* | pod-b *available* | pod-c *available* |
+| pod-x *terminating* | | |
+
+El Deployment detecta que uno de los Pods está terminando, entonces crea un reemplazo
+llamado `pod-d`. Como el `nodo-1` está bloqueado, el pod termina en otro nodo. Además, algo
+ha creado el pod `pod-y` como un reemplazo del `pod-x`.
+
+(Nota: para un StatefulSet, `pod-a`, que sería llamado algo como `pod-0`, necesitaría terminar completamente antes de que su reemplazo, que también se llama `pod-0` pero tiene un UID diferente, pueda ser creado. Salvo por eso, el ejemplo también aplica a un StatefulSet.)
+
+Ahora el clúster está en este estado:
+
+| nodo-1 *draining* | nodo-2 | nodo-3 |
+|:--------------------:|:-------------------:|:------------------:|
+| pod-a *terminating* | pod-b *available* | pod-c *available* |
+| pod-x *terminating* | pod-d *starting* | pod-y |
+
+En algún punto, los Pods finalizan y el clúster se ve de esta forma:
+
+| nodo-1 *drained* | nodo-2 | nodo-3 |
+|:--------------------:|:-------------------:|:------------------:|
+| | pod-b *available* | pod-c *available* |
+| | pod-d *starting* | pod-y |
+
+En este estado, si un administrador del clúster impaciente intenta desalojar el `nodo-2` o el
+`nodo-3`, el comando drain va a ser bloqueado, porque hay solamente 2 Pods disponibles para
+el Deployment y el PDB requiere por lo menos 2. Después de pasado un tiempo, el `pod-d` está disponible.
+
+El estado del clúster ahora se ve así:
+
+| nodo-1 *drained* | nodo-2 | nodo-3 |
+|:--------------------:|:-------------------:|:------------------:|
+| | pod-b *available* | pod-c *available* |
+| | pod-d *available* | pod-y |
+
+Ahora, el administrador del clúster desaloja el `nodo-2`.
+El comando drain tratará de desalojar a los 2 Pods en algún orden, digamos
+primero el `pod-b` y después el `pod-d`. Va a tener éxito en quitar el `pod-b`.
+Pero cuando intente desalojar al `pod-d`, la petición va a ser rechazada porque esto dejaría
+solamente un Pod disponible para el Deployment.
+
+El Deployment crea un reemplazo para el `pod-b` llamado `pod-e`.
+Dado que no hay recursos suficientes disponibles en el clúster para programar
+el `pod-e`, el desalojo será bloqueado nuevamente. El clúster va a terminar en este
+estado:
+
+| nodo-1 *drained* | nodo-2 | nodo-3 | *no node* |
+|:--------------------:|:-------------------:|:------------------:|:------------------:|
+| | pod-b *terminating* | pod-c *available* | pod-e *pending* |
+| | pod-d *available* | pod-y | |
+
+Ahora, el administrador del clúster necesita
+agregar un nuevo nodo en el clúster para continuar con la actualización.
+
+Usted puede ver cómo Kubernetes varía la tasa a la que las interrupciones
+pueden suceder, en función de:
+
+- cuántas réplicas una aplicación necesita
+- cuánto toma apagar una instancia de manera correcta
+- cuánto tiempo toma que una nueva instancia inicie
+- el tipo de controlador
+- la capacidad de recursos del clúster
+
+## Separando al dueño del Clúster y los roles de dueños de la Aplicación
+
+Muchas veces es útil pensar en el Administrador del Clúster
+y en el dueño de la aplicación como roles separados con conocimiento limitado
+el uno del otro. Esta separación de responsabilidades
+puede tener sentido en estos escenarios:
+
+- Cuando hay muchos equipos con aplicaciones compartiendo un clúster de Kubernetes y
+  hay una especialización natural de roles
+- Cuando una herramienta de terceros o un servicio es usado para automatizar el control del clúster
+
+El presupuesto de interrupción de Pods soporta esta separación de roles, ofreciendo
+una interfaz entre los roles.
+
+Si no se tiene tal separación de responsabilidades en la organización,
+posiblemente no se necesite el Presupuesto de Interrupción de Pods.
+
+## Cómo realizar Acciones Disruptivas en el Clúster
+
+Si usted es el Administrador del Clúster y necesita realizar una acción disruptiva en todos
+los nodos en el clúster, como una actualización de nodo o de software de sistema, estas son algunas de las opciones:
+
+- Aceptar el tiempo sin funcionar mientras dura la actualización.
+- Conmutar a otra réplica completa del clúster.
+  - No hay tiempo sin funcionar, pero puede ser costoso tener duplicados los nodos
+    y también un esfuerzo humano para orquestar dicho cambio.
+- Escribir aplicaciones tolerantes a interrupciones y usar PDBs.
+  - No hay tiempo sin funcionar.
+  - Duplicación de recursos mínima.
+  - Permite mucha más automatización de la administración del clúster.
+  - Escribir aplicaciones que tengan tolerancia a fallas es complicado, pero el trabajo para tolerar interrupciones
+    voluntarias se superpone en gran medida con el trabajo de dar soporte a autoescalamientos y tolerar
+    interrupciones involuntarias.
+
+
+
+
+## {{% heading "whatsnext" %}}
+
+
+* Siga los pasos para proteger su aplicación con [configurar el Presupuesto de Interrupciones de Pods](/docs/tasks/run-application/configure-pdb/).
+
+* Aprenda más sobre [desalojar nodos](/docs/tasks/administer-cluster/safely-drain-node/)
+
+* Aprenda sobre [actualizar un Deployment](/docs/concepts/workloads/controllers/deployment/#updating-a-deployment)
+  incluyendo los pasos para mantener su disponibilidad mientras dura la actualización.
+ diff --git a/content/ja/docs/tasks/administer-cluster/securing-a-cluster.md b/content/ja/docs/tasks/administer-cluster/securing-a-cluster.md new file mode 100644 index 0000000000..d1a852efa2 --- /dev/null +++ b/content/ja/docs/tasks/administer-cluster/securing-a-cluster.md @@ -0,0 +1,197 @@ +--- +title: クラスターのセキュリティ +content_type: task +--- + + + +このドキュメントでは、偶発的または悪意のあるアクセスからクラスターを保護するためのトピックについて説明します。 +また、全体的なセキュリティに関する推奨事項を提供します。 + + +## {{% heading "prerequisites" %}} + + +* {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + + + +## Kubernetes APIへのアクセスの制御 + +Kubernetesは完全にAPI駆動であるため、誰がクラスターにアクセスできるか、どのようなアクションを実行できるかを制御・制限することが第一の防御策となります。 + +### すべてのAPIトラフィックにTLS(Transport Layer Security)を使用する + +Kubernetesは、クラスター内のすべてのAPI通信がデフォルトでTLSにより暗号化されていることを期待しており、大半のインストール方法では、必要な証明書を作成してクラスターコンポーネントに配布することができます。 + +コンポーネントやインストール方法によっては、HTTP上のローカルポートを有効にする場合があることに注意してください。管理者は、潜在的に保護されていないトラフィックを特定するために、各コンポーネントの設定に精通している必要があります。 + +### APIの認証 + +クラスターのインストール時に、共通のアクセスパターンに合わせて、APIサーバーが使用する認証メカニズムを選択します。 +例えば、シングルユーザーの小規模なクラスターでは、シンプルな証明書や静的なBearerトークンを使用することができます。 +大規模なクラスターでは、ユーザーをグループに細分化できる既存のOIDCまたはLDAPサーバーを統合することができます。 + +ノード、プロキシー、スケジューラー、ボリュームプラグインなど、インフラの一部であるものも含めて、すべてのAPIクライアントを認証する必要があります。 +これらのクライアントは通常、[service accounts](/docs/reference/access-authn-authz/service-accounts-admin/)であるか、またはx509クライアント証明書を使用しており、クラスター起動時に自動的に作成されるか、クラスターインストールの一部として設定されます。 + +詳細については、[認証](/ja/docs/reference/access-authn-authz/authentication/)を参照してください。 + +### APIの認可 + +認証されると、すべてのAPIコールは認可チェックを通過することになります。 + +Kubernetesには、統合された[RBAC](/ja/docs/reference/access-authn-authz/rbac/)コンポーネントが搭載されており、入力されたユーザーやグループを、ロールにまとめられたパーミッションのセットにマッチさせます。 +これらのパーミッションは、動詞(get, create, delete)とリソース(pods, services, nodes)を組み合わせたもので、ネームスペース・スコープまたはクラスター・スコープに対応しています。 +すぐに使えるロールのセットが提供されており、クライアントが実行したいアクションに応じて、デフォルトで適切な責任の分離を提供します。 + +[Node](/docs/reference/access-authn-authz/node/)と[RBAC](/ja/docs/reference/access-authn-authz/rbac/)の承認者は、[NodeRestriction](/docs/reference/access-authn-authz/admission-controllers/#noderestriction)のアドミッションプラグインと組み合わせて使用することをお勧めします。 + +認証の場合と同様に、小規模なクラスターにはシンプルで幅広い役割が適切かもしれません。 +しかし、より多くのユーザーがクラスターに関わるようになるとチームを別の名前空間に分け、より限定的な役割を持たせることが必要になるかもしれません。 +認可においては、あるオブジェクトの更新が、他の場所でどのようなアクションを起こすかを理解することが重要です。 + +たとえば、ユーザーは直接Podを作成することはできませんが、ユーザーに代わってPodを作成するDeploymentの作成を許可することで、間接的にそれらのPodを作成することができます。 +同様に、APIからノードを削除すると、そのノードにスケジューリングされていたPodが終了し、他のノードに再作成されます。 +すぐに使えるロールは、柔軟性と一般的なユースケースのバランスを表していますが、より限定的なロールは、偶発的なエスカレーションを防ぐために慎重に検討する必要があります。 +すぐに使えるロールがニーズを満たさない場合は、ユースケースに合わせてロールを作成することができます。 + +詳しくは[authorization reference section](/docs/reference/access-authn-authz/authorization/)に参照してください。 + +## Kubeletへのアクセスの制御 + +Kubeletsは、ノードやコンテナの強力な制御を可能にするHTTPSエンドポイントを公開しています。 +デフォルトでは、KubeletsはこのAPIへの認証されていないアクセスを許可しています。 + +本番環境のクラスターでは、Kubeletの認証と認可を有効にする必要があります。 + +詳細は、[Kubelet 認証/認可](/ja/docs/reference/command-line-tools-reference/kubelet-authentication-authorization)に参照してください。 + +## ワークロードやユーザーのキャパシティーを実行時に制御 + +Kubernetesにおける権限付与は、意図的にハイレベルであり、リソースに対する粗いアクションに焦点を当てています。 + +より強力なコントロールは**policies**として存在し、それらのオブジェクトがクラスタや自身、その他のリソースにどのように作用するかをユースケースによって制限します。 + +### クラスターのリソース使用量の制限 + +[リソースクォータ](/ja/docs/concepts/policy/resource-quotas/)は、ネームスペースに付与されるリソースの数や容量を制限するものです。 + +これは、ネームスペースが割り当てることのできるCPU、メモリー、永続的なディスクの量を制限するためによく使われますが、各ネームスペースに存在するPod、サービス、ボリュームの数を制御することもできます。 + +[Limit 
ranges](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/)は、上記のリソースの一部の最大または最小サイズを制限することで、ユーザーがメモリーなどの一般的に予約されたリソースに対して不当に高いまたは低い値を要求するのを防いだり、何も指定されていない場合にデフォルトの制限を提供したりします。 + +### コンテナが利用する特権の制御 + +Podの定義には、[security context](/docs/tasks/configure-pod-container/security-context/)が含まれており、ノード上の特定の Linux ユーザー(rootなど)として実行するためのアクセス、特権的に実行するためのアクセス、ホストネットワークにアクセスするためのアクセス、その他の制御を要求することができます。 +[Pod security policies](/docs/concepts/policy/pod-security-policy/)は、危険なセキュリティコンテキスト設定を提供できるユーザーやサービスアカウントを制限することができます。 + +たとえば、Podのセキュリティポリシーでは、ボリュームマウント、特に`hostPath`を制限することができ、これはPodの制御すべき側面です。 +一般に、ほとんどのアプリケーションワークロードでは、ホストリソースへのアクセスを制限する必要があります。 +ホスト情報にアクセスすることなく、ルートプロセス(uid 0)として正常に実行できます。 +ただし、ルートユーザーに関連する権限を考慮して、非ルートユーザーとして実行するようにアプリケーションコンテナを記述する必要があります。 + +## コンテナが不要なカーネルモジュールをロードしないようにします + +Linuxカーネルは、ハードウェアが接続されたときやファイルシステムがマウントされたときなど、特定の状況下で必要となるカーネルモジュールをディスクから自動的にロードします。 +特にKubernetesでは、非特権プロセスであっても、適切なタイプのソケットを作成するだけで、特定のネットワークプロトコル関連のカーネルモジュールをロードさせることができます。これにより、管理者が使用されていないと思い込んでいるカーネルモジュールのセキュリティホールを攻撃者が利用できる可能性があります。 +特定のモジュールが自動的にロードされないようにするには、そのモジュールをノードからアンインストールしたり、ルールを追加してブロックしたりします。 + +ほとんどのLinuxディストリビューションでは、`/etc/modprobe.d/kubernetes-blacklist.conf`のような内容のファイルを作成することで実現できます。 + +``` +# DCCPは必要性が低く、複数の深刻な脆弱性があり、保守も十分ではありません。 +blacklist dccp + +# SCTPはほとんどのKubernetesクラスタでは使用されておらず、また過去には脆弱性がありました。 +blacklist sctp +``` + +モジュールのロードをより一般的にブロックするには、SELinuxなどのLinuxセキュリティモジュールを使って、コンテナに対する `module_request`権限を完全に拒否し、いかなる状況下でもカーネルがコンテナ用のモジュールをロードできないようにすることができます。 +(Podは、手動でロードされたモジュールや、より高い権限を持つプロセスに代わってカーネルがロードしたモジュールを使用することはできます)。 + + +### ネットワークアクセスの制限 + +名前空間の[ネットワークポリシー](/ja/docs/tasks/administer-cluster/declare-network-policy/)により、アプリケーション作成者は、他の名前空間のPodが自分の名前空間内のPodやポートにアクセスすることを制限することができます。 + +サポートされている[Kubernetes networking providers](/ja/docs/concepts/cluster-administration/networking/)の多くは、ネットワークポリシーを尊重するようになりました。 +クォータやリミットの範囲は、ユーザーがノードポートや負荷分散サービスを要求するかどうかを制御するためにも使用でき、多くのクラスターでは、ユーザーのアプリケーションがクラスターの外で見えるかどうかを制御できます。 +ノードごとのファイアウォール、クロストークを防ぐための物理的なクラスタノードの分離、高度なネットワークポリシーなど、プラグインや環境ごとにネットワークルールを制御する追加の保護機能が利用できる場合もあります。 + +### クラウドメタデータのAPIアクセスを制限 + +クラウドプラットフォーム(AWS、Azure、GCEなど)では、しばしばメタデータサービスをインスタンスローカルに公開しています。 +デフォルトでは、これらのAPIはインスタンス上で実行されているPodからアクセスでき、そのノードのクラウド認証情報や、kubelet認証情報などのプロビジョニングデータを含むことができます。 +これらの認証情報は、クラスター内でのエスカレーションや、同じアカウントの他のクラウドサービスへのエスカレーションに使用できます。 + +クラウドプラットフォーム上でKubernetesを実行する場合は、インスタンスの認証情報に与えられるパーミッションを制限し、[ネットワークポリシー](/ja/docs/tasks/administer-cluster/declare-network-policy/)を使用してメタデータAPIへのPodのアクセスを制限し、プロビジョニングデータを使用してシークレットを配信することは避けてください。 + +### Podのアクセス可能ノードを制御 + +デフォルトでは、どのノードがPodを実行できるかについての制限はありません。 +Kubernetesは、エンドユーザーが利用できる[Node上へのPodのスケジューリング](/ja/docs/concepts/scheduling-eviction/assign-pod-node/)と[TaintとToleration](/ja/docs/concepts/scheduling-eviction/taint-and-toleration/)を提供します。 +多くのクラスターでは、ワークロードを分離するためにこれらのポリシーを使用することは、作者が採用したり、ツールを使って強制したりする慣習になっています。 + +管理者としては、ベータ版のアドミッションプラグイン「PodNodeSelector」を使用して、ネームスペース内のPodをデフォルトまたは特定のノードセレクタを必要とするように強制することができます。 +エンドユーザーがネームスペースを変更できない場合は、特定のワークロード内のすべてのPodの配置を強く制限することができます。 + +## クラスターのコンポーネントの保護 + +このセクションでは、クラスターを危険から守るための一般的なパターンを説明します。 + +### etcdへのアクセスの制限 + +API用のetcdバックエンドへの書き込みアクセスは、クラスタ全体のrootを取得するのと同等であり、読み取りアクセスはかなり迅速にエスカレートするために使用できます。 +管理者は、TLSクライアント証明書による相互認証など、APIサーバーからetcdサーバーへの強力な認証情報を常に使用すべきであり、API サーバーのみがアクセスできるファイアウォールの後ろにetcdサーバーを隔離することがしばしば推奨されます。 + +{{< caution >}} +クラスター内の他のコンポーネントが、完全なキースペースへの読み取りまたは書き込みアクセスを持つマスターetcdインスタンスへのアクセスを許可することは、クラスター管理者のアクセスを許可することと同じです。 +マスター以外のコンポーネントに別のetcdインスタンスを使用するか、またはetcd 
ACLを使用してキースペースのサブセットへの読み取りおよび書き込みアクセスを制限することを強く推奨します。 +{{< /caution >}} + +### 監査ログの有効 + +[audit logger](/docs/tasks/debug-application-cluster/audit/)はベータ版の機能で、APIによって行われたアクションを記録し、侵害があった場合に後から分析できるようにするものです。 + +監査ログを有効にして、ログファイルを安全なサーバーにアーカイブすることをお勧めします。 + +### アルファまたはベータ機能へのアクセスの制限 + +アルファ版およびベータ版のKubernetesの機能は活発に開発が行われており、セキュリティ上の脆弱性をもたらす制限やバグがある可能性があります。 +常に、アルファ版またはベータ版の機能が提供する価値と、セキュリティ体制に起こりうるリスクを比較して評価してください。 +疑問がある場合は、使用しない機能を無効にしてください。 + +### インフラの認証情報を頻繁に交換 + +秘密やクレデンシャルの有効期間が短いほど、攻撃者がそのクレデンシャルを利用することは難しくなります。 +証明書の有効期間を短く設定し、そのローテーションを自動化します。 +発行されたトークンの利用可能期間を制御できる認証プロバイダーを使用し、可能な限り短いライフタイムを使用します。 +外部統合でサービス・アカウント・トークンを使用する場合、これらのトークンを頻繁にローテーションすることを計画します。 +例えば、ブートストラップ・フェーズが完了したら、ノードのセットアップに使用したブートストラップ・トークンを失効させるか、その認証を解除する必要があります。 + +### サードパーティの統合を有効にする前に確認 + +Kubernetesへの多くのサードパーティの統合は、クラスターのセキュリティプロファイルを変更する可能性があります。 +統合を有効にする際には、アクセスを許可する前に、拡張機能が要求するパーミッションを常に確認してください。 + +例えば、多くのセキュリティ統合は、事実上そのコンポーネントをクラスター管理者にしているクラスター上のすべての秘密を見るためのアクセスを要求するかもしれません。 +疑問がある場合は、可能な限り単一の名前空間で機能するように統合を制限してください。 +Podを作成するコンポーネントも、`kube-system`名前空間のような名前空間内で行うことができれば、予想外に強力になる可能性があります。これは、サービスアカウントのシークレットにアクセスしたり、サービスアカウントに寛容な[pod security policies](/docs/concepts/policy/pod-security-policy/)へのアクセスが許可されている場合に、昇格したパーミッションでPodが実行される可能性があるからです。 + +### etcdにあるSecretを暗号化 + +一般的に、etcdデータベースにはKubernetes APIを介してアクセス可能なあらゆる情報が含まれており、クラスターの状態に対する大きな可視性を攻撃者へ与える可能性があります。 +よく吟味されたバックアップおよび暗号化ソリューションを使用して、常にバックアップを暗号化し、可能な場合はフルディスク暗号化の使用を検討してください。 + +Kubernetesは1.7で導入された機能である[encryption at rest](/docs/tasks/administer-cluster/encrypt-data/)をサポートしており、これは1.13からはベータ版となっています。 +これは、etcdの`Secret`リソースを暗号化し、etcdのバックアップにアクセスした人が、それらのシークレットの内容を見ることを防ぎます。 +この機能は現在ベータ版ですが、バックアップが暗号化されていない場合や、攻撃者がetcdへの読み取りアクセスを得た場合に、追加の防御レベルを提供します。 + +### セキュリティアップデートのアラートの受信と脆弱性の報告 + +[kubernetes-announce](https://groups.google.com/forum/#!forum/kubernetes-announce)に参加してください。 +グループに参加すると、セキュリティアナウンスに関するメールを受け取ることができます。 +脆弱性の報告方法については、[security reporting](/docs/reference/issues-security/security/)ページを参照してください。 diff --git a/content/pt-br/docs/concepts/configuration/manage-resources-containers.md b/content/pt-br/docs/concepts/configuration/manage-resources-containers.md new file mode 100644 index 0000000000..7526880477 --- /dev/null +++ b/content/pt-br/docs/concepts/configuration/manage-resources-containers.md @@ -0,0 +1,872 @@ +--- +title: Gerenciamento de recursos em Pods e contêineres +content_type: concept +weight: 40 +feature: + title: Empacotamento automático + description: > + Distribui contêineres automaticamente com base em requerimentos de recursos + e em outras restrições, evitando sacrificar disponibilidade. + Combina cargas de trabalho críticas com cargas de trabalho de prioridades + mais baixas para melhorar a utilização e reduzir o desperdício de recursos. +--- + + + +Ao criar a especificação de um {{< glossary_tooltip term_id="pod" >}}, você pode +opcionalmente especificar quanto de cada recurso um {{< glossary_tooltip text="contêiner" term_id="container" >}} +precisa. Os recursos mais comuns a serem especificados são CPU e memória (RAM); +há outros recursos que podem ser especificados. + +Quando você especifica o _requerimento_ de recursos em um Pod, o +{{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}} utiliza +esta informação para decidir a qual nó o Pod será atribuído. 
Quando você +especifica um _limite_ de recurso para um contêiner, o kubelet garante o +cumprimento de tais limites, de modo que o contêiner em execução não consiga +utilizar uma quantidade de tal recurso além do limite especificado. O kubelet +também reserva pelo menos o _requerimento_ daquele recurso de sistema +especificamente para que este contêiner utilize. + + + +## Requerimentos e limites + +Se o nó em que um Pod está rodando tem o suficiente de um recurso específico +disponível, é possível (e permitido) a um contêiner utilizar mais do que o seu +`request` para aquele recurso especifica. No entanto, não é permitido a um +contêiner consumir mais do que o seu `limit` para um recurso. + +Por exemplo, se você especificar um requerimento de `memory` de 256 MiB para um +contêiner, e aquele contêiner está em um Pod atribuído a um nó com 8GiB de +memória, sem outros Pods, então este contêiner pode tentar consumir mais memória +RAM. + +Se você especificar um limite de `memory` de 4GiB para aquele contêiner, o +kubelet (e o +{{< glossary_tooltip text="agente de execução de contêiner" term_id="container-runtime" >}}) +vão garantir o cumprimento do limite. O agente de execução impede que o contêiner +utilize mais de um recurso do que seu limite configurado. Por exemplo, quando +um processo no contêiner tenta consumir mais que o limite permitido de memória, +o núcleo do sistema encerra o processo que tentou efetuar a alocação de memória +com um erro de memória esgotada (_out of memory (OOM) error_). + +Limites podem ser implementados de forma reativa (o sistema intervém quando +uma violação ocorre) ou por garantia (o sistema previne o contêiner de exceder +o limite). Diferentes agentes de execução implementam as mesmas restrições de +maneiras diferentes. + +{{< note >}} +Se um contêiner especifica seu próprio limite de memória, mas não especifica seu +requerimento de memória, o Kubernetes automaticamente cria um requerimento de +memória com o mesmo valor do limite. A mesma regra vale para o limite de CPU: +quando não há requerimento de CPU, o Kubernetes automaticamente cria um +requerimento de CPU idêntico ao limite. +{{< /note >}} + +## Tipos de recursos + +_CPU_ e _memória_ são _tipos de recursos_. Um tipo de recurso possui uma unidade +básica. CPU representa processamento computacional e é especificada em unidades +de [CPU do Kubernetes](#meaning-of-cpu). +Memória é especificada em bytes. Em cargas de trabalho Linux, você pode +especificar o recurso _huge pages_. _Huge pages_ são uma funcionalidade +específica do Linux que permite ao núcleo do sistema operacional alocar +blocos de memória muito maiores que o tamanho de página de memória padrão. + +Por exemplo, em um sistema onde o tamanho da página de memória padrão é de 4 KiB, +você pode especificar um limite `hugepages-2Mi: 80Mi`. Se o contêiner tentar +alocar mais de 40 _huge pages_ de 2 MiB cada, ou um total de 80 MiB, essa +alocação irá falhar. + +{{< note >}} +Você não pode superdimensionar (ou solicitar acima do limite físico) recursos do +tipo `hugepages-*`. +O recurso `hugepages-*` difere dos recursos `memory` e `cpu` neste aspecto. +{{< /note >}} + +CPU e memória são chamados coletivamente de _recursos computacionais_, ou apenas +_recursos_. Recursos computacionais são quantidades mensuráveis que podem ser +requisitadas, alocadas, e consumidas. Estes recursos diferem dos +[recursos de API](/docs/concepts/overview/kubernetes-api/). 
Recursos de API, +como Pods e [Services](/docs/concepts/services-networking/service/) são objetos +que podem ser lidos e modificados através do servidor da API do Kubernetes. + +## Requerimentos de recursos e limites de Pod e contêiner + +Para cada contêiner, você pode especificar limites e requerimentos de recursos, +incluindo os seguintes recursos: + +* `spec.containers[].resources.limits.cpu` +* `spec.containers[].resources.limits.memory` +* `spec.containers[].resources.limits.hugepages-` +* `spec.containers[].resources.requests.cpu` +* `spec.containers[].resources.requests.memory` +* `spec.containers[].resources.requests.hugepages-` + +Embora você possa especificar apenas requerimentos e limites para contêineres +individuais, é útil também pensar sobre os requerimentos e limites gerais de um +Pod. +Para um recurso em particular, um _requerimento ou limite de recurso de um Pod_ +é a soma de todos os valores dos requerimentos ou limites de um recurso daquele +tipo, especificados em cada um dos contêineres daquele Pod. + +## Unidades de recursos no Kubernetes + +### Unidades de recurso de CPU {#meaning-of-cpu} + +Limites e requerimentos de recursos de CPU são mensurados em unidades de _cpu_. +No Kubernetes, uma unidade de CPU é equivalente a **um núcleo físico de CPU**, +ou **um núcleo virtual**, dependendo se o nó é uma máquina física ou uma máquina +virtual rodando em uma máquina física. + +Requerimentos fracionários são permitidos. Quando você define um contêiner cujo +valor do campo `spec.containers[].resources.requests.cpu` é `0.5`, você está +solicitando metade da quantidade de CPU que teria sido solicitada caso o valor +fosse `1.0`. +No caso de unidades de recurso de CPU, a expressão de +[quantidade](/docs/reference/kubernetes-api/common-definitions/quantity/) `0.1` +é equivalente à expressão `100m`, que pode ser lida como "cem milicpus", ou +"cem milinúcleos". "Milicpu" ou "milinúcleo" equivalem à milésima parte de um +núcleo ou CPU, de modo que "100m" equivalem a 10% do tempo computacional de um +processador. + +Recursos de CPU são sempre especificados como uma quantidade absoluta de recurso, +nunca como uma quantidade relativa. Por exemplo, `500m` de CPU representam +grosseiramente a mesma quantidade de poder computacional, independentemente do +contêiner rodar em uma máquina com processador de núcleo único, de dois núcleos +ou de 48 núcleos. + +{{< note >}} +O Kubernetes não permite que você especifique recursos de CPU com uma precisão +maior que `1m`. Devido a isso, é útil especificar unidades de CPU menores do que +`1.0` ou `1000m` utilizando a notação de milicpu. Por exemplo, `5m` ao invés de +`0.005`. +{{< /note >}} + +### Unidades de recurso de memória {#meaning-of-memory} + +Limites e requerimentos de `memory` são medidos em bytes. Você pode expressar +memória como um número inteiro ou como um número de ponto fixo, utilizando um +destes sufixos de +[quantidade](/docs/reference/kubernetes-api/common-definitions/quantity/): +E, P, T, G, M, k. Você também pode utilizar os equivalentes de potência de dois: +Ei, Pi, Ti, Gi, Mi, Ki. Por exemplo, as quantidades abaixo representam, a grosso +modo, o mesmo valor: + +```shell +128974848, 129e6, 129M, 128974848000m, 123Mi +``` + +Tome cuidado com os sufixos. Se você solicitar `400m` de memória, esta +quantidade estará de fato requerendo o equivalente a 0,4 byte de memória. A +intenção da pessoa que fez esta requisição provavelmente era solictar 400 +mebibytes (`400Mi`) ou 400 megabytes (`400M`). 
+ +## Exemplo de recursos de contêiner {#example-1} + +O Pod seguinte tem dois contêineres. Ambos os contêineres têm um requerimento de +0,25 CPU e 64 MiB (ou 226 bytes) de memória. Cada contêiner tem um +limite de 0,5 CPU e 128 MiB de memória. Você pode dizer que o Pod tem um +requerimento de 0,5 CPU e 128 MiB de memória, e um limite de 1 CPU e 256 MiB de +memória. + +```yaml +--- +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" +``` + +## Como Pods com requerimentos de recursos são agendados + +Quando você cria um Pod, o escalonador do Kubernetes seleciona um nó para que o +Pod rode. Cada nó possui uma capacidade máxima para cada um dos tipos de recurso: +a quantidade de CPU e memória que o nó pode fornecer aos Pods. O escalonador +garante que, para cada tipo de recurso, a soma dos requerimentos de recursos dos +contêineres agendados seja menor que a capacidade do nó. +Note que, embora o consumo de memória ou CPU real nos nós seja muito baixo, o +escalonador ainda irá se recusar a agendar um Pod em um nó se a verificação de +capacidade falhar. Isso protege contra a falta de um recurso em um nó quando o +consumo de recursos aumenta com o passar do tempo, como por exemplo durante o +pico diário de requisições a um serviço. + +## Como o Kubernetes aplica requisições e limites de recursos {#how-pods-with-resource-limits-are-run} + +Quando o kubelet inicia um contêiner como parte de um Pod, o kubelet envia as +requisições e limites de memória e de CPU ao agente de execução de contêiner. + +No Linux, o agente de execução de contêiner normalmente configura os +{{< glossary_tooltip text="cgroups" term_id="cgroup" >}} que aplicam e garantem +os limites que você definiu. + +- O limite de CPU determina um teto de quanto tempo de CPU o contêiner pode + utilizar. A cada intervalo de agendamento, o núcleo do sistema operacional do + Linux verifica se este limite foi excedido; se este for o caso, o núcleo + aguarda antes de permitir que aquele cgroup continue sua execução. +- O requerimento de CPU normalmente define um método de balanceamento. Se vários + contêineres diferentes (cgroups) querem rodar em um sistema disputado, cargas + de trabalho com requerimentos maiores de CPU têm mais tempo de CPU alocado + para si do que cargas de trabalho com pequenos requerimentos. +- O requerimento de memória é usado principalmente durante o agendamento de um + Pod. Em um nó que utiliza cgroups v2, o agente de execução de contêiner pode + utilizar o requerimento de memória como uma dica para definir valores para + `memory.min` e `memory.low`. +- O limite de memória define um limite de memória para aquele cgroup. Se o + contêiner tenta alocar mais memória que aquele limite, o subsistema + _out-of-memory_ do núcleo do sistema operacional Linux é ativado e, + normalmente, intervém encerrando um dos processos do contêiner que tentou + alocar mais memória. Se o processo em questão for o PID 1 do contêiner, e o + contêiner estiver marcado como reinicializável, então o Kubernetes irá + reiniciar o contêiner. +- O limite de memória para um Pod ou contêiner é também aplicado a páginas em + volumes armazenados em memória, como um `emptyDir`. 
O kubelet considera + sistemas de arquivos `tmpfs` em volumes do tipo `emptyDir` como uso de memória + em um contêiner, ao invés de armazenamento efêmero local. + +Se um contêiner exceder seu requerimento de memória e o nó em que esse contêiner +está rodando ficar com pouca memória no total, é provável que o Pod a que este +contêiner pertence seja {{< glossary_tooltip text="removido" term_id="eviction" >}}. + +A um contêiner pode ou não ser permitido exceder seu limite de CPU por períodos +de tempo estendidos. No entanto, agentes de execução de contêiner não encerram +Pods por uso excessivo de CPU. + +A fim de determinar se um contêiner não pode ser agendado ou está sendo +encerrado devido a limites de recursos, consulte a seção de +[solução de problemas](#troubleshooting). + +### Monitorando utilização de recursos computacionais e de memória + +O kubelet relata a utilização de recursos de um Pod como parte do +[`status`](/docs/concepts/overview/working-with-objects/kubernetes-objects/#object-spec-and-status) +do Pod. + +Se ferramentas opcionais para +[monitoramento de recursos](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) +estiverem disponíveis em seu cluster, a utilização de recursos de um Pod pode +ser verificada diretamente através de +[API de métricas](/docs/tasks/debug-application-cluster/resource-metrics-pipeline/#the-metrics-api) +ou através das suas ferramentas de monitoramento + +## Armazenamento efêmero local + + +{{< feature-state for_k8s_version="v1.10" state="beta" >}} + +Nós possuem armazenamento efêmero local, através de dispositivos de escrita +conectados localmente ou através de RAM. "Efêmero" significa que não há garantia +de longo termo com relação a durabilidade. + +Pods utilizam armazenamento local efêmero para dados temporários, cache e logs. +O kubelet pode fornecer armazenamento temporário a Pods que utilizam +armazenamento local efêmero para montar {{< glossary_tooltip term_id="volume" text="volumes" >}} +do tipo [`emptyDir`](/docs/concepts/storage/volumes/#emptydir) em contêineres. + +O kubelet também utiliza este tipo de armazenamento para +[logs de contêineres a nível de nó](/pt-br/docs/concepts/cluster-administration/logging/#logs-no-nível-do-nó), +imagens de contêiner e camadas graváveis de contêineres em execução. + +{{< caution >}} +Se um nó falhar, os dados em seu armazenamento efêmero podem ser perdidos. +Suas aplicações não devem ter expectativas de cumprimento de SLAs de desempenho +(como quantidade de operações de entrada e saída de disco por segundo (IOPS), +por exemplo) pelo armazenamento local efêmero. +{{< /caution >}} + +Com esta funcionalidade em fase beta, o Kubernetes permite que você rastreie, +reserve e limite quanto armazenamento local efêmero um Pod pode consumir. + +### Configurações para armazenamento local efêmero {#configurations-for-local-ephemeral-storage} + +O Kubernetes suporta duas formas de configuração para o armazenamento local +efêmero em um nó: + +{{< tabs name="local_storage_configurations" >}} +{{% tab name="Sistema de arquivos único" %}} +Nesta configuração, você armazena todos os tipos diferentes de dados locais +efêmeros (volumes do tipo `emptyDir`, camadas graváveis, imagens de contêiner, +logs) em um sistema de arquivos único. A forma mais efetiva de configurar o +kubelet é dedicar este sistema de arquivos aos dados do Kubernetes (kubelet). 
+ +O kubelet também escreve +[logs de contêiner a nível de nó](/pt-br/docs/concepts/cluster-administration/logging/#logs-no-nível-do-nó) +e trata estes logs de maneira semelhante ao armazenamento efêmero local. + +O kubelet escreve logs em arquivos dentro do seu diretório de log configurado +(`/var/log` por padrão) e possui um diretório base para outros dados armazenados +localmente (`/var/lib/kubelet` por padrão). + +Normalmente, ambos os diretórios `/var/lib/kubelet` e `/var/log` encontram-se no +sistema de arquivos raiz, e o kubelet é projetado com este desenho em mente. + +Seu nó pode ter tantos outros sistemas de arquivos não utilizados pelo Kubernetes +quantos você desejar. +{{% /tab %}} + +{{% tab name="Dois sistemas de arquivos" %}} +Você tem um sistema de arquivos no nó que você utiliza para dados efêmeros que +vêm de Pods em execução: logs e volumes do tipo `emptyDir`. Você pode utilizar +este sistema de arquivos para outros dados (por exemplo, logs de sistema não +relacionados ao Kubernetes); este sistema de arquivos pode até mesmo ser o +sistema de arquivos raiz. + +O kubelet também escreve +[logs de contêiner a nível de nó](/pt-br/docs/concepts/cluster-administration/logging/#logs-no-nível-do-nó) +no primeiro sistema de arquivos e os trata de forma semelhante ao armazenamento +local efêmero. + +Você também tem um segundo sistema de arquivos, separado, conectado a um +dispositivo lógico de armazenamento distinto. Nesta configuração, o diretório +que você configurou o kubelet para armazenar as camadas de imagens de contêiner +e as camadas graváveis de contêineres em execução estará neste segundo sistema +de arquivos. + +O primeiro sistema de arquivos não armazena nenhuma camada de imagens de +contêiner ou camada gravável. + +Seu nó pode ter tantos outros sistemas de arquivos não utilizados pelo Kubernetes +quantos você desejar. +{{% /tab %}} +{{< /tabs >}} + + +O kubelet consegue medir quanto armazenamento local está sendo utilizado. O +kubelet faz isso desde que: + +- o [_feature gate_](/docs/reference/command-line-tools-reference/feature-gates/) + `LocalStorageCapacityIsolation` esteja habilitado (a funcionalidade está + ligada por padrão), e +- você tenha configurado o nó utilizando uma das configurações suportadas para + o armazenamento local efêmero. + +Se você tiver uma configuração diferente, o kubelet não irá aplicar limites de +recursos para o armazenamento local efêmero. + +{{< note >}} +O kubelet rastreia volumes `emptyDir` que utilizem o sistema de arquivos `tmpfs` +como uso de memória de contêiner, ao invés de consumo de armazenamento local +efêmero. +{{< /note >}} + +### Configurando requerimentos e limites para armazenamento local efêmero + +Você pode especificar o recurso `ephemeral-storage` para gerenciar o +armazenamento local efêmero. Cada contêiner de um Pod pode especificar um dos +valores abaixo, ou ambos: + +* `spec.containers[].resources.limits.ephemeral-storage` +* `spec.containers[].resources.requests.ephemeral-storage` + +Limites e requerimentos de `ephemeral-storage` são medidos em quantidades de +bytes. Você pode expressar armazenamento como um inteiro ou como um valor de +ponto fixo utilizando um dos seguintes sufixos: E, P, T, G, M, k. Você pode +também utilizar os equivalentes de potência de dois: Ei, Pi, Ti, Gi, Mi, Ki. +Por exemplo, as quantidades abaixo representam grosseiramente o mesmo valor: + +- `128974848` +- `129e6` +- `129M` +- `123Mi` + +No exemplo a seguir, o Pod tem dois contêineres. 
Cada contêiner tem um +requerimento de 2GiB de armazenamento efêmero local. Cada contêiner tem um +limite de 4GiB de armazenamento efêmero local. Portanto, o Pod tem um +requerimento de 4GiB e um limite de 8GiB de armazenamento efêmero local. + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: app + image: images.my-company.example/app:v4 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "4Gi" + volumeMounts: + - name: ephemeral + mountPath: "/tmp" + - name: log-aggregator + image: images.my-company.example/log-aggregator:v6 + resources: + requests: + ephemeral-storage: "2Gi" + limits: + ephemeral-storage: "4Gi" + volumeMounts: + - name: ephemeral + mountPath: "/tmp" + volumes: + - name: ephemeral + emptyDir: {} +``` + +### Como Pods com requerimentos de `ephemeral-storage` são agendados + +Quando você cria um Pod, o Kubernetes seleciona um nó para o Pod rodar. Cada nó +tem uma quantidade máxima de armazenamento efêmero local que pode ser fornecida +aos Pods. Para mais informações, consulte +[_Node Allocatable_](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable). + +O escalonador garante que a soma dos requerimentos de recursos dos contêineres +agendados é menor que a capacidade do nó. + +### Gerenciamento do consumo do armazenamento efêmero {#resource-emphemeralstorage-consumption} + +Se o kubelet estiver gerenciando armazenamento local efêmero como um recurso, +o kubelet irá medir o consumo de armazenamento em: + +- volumes `emptyDir`, com exceção dos volumes do tipo `tmpfs` +- diretórios que armazenem logs a nível de nó +- camadas de contêiner graváveis + +Se um Pod estiver utilizando mais armazenamento efêmero do que o permitido, o +kubelet irá gerar um sinal de remoção para aquele Pod. + +Para isolamento a nível de contêiner, se o consumo de armazenamento de um +contêiner em camadas graváveis e logs exceder seu limite de armazenamento, o +kubelet irá marcar o Pod para remoção. + +Para isolamento a nível de Pod, o kubelet calcula um limite de armazenamento +total para um Pod somando os limites de cada contêiner naquele Pod. Neste caso, +se a soma do consumo de armazenamento efêmero local de todas os contêineres e +também dos volumes `emptyDir` de um Pod exceder o limite de armazenamento total +do Pod, então o kubelet marca o Pod para remoção. + +{{< caution >}} +Se o kubelet não estiver medindo armazenamento efêmero local, um Pod que exeder +seu limite de armazenamento local não será removido por exceder os limites de +recurso de armazenamento local. + +No entanto, se o espaço de um sistema de arquivos para camadas de contêiner +graváveis, logs a nível de nó, ou volumes `emptyDir` ficar reduzido, o nó irá +marcar a si próprio com um {{< glossary_tooltip text="_taint_" term_id="taint" >}} +indicando que está com armazenamento local reduzido, e esse _taint_ dispara a +remoção de Pods que não toleram o _taint_ em questão. + +Veja as [configurações](#configurations-for-local-ephemeral-storage) suportadas +para armazenamento efêmero local. +{{< /caution >}} + +O kubelet suporta formas diferentes de medir o uso de armazenamento dos Pods: + +{{< tabs name="resource-emphemeralstorage-measurement" >}} +{{% tab name="Varredura periódica" %}} +O kubelet executa verificações agendadas, em intervalos regulares, que varrem +cada volume do tipo `emptyDir`, diretório de log de contêiner, e camada gravável +de contêiner. + +A varredura mede quanto espaço está sendo utilizado. 
+ +{{< note >}} +Neste modo, o kubelet não rastreia descritores de arquivos abertos para arquivos +removidos. + +Se você (ou um contêiner) criar um arquivo dentro de um volume `emptyDir`, um +processo ou usuário abrir tal arquivo, e você apagar o arquivo enquanto ele +ainda estiver aberto, o nó de índice para o arquivo apagado será mantido até que +o arquivo seja fechado novamente. O kubelet, no entanto, não computa este espaço +como espaço em uso. +{{< /note >}} + +{{% /tab %}} +{{% tab name="Quota de projeto do sistema de arquivos" %}} + +Quotas de projeto são uma funcionalidade a nível de sistema operacional para +gerenciamento de uso do armazenamento em sistemas de arquivos. Com o Kubernetes, +você pode habilitar quotas de projeto para o monitoramento de armazenamento em +uso. Tenha certeza que o sistema de arquivos do nó que esteja sendo utilizado em +volumes do tipo `emptyDir` possui suporte a quotas de projeto. Por exemplo, +os sistemas de arquivos XFS e ext4fs oferecem suporte a quotas de projeto. + +{{< note >}} +Quotas de projeto permitem o monitoramento do uso de armazenamento, mas não +garantem limites. +{{< /note >}} + +O Kubernetes utiliza IDs de projeto iniciando em `1048576`. Os IDs em uso estão +registrados nos diretórios `/etc/projects` e `/etc/projid`. Se os IDs de projeto +nestes intervalos forem utilizados para outros propósitos no sistema, estes IDs +de projeto deverão estar registrados nos diretórios especificados acima para que +o Kubernetes não os tente utilizar. + +Quotas fornecem melhor desempenho e mais precisão do que varredura de diretórios. +Quando um diretório é atribuído a um projeto, todos os arquivos criados no +diretório são também criados no projeto, e o núcleo do sistema pode simplesmente +manter controle de quantos blocos estão em uso por arquivos daquele projeto. Se +um arquivo é criado e apagado, mas possui um descritor de arquivo aberto, ele +continua a consumir espaço. O rastreio de quotas registra este espaço de forma +precisa, enquanto varreduras de diretório ignoram o uso de espaço de +armazenamento por arquivos apagados. + +Se você deseja utilizar quotas de projeto, você deve: + +* Habilitar o [_feature gate_](/docs/reference/command-line-tools-reference/feature-gates/) + `LocalStorageCapacityIsolationFSQuotaMonitoring=true` utilizando o campo + `featureGates` na [configuração do kubelet](/docs/reference/config-api/kubelet-config.v1beta1/) + ou a opção de linha de comando `--feature-gates`. + +* Garantir que o sistema de arquivos raiz (ou o sistema de arquivos opcional de + tempo de execução) tem quotas de projeto habilitadas. Todos os sistemas de + arquivos XFS suportam quotas de projeto. Em sistemas de arquivos ext4, você + precisa habilitar a funcionalidade de rastreio de quotas de projeto enquanto + o sistema de arquivos ainda não está montado. + + ```bash + # Para sistema de arquivos ext4, com o volume /dev/block-device não montado + sudo tune2fs -O project -Q prjquota /dev/block-device + ``` + +* Garanta que o sistema de arquivos raiz (ou sistema de arquivos opcional de + tempo de execução) esteja montado com quotas de projeto habilitadas. Em ambos + os sistemas XFS e ext4fs, a opção de montagem é chamada `prjquota`. + +{{% /tab %}} +{{< /tabs >}} + +## Recursos estendidos + +Recursos estendidos são nomes de recursos absolutos fora do domínio +`kubernetes.io`. Estes recursos permitem a operadores de cluster anunciar e a +usuários consumir recursos que não são embutidos pelo Kubernetes. 
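
Como referência, o esboço abaixo (meramente ilustrativo, reutilizando o recurso
hipotético `example.com/foo` e o nó `k8s-node-1` dos exemplos apresentados mais
adiante nesta seção) mostra como um recurso estendido anunciado aparece no objeto
Node, ao lado dos recursos embutidos:

```yaml
apiVersion: v1
kind: Node
metadata:
  name: k8s-node-1          # nó hipotético, o mesmo usado nos exemplos a seguir
status:
  capacity:
    cpu: "2"
    memory: 7679792Ki
    pods: "110"
    example.com/foo: "5"    # recurso estendido anunciado pelo operador do cluster
  allocatable:
    cpu: 1800m
    memory: 7474992Ki
    pods: "110"
    example.com/foo: "5"    # preenchido de forma assíncrona pelo kubelet
```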
+ +Dois passos são necessários para a utilização de recursos estendidos. +Primeiramente, o operador do cluster deve anunciar um recurso estendido. Em +segundo lugar, os usuários devem solicitar o recurso estendido em Pods. + +### Gerenciando recursos estendidos + +#### Recursos estendidos a nível de nó + +Recursos estendidos a nível de nó são recursos ligados ao nó. + +##### Recursos gerenciados por dispositivos conectados + +Veja [Device Plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) +para mais informações sobre como anunciar recursos gerenciados por dispositivos +conectados em cada nó. + +##### Outros recursos + +A fim de anunciar um novo recurso estendido a nível de nó, o operador do cluster +pode enviar uma requisição HTTP com o método `PATCH` para o servidor da API do +Kubernetes para especificar a quantidade disponível em um nó no cluster, através +do campo `status.capacity`. Após a realização desta operação, o campo +`status.capacity` do nó irá conter um novo recurso. O campo `status.allocatable` +é atualizado automaticamente pelo kubelet, de forma assíncrona, com o novo +recurso. + +Como o escalonador utiliza o valor do campo `status.allocatable` do nó ao +verificar a saúde do Pod, o escalonador somente considerará o novo valor do +campo após esta atualização assíncrona. Pode haver um pequeno atraso entre a +atualização da capacidade do nó com um novo recurso e o momento em que o +primeiro Pod que requer o recurso poderá ser agendado naquele nó. + +**Exemplo**: + +Este exemplo demonstra como utilizar a ferramenta `curl` para criar uma +requisição HTTP que anuncia cinco recursos "example.com/foo" no nó `k8s-node-1`, +cujo nó da camada de gerenciamento é `k8s-master`. + +```shell +curl --header "Content-Type: application/json-patch+json" \ + --request PATCH \ + --data '[{"op": "add", "path": "/status/capacity/example.com~1foo", "value": "5"}]' \ + http://k8s-master:8080/api/v1/nodes/k8s-node-1/status +``` + +{{< note >}} +Na requisição anterior, a notação `~1` é a codificação do caractere `/` no campo +`path` para a operação de atualização. O valor do campo `path` em JSON-Patch é +interpretado como um JSON-Pointer. Para maiores detalhes, veja +[a seção 3 da IETF RFC 6901](https://tools.ietf.org/html/rfc6901#section-3). +{{< /note >}} + +#### Recursos estendidos a nível de cluster + +Recursos estendidos a nível de cluster não são vinculados aos nós. Estes +recursos são normalmente gerenciados por extensões do escalonador, que manipulam +o consumo e as quotas de recursos. + +Você pode especificar os recursos estendidos que são manipulados por extensões +do escalonador nas [configurações do kube-scheduler](/docs/reference/config-api/kube-scheduler-config.v1beta3/). + +**Exemplo**: + +A configuração abaixo para uma política do escalonador indica que o recurso +estendido a nível de cluster "example.com/foo" é manipulado pelas extensões do +escalonador. + +- O escalonador envia um Pod para a extensão do escalonador somente se o Pod + solicitar "example.com/foo". +- O campo `ignoredByScheduler` especifica que o escalonador não verifica o + recurso "example.com/foo" em seu predicado `PodFitsResources`. + +```json +{ + "kind": "Policy", + "apiVersion": "v1", + "extenders": [ + { + "urlPrefix":"", + "bindVerb": "bind", + "managedResources": [ + { + "name": "example.com/foo", + "ignoredByScheduler": true + } + ] + } + ] +} +``` + +### Consumindo recursos estendidos + +Usuários podem consumir recursos estendidos em especificações de Pods como CPU +e memória. 
O escalonador controla a contagem de recursos de modo que a +quantidade alocada simultaneamente a Pods não seja maior que a quantidade +disponível. + +O servidor da API limita as quantidades de recursos estendidos a números inteiros. +Exemplos de quantidades _válidas_ são `3`, `3000m` e `3Ki`. Exemplos de +quantidades _inválidas_ são `0.5` e `1500m`. + +{{< note >}} +Recursos estendidos substituem os Recursos Inteiros Opacos. +Usuários podem escolher qualquer prefixo de nome de domínio, com exceção do +domínio `kubernetes.io`, que é reservado. +{{< /note >}} + +Para consumir um recurso estendido em um Pod, inclua o nome do recurso como uma +chave no mapa `spec.containers[].resources.limits` na especificação do contêiner. + +{{< note >}} +Recursos estendidos não podem ser superdimensionados. Portanto, `request` e +`limit` devem ser iguais se ambos estiverem presentes na especificação de um +contêiner. +{{< /note >}} + +Um Pod só é agendado se todos os seus requerimentos de recursos forem +satisfeitos, incluindo CPU, memória e quaisquer recursos estendidos. O Pod +permanece no estado `PENDING` enquanto seus requerimentos de recursos não puderem +ser satisfeitos. + +**Exemplo**: + +O Pod abaixo requisita duas CPUs e um "example.com/foo" (um recurso estendido). + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: my-pod +spec: + containers: + - name: my-container + image: myimage + resources: + requests: + cpu: 2 + example.com/foo: 1 + limits: + example.com/foo: 1 +``` + +## Limitação de PID + +Limites de ID de processo (PID) permitem à configuração de um kubelet limitar o +número de PIDs que um dado Pod pode consumir. Consulte +[PID Limiting](/docs/concepts/policy/pid-limiting/) para mais informações. + +## Solução de problemas {#troubleshooting} + +### Meus pods estão pendentes com um evento `FailedScheduling` + +Se o escalonador não conseguir encontrar nenhum nó que atenda aos requisitos de +recursos do Pod, este Pod permanecerá não-agendado até que um local destino +possa ser encontrado. Um [Evento](/docs/reference/kubernetes-api/cluster-resources/event-v1/) +é produzido cada vez que o escalonador falhar em encontrar um local para agendar +o Pod. Você pode utilizar o utilitário `kubectl` para ver os eventos de um Pod. +Por exemplo: + +```shell +kubectl describe pod frontend | grep -A 9999999999 Events +``` +``` +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 23s default-scheduler 0/42 nodes available: insufficient cpu +``` + +No exemplo acima, o Pod de nome "frontend" não pôde ser agendado devido à nenhum +nó possuir CPU suficiente para suprir seu requerimento de CPU. Mensagens de erro +semelhantes a essa podem sugerir falha devido a falta de memória +(`PodExceedsFreeMemory`). De maneira geral, se um Pod estiver pendente com uma +mensagem deste tipo, há diversas possibilidades de solução a serem tentadas: + +- Adicione mais nós ao cluster. +- Encerre Pods desnecessários para liberar espaço para Pods pendentes. +- Verifique se o Pod não é maior que todos os nós. Por exemplo, se todos os nós + têm uma capacidade de `cpu: 1`, um Pod que requisita `cpu: 1.1` nunca será + agendado. +- Verifique se os nós não possuem _taints_. Se a maioria dos seus nós possuem + _taints_, e o novo Pod não tolera tal _taint_, o escalonador somente considera + agendar o Pod nos nós que não possuem aquele _taint_. + +Você pode verificar capacidades de nós e quantidades alocadas com o comando +`kubectl describe nodes`. 
Por exemplo: + +```shell +kubectl describe nodes e2e-test-node-pool-4lw4 +``` +``` +Name: e2e-test-node-pool-4lw4 +[ ... linhas abreviadas para simplificação ...] +Capacity: + cpu: 2 + memory: 7679792Ki + pods: 110 +Allocatable: + cpu: 1800m + memory: 7474992Ki + pods: 110 +[ ... linhas abreviadas para simplificação ...] +Non-terminated Pods: (5 in total) + Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits + --------- ---- ------------ ---------- --------------- ------------- + kube-system fluentd-gcp-v1.38-28bv1 100m (5%) 0 (0%) 200Mi (2%) 200Mi (2%) + kube-system kube-dns-3297075139-61lj3 260m (13%) 0 (0%) 100Mi (1%) 170Mi (2%) + kube-system kube-proxy-e2e-test-... 100m (5%) 0 (0%) 0 (0%) 0 (0%) + kube-system monitoring-influxdb-grafana-v4-z1m12 200m (10%) 200m (10%) 600Mi (8%) 600Mi (8%) + kube-system node-problem-detector-v0.1-fj7m3 20m (1%) 200m (10%) 20Mi (0%) 100Mi (1%) +Allocated resources: + (Total limits may be over 100 percent, i.e., overcommitted.) + CPU Requests CPU Limits Memory Requests Memory Limits + ------------ ---------- --------------- ------------- + 680m (34%) 400m (20%) 920Mi (11%) 1070Mi (13%) +``` + +No exemplo anterior, você pode verificar que se um Pod requisitar mais que 1,120 +CPUs ou mais que 6,23Gi de memória, tal Pod não caberá neste nó. + +Ao verificar a seção "Pods", você pode observar quais Pods estão consumindo +espaço neste nó. + +A quantidade de recursos disponível aos Pods é menor que a capacidade do nó, pois +daemons do sistema utilizam uma parcela dos recursos disponíveis. Dentro da API +do Kubernetes, cada nó tem um campo `.status.allocatable` +(consulte [NodeStatus](/docs/reference/kubernetes-api/cluster-resources/node-v1/#NodeStatus) +para mais detalhes). + +O campo `.status.allocatable` descreve a quantidade de recursos que está +disponível a Pods naquele nó (por exemplo: 15 CPUs virtuais e 7538 MiB de +memória). Para mais informações sobre recursos alocáveis do nó no Kubernetes, +veja [Reserve Compute Resources for System Daemons](/docs/tasks/administer-cluster/reserve-compute-resources/). + +Você pode configurar [quotas de recursos](/docs/concepts/policy/resource-quotas/) +para limitar a quantidade total de recursos que um namespace pode consumir. +O Kubernetes garante quotas para objetos em um namespace específico quando há +uma `ResourceQuota` naquele namespace. Por exemplo, se você atribuir namespaces +específicos a times diferentes, você pode adicionar `ResourceQuota`s nestes +namespaces. Criar quotas de recursos ajuda a evitar que um time utilize tanto de +um recurso que chegue a afetar outros times utilizando o mesmo cluster. + +Você deve também considerar o nível de acesso fornecido aos usuários de qualquer +namespace: acesso **completo** para escrita permite a alguém com este acesso +remover **qualquer** recurso, incluindo uma configuração de `ResourceQuota`. + +### Meu contêiner foi terminado + +Seu contêiner pode ser terminado se faltar recursos para que este rode. 
Para +verificar se um contêiner está sendo terminado por chegar no limite de algum +recurso, utilize o comando `kubectl describe pod` no Pod em questão: + +```shell +kubectl describe pod simmemleak-hra99 +``` + +A saída será semelhante a: +``` +Name: simmemleak-hra99 +Namespace: default +Image(s): saadali/simmemleak +Node: kubernetes-node-tf0f/10.240.216.66 +Labels: name=simmemleak +Status: Running +Reason: +Message: +IP: 10.244.2.75 +Containers: + simmemleak: + Image: saadali/simmemleak:latest + Limits: + cpu: 100m + memory: 50Mi + State: Running + Started: Tue, 07 Jul 2019 12:54:41 -0700 + Last State: Terminated + Reason: OOMKilled + Exit Code: 137 + Started: Fri, 07 Jul 2019 12:54:30 -0700 + Finished: Fri, 07 Jul 2019 12:54:33 -0700 + Ready: False + Restart Count: 5 +Conditions: + Type Status + Ready False +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 42s default-scheduler Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f + Normal Pulled 41s kubelet Container image "saadali/simmemleak:latest" already present on machine + Normal Created 41s kubelet Created container simmemleak + Normal Started 40s kubelet Started container simmemleak + Normal Killing 32s kubelet Killing container with id ead3fb35-5cf5-44ed-9ae1-488115be66c6: Need to kill Pod +``` + +No exemplo acima, o campo `Restart Count: 5` indica que o contêiner `simmemleak` +deste Pod foi terminado e reiniciado cinco vezes até o momento. A razão +`OOMKilled` demonstra que o contêiner tentou consumir mais memória do que o seu +limite. + +O próximo passo neste cenário seria vasculhar e depurar o código da aplicação, +procurando por vazamentos de memória. Se você determinar que a aplicação está se +comportando conforme o esperado, considere aumentar o limite (e possivelmente +o requerimento) de memória para aquele contêiner. + +## {{% heading "whatsnext" %}} + +* Pratique [a criação de requerimentos de recursos de memória em contêineres e Pods](/docs/tasks/configure-pod-container/assign-memory-resource/). +* Pratique [a criação de requerimentos de CPU em contêineres and Pods](/docs/tasks/configure-pod-container/assign-cpu-resource/). +* Leia como a referência da API define um [contêiner](/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container) + e seus [requerimentos de recursos](/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources). +* Leia sobre [quotas de projeto](https://xfs.org/index.php/XFS_FAQ#Q:_Quota:_Do_quotas_work_on_XFS.3F) no XFS. +* Leia mais sobre a [referência de configuração do kube-scheduler (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/). + diff --git a/content/pt-br/docs/reference/glossary/cgroup.md b/content/pt-br/docs/reference/glossary/cgroup.md new file mode 100644 index 0000000000..ab46553c79 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/cgroup.md @@ -0,0 +1,19 @@ +--- +title: cgroup (control group) +id: cgroup +date: 2019-06-25 +full_link: +short_description: > + Um grupo de processos do Linux com isolamento de recursos opcional, contagem e limites. + +aka: +tags: +- fundamental +--- +Um grupo de processos do Linux com isolamento de recursos opcional, contagem e limites. + + + +cgroup é uma funcionalidade do núcleo de sistema do Linux que limita, conta e +isola o uso de recursos (CPU, memória, entrada e saída de disco, rede) para um +conjunto de processos. 
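
Para relacionar o termo com o gerenciamento de recursos descrito acima, segue um
esboço ilustrativo (assumindo um nó que utiliza cgroup v2; os nomes de interface
citados nos comentários são apenas indicativos, e a implementação exata varia
conforme o agente de execução de contêiner) de como requisições e limites de um
contêiner se refletem, aproximadamente, em controles de cgroup:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: exemplo-cgroup      # nome hipotético
spec:
  containers:
  - name: app
    image: images.my-company.example/app:v4
    resources:
      requests:
        cpu: "250m"      # tende a se tornar um peso de CPU (por exemplo, cpu.weight no cgroup v2)
        memory: "64Mi"   # pode ser usada como dica para memory.min / memory.low
      limits:
        cpu: "500m"      # teto rígido de tempo de CPU por período de agendamento (por exemplo, cpu.max)
        memory: "128Mi"  # limite de memória do cgroup (por exemplo, memory.max); excedê-lo pode acionar o encerramento por falta de memória (OOM)
```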
diff --git a/content/pt-br/docs/reference/glossary/eviction.md b/content/pt-br/docs/reference/glossary/eviction.md new file mode 100644 index 0000000000..a20f281216 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/eviction.md @@ -0,0 +1,18 @@ +--- +title: Evicção +id: eviction +date: 2022-03-05 +full_link: /pt-br/docs/concepts/scheduling-eviction/ +short_description: > + Processo de encerramento de um ou mais Pods em Nós +aka: +tags: +- operation +--- + +Evicção é o processo de encerramento de um ou mais Pods em Nós. + + +Existem dois tipos de evicção: +* [Evicção por pressão no nó](/docs/concepts/scheduling-eviction/node-pressure-eviction/) +* [Evicção iniciada pela API](/docs/concepts/scheduling-eviction/api-eviction/) diff --git a/content/zh/docs/concepts/cluster-administration/addons.md b/content/zh/docs/concepts/cluster-administration/addons.md index c579cacc17..15b9efe804 100644 --- a/content/zh/docs/concepts/cluster-administration/addons.md +++ b/content/zh/docs/concepts/cluster-administration/addons.md @@ -31,7 +31,6 @@ Add-ons 扩展了 Kubernetes 的功能。 * [Canal](https://github.com/tigera/canal/tree/master/k8s-install) unites Flannel and Calico, providing networking and network policy. * [Cilium](https://github.com/cilium/cilium) is a L3 network and network policy plugin that can enforce HTTP/API/L7 policies transparently. Both routing and overlay/encapsulation mode are supported. * [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) enables Kubernetes to seamlessly connect to a choice of CNI plugins, such as Calico, Canal, Flannel, Romana, or Weave. -* [Contiv](http://contiv.github.io) provides configurable networking (native L3 using BGP, overlay using vxlan, classic L2, and Cisco-SDN/ACI) for various use cases and a rich policy framework. Contiv project is fully [open sourced](http://github.com/contiv). The [installer](http://github.com/contiv/install) provides both kubeadm and non-kubeadm based installation options. * [Contrail](http://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/), based on [Tungsten Fabric](https://tungsten.io), is an open source, multi-cloud network virtualization and policy management platform. Contrail and Tungsten Fabric are integrated with orchestration systems such as Kubernetes, OpenShift, OpenStack and Mesos, and provide isolation modes for virtual machines, containers/pods and bare metal workloads. * [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) is an overlay network provider that can be used with Kubernetes. * [Knitter](https://github.com/ZTE/Knitter/) is a network solution supporting multiple networking in Kubernetes. 
@@ -55,9 +54,6 @@ Add-ons 扩展了 Kubernetes 的功能。 同时支持路由(routing)和覆盖/封装(overlay/encapsulation)模式。 * [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) 使 Kubernetes 无缝连接到一种 CNI 插件, 例如:Flannel、Calico、Canal、Romana 或者 Weave。 -* [Contiv](https://contiv.github.io) 为多种用例提供可配置网络(使用 BGP 的原生 L3,使用 vxlan 的覆盖网络, - 经典 L2 和 Cisco-SDN/ACI)和丰富的策略框架。Contiv 项目完全[开源](https://github.com/contiv)。 - [安装工具](https://github.com/contiv/install)同时提供基于和不基于 kubeadm 的安装选项。 * 基于 [Tungsten Fabric](https://tungsten.io) 的 [Contrail](https://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/) 是一个开源的多云网络虚拟化和策略管理平台,Contrail 和 Tungsten Fabric 与业务流程系统 diff --git a/content/zh/docs/concepts/cluster-administration/logging.md b/content/zh/docs/concepts/cluster-administration/logging.md index 93707e07ea..25890be94a 100644 --- a/content/zh/docs/concepts/cluster-administration/logging.md +++ b/content/zh/docs/concepts/cluster-administration/logging.md @@ -22,7 +22,7 @@ Application logs can help you understand what is happening inside your applicati 但是,由容器引擎或运行时提供的原生功能通常不足以构成完整的日志记录方案。 @@ -94,12 +94,25 @@ The output is: ``` 你可以使用命令 `kubectl logs --previous` 检索之前容器实例的日志。 如果 Pod 中有多个容器,你应该为该命令附加容器名以访问对应容器的日志。 详见 [`kubectl logs` 文档](/docs/reference/generated/kubectl/kubectl-commands#logs)。 +如果 Pod 有多个容器,你应该为该命令附加容器名以访问对应容器的日志, +使用 `-c` 标志来指定要访问的容器的日志,如下所示: +```console +kubectl logs counter -c count + +``` +详见 [kubectl logs 文档](/zh/docs/reference/generated/kubectl/kubectl-commands#logs)。 容器向标准输出和标准错误输出写出数据,但在格式上并不统一。 节点级代理 diff --git a/content/zh/docs/concepts/configuration/manage-resources-containers.md b/content/zh/docs/concepts/configuration/manage-resources-containers.md index a45a8a3f30..bb15c00543 100644 --- a/content/zh/docs/concepts/configuration/manage-resources-containers.md +++ b/content/zh/docs/concepts/configuration/manage-resources-containers.md @@ -1,43 +1,47 @@ --- -title: 为容器管理资源 +title: 为 Pod 和容器管理资源 content_type: concept weight: 40 feature: title: 自动装箱 description: > - 根据资源需求和其他约束自动放置容器,同时避免影响可用性。将关键性工作负载和尽力而为性质的服务工作负载进行混合放置,以提高资源利用率并节省更多资源。 + 根据资源需求和其他约束自动放置容器,同时避免影响可用性。 + 将关键性的和尽力而为性质的工作负载进行混合放置,以提高资源利用率并节省更多资源。 --- - 当你定义 {{< glossary_tooltip text="Pod" term_id="pod" >}} 时可以选择性地为每个 {{< glossary_tooltip text="容器" term_id="container" >}}设定所需要的资源数量。 最常见的可设定资源是 CPU 和内存(RAM)大小;此外还有其他类型的资源。 -当你为 Pod 中的 Container 指定了资源 __请求__ 时,调度器就利用该信息决定将 Pod 调度到哪个节点上。 +当你为 Pod 中的 Container 指定了资源 __请求__ 时, +{{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}} +就利用该信息决定将 Pod 调度到哪个节点上。 当你还为 Container 指定了资源 __约束__ 时,kubelet 就可以确保运行的容器不会使用超出所设约束的资源。 kubelet 还会为容器预留所 __请求__ 数量的系统资源,供其使用。 @@ -65,7 +69,7 @@ more RAM. 运行,那么该容器就可以尝试使用更多的内存。 -如果某 Container 设置了自己的内存限制但未设置内存请求,Kubernetes +如果某容器设置了自己的内存限制但未设置内存请求,Kubernetes 自动为其设置与内存限制相匹配的请求值。类似的,如果某 Container 设置了 -CPU 限制值但未设置 CPU 请求值,则 Kubernetes 自动为其设置 CPU 请求 -并使之与 CPU 限制值匹配。 +CPU 限制值但未设置 CPU 请求值,则 Kubernetes 自动为其设置 CPU +请求并使之与 CPU 限制值匹配。 {{< /note >}} ## 资源类型 {#resource-types} -*CPU* 和*内存*都是*资源类型*。每种资源类型具有其基本单位。 +*CPU* 和 *内存* 都是 *资源类型*。每种资源类型具有其基本单位。 CPU 表达的是计算处理能力,其单位是 [Kubernetes CPUs](#meaning-of-cpu)。 内存的单位是字节。 -如果你使用的是 Kubernetes v1.14 或更高版本,则可以指定巨页(Huge Page)资源。 +对于 Linux 负载,则可以指定巨页(Huge Page)资源。 巨页是 Linux 特有的功能,节点内核在其中分配的内存块比默认页大小大得多。 例如,在默认页面大小为 4KiB 的系统上,你可以指定约束 `hugepages-2Mi: 80Mi`。 @@ -141,16 +145,21 @@ consumed. They are distinct from [Services](/docs/concepts/services-networking/service/) are objects that can be read and modified through the Kubernetes API server. 
--> -CPU 和内存统称为*计算资源*,或简称为*资源*。 +CPU 和内存统称为“计算资源”,或简称为“资源”。 计算资源的数量是可测量的,可以被请求、被分配、被消耗。 它们与 [API 资源](/zh/docs/concepts/overview/kubernetes-api/) 不同。 API 资源(如 Pod 和 [Service](/zh/docs/concepts/services-networking/service/))是可通过 Kubernetes API 服务器读取和修改的对象。 +## Pod 和 容器的资源请求和约束 + +针对每个容器,你都可以指定其资源约束和请求,包括如下选项: * `spec.containers[].resources.limits.cpu` * `spec.containers[].resources.limits.memory` @@ -159,93 +168,114 @@ Each Container of a Pod can specify one or more of the following: * `spec.containers[].resources.requests.memory` * `spec.containers[].resources.requests.hugepages-` -Although requests and limits can only be specified on individual Containers, it -is convenient to talk about Pod resource requests and limits. A -*Pod resource request/limit* for a particular resource type is the sum of the -resource requests/limits of that type for each Container in the Pod. + - -## Pod 和 容器的资源请求和约束 - -Pod 中的每个容器都可以指定以下的一个或者多个值: - -- `spec.containers[].resources.limits.cpu` -- `spec.containers[].resources.limits.memory` -- `spec.containers[].resources.limits.hugepages-` -- `spec.containers[].resources.requests.cpu` -- `spec.containers[].resources.requests.memory` -- `spec.containers[].resources.requests.hugepages-` - -尽管请求和限制值只能在单个容器上指定,我们仍可方便地计算出 Pod 的资源请求和约束。 -Pod 对特定资源类型的请求/约束值是 Pod 中各容器对该类型资源的请求/约束值的总和。 +尽管你只能逐个容器地指定请求和限制值,考虑 Pod 的总体资源请求和约束也是有用的。 +对特定资源而言,Pod 的资源请求/约束值是 Pod 中各容器对该类型资源的请求/约束值的总和。 ## Kubernetes 中的资源单位 {#resource-units-in-kubernetes} -### CPU 的含义 {#meaning-of-cpu} +### CPU 资源单位 {#meaning-of-cpu} -CPU 资源的约束和请求以 *CPU* 为单位。 - -Kubernetes 中的一个 CPU 等于云平台上的 **1 个 vCPU/核**和裸机 Intel -处理器上的 **1 个超线程**。 - -你也可以表达带小数 CPU 的请求。`spec.containers[].resources.requests.cpu` 为 0.5 -的 Container 肯定能够获得请求 1 CPU 的容器的一半 CPU 资源。表达式 `0.1` 等价于表达式 `100m`, -可以看作 “100 millicpu”。有些人说成是“一百毫 cpu”,其实说的是同样的事情。 -具有小数点(如 `0.1`)的请求由 API 转换为 `100m`;最大精度是 `1m`。 -因此,或许你应该优先考虑使用 `100m` 的形式。 - -CPU 总是按绝对数量来请求的,不可以使用相对数量; -0.1 的 CPU 在单核、双核、48 核的机器上的意义是一样的。 +CPU 资源的约束和请求以 “cpu” 为单位。 +在 Kubernetes 中,一个 CPU 等于**1 个物理 CPU 核** 或者 **一个虚拟核**, +取决于节点是一台物理主机还是运行在某物理主机上的虚拟机。 +你也可以表达带小数 CPU 的请求。 +当你定义一个容器,将其 `spec.containers[].resources.requests.cpu` 设置为 0.5 时, +你所请求的 CPU 是你请求 `1.0` CPU 时的一半。 +对于 CPU 资源单位,[数量](/docs/reference/kubernetes-api/common-definitions/quantity/) +表达式 `0.1` 等价于表达式 `100m`,可以看作 “100 millicpu”。 +有些人说成是“一百毫核”,其实说的是同样的事情。 + + +CPU 资源总是设置为资源的绝对数量而非相对数量值。 +例如,无论容器运行在单核、双核或者 48-核的机器上,`500m` CPU 表示的是大约相同的计算能力。 + +{{< note >}} + +Kubernetes 不允许设置精度小于 `1m` 的 CPU 资源。 +因此,当 CPU 单位小于 `1` 或 `1000m` 时,使用毫核的形式是有用的; +例如 `5m` 而不是 `0.005`。 +{{< /note >}} + + -## 内存的含义 {#meaning-of-memory} +## 内存资源单位 {#meaning-of-memory} -内存的约束和请求以字节为单位。你可以使用以下后缀之一以一般整数或定点数字形式来表示内存: -E、P、T、G、M、k。你也可以使用对应的 2 的幂数:Ei、Pi、Ti、Gi、Mi、Ki。 +`memory` 的约束和请求以字节为单位。 +你可以使用普通的证书,或者带有以下 +[数量](/docs/reference/kubernetes-api/common-definitions/quantity/)后缀 +的定点数字来表示内存:E、P、T、G、M、k。 +你也可以使用对应的 2 的幂数:Ei、Pi、Ti、Gi、Mi、Ki。 例如,以下表达式所代表的是大致相同的值: ``` -128974848、129e6、129M、123Mi +128974848、129e6、129M、128974848000m、123Mi ``` -下面是个例子。 +请注意后缀的大小写。如果你请求 `400m` 内存,实际上请求的是 0.4 字节。 +如果有人这样设定资源请求或限制,可能他的实际想法是申请 400 兆字节(`400Mi`) +或者 400M 字节。 -以下 Pod 有两个 Container。每个 Container 的请求为 0.25 cpu 和 64MiB(226 字节)内存, -每个容器的资源约束为 0.5 cpu 和 128MiB 内存。 -你可以认为该 Pod 的资源请求为 0.5 cpu 和 128 MiB 内存,资源限制为 1 cpu 和 256MiB 内存。 + +## 容器资源示例 {#example-1} + +以下 Pod 有两个容器。每个容器的请求为 0.25 CPU 和 64MiB(226 字节)内存, +每个容器的资源约束为 0.5 CPU 和 128MiB 内存。 +你可以认为该 Pod 的资源请求为 0.5 CPU 和 128 MiB 内存,资源限制为 1 CPU 和 256MiB 内存。 ```yaml apiVersion: v1 @@ -256,9 +286,6 @@ spec: containers: - name: app image: 
images.my-company.example/app:v4 - env: - - name: MYSQL_ROOT_PASSWORD - value: "password" resources: requests: memory: "64Mi" @@ -284,7 +311,8 @@ When you create a Pod, the Kubernetes scheduler selects a node for the Pod to run on. Each node has a maximum capacity for each of the resource types: the amount of CPU and memory it can provide for Pods. The scheduler ensures that, for each resource type, the sum of the resource requests of the scheduled -Containers is less than the capacity of the node. Note that although actual memory +containers is less than the capacity of the node. +Note that although actual memory or CPU resource usage on nodes is very low, the scheduler still refuses to place a Pod on a node if the capacity check fails. This protects against a resource shortage on a node when resource usage later increases, for example, during a @@ -300,84 +328,88 @@ daily peak in request rate. 当稍后节点上资源用量增加,例如到达请求率的每日峰值区间时,节点上也不会出现资源不足的问题。 -## 带资源约束的 Pod 如何运行 +## Kubernetes 应用资源请求与约束的方式 {#how-pods-with-resource-limits-are-run} -当 kubelet 启动 Pod 中的 Container 时,它会将 CPU 和内存约束信息传递给容器运行时。 +当 kubelet 启动 Pod 中的容器时,它会将容器的 CPU 和内存请求与约束信息传递给容器运行时。 -当使用 Docker 时: +在 Linux 系统上,容器运行时通常会配置内核 +{{< glossary_tooltip text="CGroups" term_id="cgroup" >}},负责应用并实施所定义的请求。 - -- `spec.containers[].resources.requests.cpu` 先被转换为可能是小数的基础值,再乘以 1024。 - 这个数值和 2 的较大者用作 `docker run` 命令中的 - [`--cpu-shares`](https://docs.docker.com/engine/reference/run/#/cpu-share-constraint) - 标志的值。 -- `spec.containers[].resources.limits.cpu` 先被转换为 millicore 值,再乘以 100。 - 其结果就是每 100 毫秒内容器可以使用的 CPU 时间总量,单位为微秒。在此期间(100ms), - 容器所使用的 CPU 时间不可以超过它被分配的时间。 - - {{< note >}} - 默认的配额(Quota)周期为 100 毫秒。CPU 配额的最小精度为 1 毫秒。 - {{}} - -- `spec.containers[].resources.limits.memory` 被转换为整数值,作为 `docker run` 命令中的 - [`--memory`](https://docs.docker.com/engine/reference/run/#/user-memory-constraints) - 参数值。 +- CPU 约束值定义的是容器可使用的 CPU 时间的硬性上限。 + 在每个调度周期(时间片)期间,Linux 内核检查是否已经超出该约束值; + 内核会在允许该 cgroup 恢复执行之前会等待。 + +- CPU 请求值定义的是一个权重值。如果若干不同的容器(CGroups)需要在一个共享的系统上竞争运行, + CPU 请求值大的负载会获得比请求值小的负载更多的 CPU 时间。 + +- 内存请求值主要用于(Kubernetes)Pod 调度期间。在一个启用了 CGroup v2 的节点上, + 容器运行时可能会使用内存请求值作为设置 `memory.min` 和 `memory.low` 的提示值。 + +- 内存约束值定义的是 CGroup 的内存约束。如果容器尝试分配的内存量超出约束值, + 则 Linux 内核的内存不足处理子系统会被激活,并停止尝试分配内存的容器中的某个进程。 + 如果该进程在容器中 PID 为 1,而容器被标记为可重新启动,则 Kubernetes + 会重新启动该容器。 + +- Pod 或容器的内存约束值也适用于通过内存供应的卷,例如 `emptyDir` 卷。 + kubelet 会跟踪 `tmpfs` 形式的 emptyDir 卷用量,将其作为容器的内存用量, + 而不是临时存储用量。 -如果 Container 超过其内存限制,则可能会被终止。如果容器可重新启动,则与所有其他类型的 -运行时失效一样,kubelet 将重新启动容器。 +如果某容器内存用量超过其内存请求值并且所在节点内存不足时,容器所处的 Pod +可能被{{< glossary_tooltip text="逐出" term_id="eviction" >}}. 
-如果一个 Container 内存用量超过其内存请求值,那么当节点内存不足时,容器所处的 Pod 可能被逐出。 +每个容器可能被允许也可能不被允许使用超过其 CPU 约束的处理时间。 +但是,容器运行时不会由于 CPU 使用率过高而杀死 Pod 或容器。 -每个 Container 可能被允许也可能不被允许使用超过其 CPU 约束的处理时间。 -但是,容器不会由于 CPU 使用率过高而被杀死。 - -要确定 Container 是否会由于资源约束而无法调度或被杀死,请参阅[疑难解答](#troubleshooting) 部分。 +要确定某容器是否会由于资源约束而无法调度或被杀死,请参阅[疑难解答](#troubleshooting)节。 ## 监控计算和内存资源用量 -Pod 的资源使用情况是作为 Pod 状态的一部分来报告的。 +kubelet 会将 Pod 的资源使用情况作为 Pod +[`status`](/zh/docs/concepts/overview/working-with-objects/kubernetes-objects/#object-spec-and-status) +的一部分来报告的。 -如果为集群配置了可选的 -[监控工具](/zh/docs/tasks/debug-application-cluster/resource-usage-monitoring/), -则可以直接从 -[指标 API](/zh/docs/tasks/debug-application-cluster/resource-metrics-pipeline/#the-metrics-api) +如果为集群配置了可选的[监控工具](/zh/docs/tasks/debug-application-cluster/resource-usage-monitoring/), +则可以直接从[指标 API](/zh/docs/tasks/debug-application-cluster/resource-metrics-pipeline/#the-metrics-api) 或者监控工具获得 Pod 的资源使用情况。 + {{< feature-state for_k8s_version="v1.10" state="beta" >}} 节点通常还可以具有本地的临时性存储,由本地挂接的可写入设备或者有时也用 RAM @@ -425,7 +458,7 @@ The kubelet also uses this kind of storage to hold [node-level container logs](/docs/concepts/cluster-administration/logging/#logging-at-the-node-level), container images, and the writable layers of running containers. -If a node fails, the data in its ephemeral storage can be lost. +If a node fails, the data in its ephemeral storage can be lost. Your applications cannot expect any performance SLAs (disk IOPS for example) from local ephemeral storage. @@ -569,43 +602,44 @@ kubelet 会将 `tmpfs` emptyDir 卷的用量当作容器内存用量,而不是 ### 为本地临时性存储设置请求和约束值 -你可以使用 _ephemeral-storage_ 来管理本地临时性存储。 -Pod 中的每个 Container 可以设置以下属性: +你可以使用 `ephemeral-storage` 来管理本地临时性存储。 +Pod 中的每个容器可以设置以下属性: * `spec.containers[].resources.limits.ephemeral-storage` * `spec.containers[].resources.requests.ephemeral-storage` -`ephemeral-storage` 的请求和约束值是按字节计量的。你可以使用一般整数或者定点数字 +`ephemeral-storage` 的请求和约束值是按量纲计量的。你可以使用一般整数或者定点数字 加上下面的后缀来表达存储量:E、P、T、G、M、K。 你也可以使用对应的 2 的幂级数来表达:Ei、Pi、Ti、Gi、Mi、Ki。 例如,下面的表达式所表达的大致是同一个值: -``` -128974848, 129e6, 129M, 123Mi -``` +- `128974848` +- `129e6` +- `129M` +- `123Mi` -在下面的例子中,Pod 包含两个 Container。每个 Container 请求 2 GiB 大小的本地临时性存储。 -每个 Container 都设置了 4 GiB 作为其本地临时性存储的约束值。 +在下面的例子中,Pod 包含两个容器。每个容器请求 2 GiB 大小的本地临时性存储。 +每个容器都设置了 4 GiB 作为其本地临时性存储的约束值。 因此,整个 Pod 的本地临时性存储请求是 4 GiB,且其本地临时性存储的约束为 8 GiB。 ```yaml @@ -644,9 +678,11 @@ spec: ### How Pods with ephemeral-storage requests are scheduled When you create a Pod, the Kubernetes scheduler selects a node for the Pod to -run on. Each node has a maximum amount of local ephemeral storage it can provide for Pods. For more information, see [Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable). +run on. Each node has a maximum amount of local ephemeral storage it can provide for Pods. +For more information, see +[Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable). -The scheduler ensures that the sum of the resource requests of the scheduled Containers is less than the capacity of the node. +The scheduler ensures that the sum of the resource requests of the scheduled containers is less than the capacity of the node. 
--> ### 带临时性存储的 Pods 的调度行为 @@ -657,7 +693,7 @@ The scheduler ensures that the sum of the resource requests of the scheduled Con [节点可分配资源](/zh/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) 节。 -调度器会确保所调度的 Containers 的资源请求总和不会超出节点的资源容量。 +调度器会确保所调度的容器的资源请求总和不会超出节点的资源容量。 ##### 其他资源 {#other-resources} @@ -918,7 +953,16 @@ that requests the resource to be scheduled on that node. 以在集群中节点的 `status.capacity` 中为其配置可用数量。 完成此操作后,节点的 `status.capacity` 字段中将包含新资源。 kubelet 会异步地对 `status.allocatable` 字段执行自动更新操作,使之包含新资源。 -请注意,由于调度器在评估 Pod 是否适合在某节点上执行时会使用节点的 `status.allocatable` 值, + + +由于调度器在评估 Pod 是否适合在某节点上执行时会使用节点的 `status.allocatable` 值, +调度器只会考虑异步更新之后的新值。 在更新节点容量使之包含新资源之后和请求该资源的第一个 Pod 被调度到该节点之间, 可能会有短暂的延迟。 @@ -929,7 +973,6 @@ Here is an example showing how to use `curl` to form an HTTP request that advertises five "example.com/foo" resources on node `k8s-node-1` whose master is `k8s-master`. --> - **示例:** 这是一个示例,显示了如何使用 `curl` 构造 HTTP 请求,公告主节点为 `k8s-master` @@ -963,14 +1006,14 @@ Cluster-level extended resources are not tied to nodes. They are usually managed by scheduler extenders, which handle the resource consumption and resource quota. You can specify the extended resources that are handled by scheduler extenders -in [scheduler policy configuration](/docs/reference/config-api/kube-scheduler-policy-config.v1/) +in [scheduler policy configuration](/docs/reference/config-api/kube-scheduler-config.v1beta3/) --> #### 集群层面的扩展资源 {#cluster-level-extended-resources} 集群层面的扩展资源并不绑定到具体节点。 它们通常由调度器扩展程序(Scheduler Extenders)管理,这些程序处理资源消耗和资源配额。 -你可以在[调度器策略配置](/zh/docs/reference/config-api/kube-scheduler-policy-config.v1/) +你可以在[调度器策略配置](/zh/docs/reference/config-api/kube-scheduler-config.v1beta3/) 中指定由调度器扩展程序处理的扩展资源。 **示例:** @@ -1091,9 +1134,10 @@ spec: - ## PID 限制 {#pid-limiting} 进程 ID(PID)限制允许对 kubelet 进行配置,以限制给定 Pod 可以消耗的 PID 数量。 @@ -1102,43 +1146,52 @@ Process ID (PID) limits allow for the configuration of a kubelet to limit the nu ## 疑难解答 -### 我的 Pod 处于悬决状态且事件信息显示 failedScheduling +### 我的 Pod 处于悬决状态且事件信息显示 `FailedScheduling` 如果调度器找不到该 Pod 可以匹配的任何节点,则该 Pod 将保持未被调度状态, 直到找到一个可以被调度到的位置。每当调度器找不到 Pod 可以调度的地方时, -会产生一个事件,如下所示: +会产生一个 [Event](/docs/reference/kubernetes-api/cluster-resources/event-v1/)。 +你可以使用 `kubectl` 来查看 Pod 的事件;例如: ```shell -kubectl describe pod frontend | grep -A 3 Events +kubectl describe pod frontend | grep -A 9999999999 Events ``` + ``` Events: - FirstSeen LastSeen Count From Subobject PathReason Message - 36s 5s 6 {scheduler} FailedScheduling Failed for reason PodExceedsFreeCPU and possibly others + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 23s default-scheduler 0/42 nodes available: insufficient cpu ``` - 在上述示例中,由于节点上的 CPU 资源不足,名为 “frontend” 的 Pod 无法被调度。 由于内存不足(PodExceedsFreeMemory)而导致失败时,也有类似的错误消息。 一般来说,如果 Pod 处于悬决状态且有这种类型的消息时,你可以尝试如下几件事情: @@ -1147,12 +1200,15 @@ You can check node capacities and amounts allocated with the - 终止不需要的 Pod,为悬决的 Pod 腾出空间。 - 检查 Pod 所需的资源是否超出所有节点的资源容量。例如,如果所有节点的容量都是`cpu:1`, 那么一个请求为 `cpu: 1.1` 的 Pod 永远不会被调度。 +- 检查节点上的污点设置。如果集群中节点上存在污点,而新的 Pod 不能容忍污点, + 调度器只会考虑将 Pod 调度到不带有该污点的节点上。 你可以使用 `kubectl describe nodes` 命令检查节点容量和已分配的资源数量。 例如: ```shell kubectl describe nodes e2e-test-node-pool-4lw4 ``` + ``` Name: e2e-test-node-pool-4lw4 [ ... 这里忽略了若干行以便阅读 ...] @@ -1184,34 +1240,60 @@ Allocated resources: In the preceding output, you can see that if a Pod requests more than 1120m CPUs or 6.23Gi of memory, it will not fit on the node. 
-By looking at the `Pods` section, you can see which Pods are taking up space on +By looking at the "Pods" section, you can see which Pods are taking up space on the node. - -The amount of resources available to Pods is less than the node capacity, because -system daemons use a portion of the available resources. The `allocatable` field -[NodeStatus](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#nodestatus-v1-core) -gives the amount of resources that are available to Pods. For more information, see -[Node Allocatable Resources](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md). -The [resource quota](/docs/concepts/policy/resource-quotas/) feature can be configured -to limit the total amount of resources that can be consumed. If used in conjunction -with namespaces, it can prevent one team from hogging all the resources. --> 在上面的输出中,你可以看到如果 Pod 请求超过 1120m CPU 或者 6.23Gi 内存,节点将无法满足。 -通过查看 `Pods` 部分,你将看到哪些 Pod 占用了节点上的资源。 +通过查看 "Pods" 部分,你将看到哪些 Pod 占用了节点上的资源。 -可供 Pod 使用的资源量小于节点容量,因为系统守护程序也会使用一部分可用资源。 -[NodeStatus](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#nodestatus-v1-core) -的 `allocatable` 字段给出了可用于 Pod 的资源量。 -有关更多信息,请参阅 [节点可分配资源](https://git.k8s.io/community/contributors/design-proposals/node-allocatable.md)。 + +Pods 可用的资源量低于节点的资源总量,因为系统守护进程也会使用一部分可用资源。 +在 Kubernetes API 中,每个 Node 都有一个 `.status.allocatable` 字段 +(详情参见 [NodeStatus](/docs/reference/kubernetes-api/cluster-resources/node-v1/#NodeStatus))。 -可以配置 [资源配额](/zh/docs/concepts/policy/resource-quotas/) 功能特性 -以限制可以使用的资源总量。 -如果与名字空间配合一起使用,就可以防止一个团队占用所有资源。 + +字段 `.status.allocatable` 描述节点上可以用于 Pod 的资源总量(例如:15 个虚拟 +CPU、7538 MiB 内存)。关于 Kubernetes 中节点可分配资源的信息,可参阅 +[为系统守护进程预留计算资源](/zh/docs/tasks/administer-cluster/reserve-compute-resources/)。 + + +你可以配置[资源配额](/zh/docs/concepts/policy/resource-quotas/)功能特性以限制每个名字空间可以使用的资源总量。 +当某名字空间中存在 ResourceQuota 时,Kubernetes 会在该名字空间中的对象强制实施配额。 +例如,如果你为不同的团队分配名字空间,你可以为这些名字空间添加 ResourceQuota。 +设置资源配额有助于防止一个团队占用太多资源,以至于这种占用会影响其他团队。 + +你还需要考虑为这些名字空间设置授权访问: +为名字空间提供 **全部** 的写权限时,具有合适权限的人可能删除所有资源, +包括所配置的 ResourceQuota。 @@ -1225,6 +1307,11 @@ whether a Container is being killed because it is hitting a resource limit, call kubectl describe pod simmemleak-hra99 ``` + +输出类似于: + ``` Name: simmemleak-hra99 Namespace: default @@ -1235,7 +1322,6 @@ Status: Running Reason: Message: IP: 10.244.2.75 -Replication Controllers: simmemleak (1/1 replicas created) Containers: simmemleak: Image: saadali/simmemleak @@ -1254,57 +1340,47 @@ Conditions: Type Status Ready False Events: - FirstSeen LastSeen Count From SubobjectPath Reason Message - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {scheduler } scheduled Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "k8s.gcr.io/pause:0.8.0" already present on machine - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD created Created with docker id 6a41280f516d - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD started Started with docker id 6a41280f516d - Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} spec.containers{simmemleak} created Created with docker id 87348f12526a + Type Reason Age From Message + ---- 
------ ---- ---- ------- + Normal Scheduled 42s default-scheduler Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f + Normal Pulled 41s kubelet Container image "saadali/simmemleak:latest" already present on machine + Normal Created 41s kubelet Created container simmemleak + Normal Started 40s kubelet Started container simmemleak + Normal Killing 32s kubelet Killing container with id ead3fb35-5cf5-44ed-9ae1-488115be66c6: Need to kill Pod ``` -在上面的例子中,`Restart Count: 5` 意味着 Pod 中的 `simmemleak` 容器被终止并重启了五次。 - -你可以使用 `kubectl get pod` 命令加上 `-o go-template=...` 选项来获取之前终止容器的状态。 - -```shell -kubectl get pod -o go-template='{{range.status.containerStatuses}}{{"Container Name: "}}{{.name}}{{"\r\nLastState: "}}{{.lastState}}{{end}}' simmemleak-hra99 -``` -``` -Container Name: simmemleak -LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]] -``` +在上面的例子中,`Restart Count: 5` 意味着 Pod 中的 `simmemleak` +容器被终止并且(到目前为止)重启了五次。 +原因 `OOMKilled` 显示容器尝试使用超出其限制的内存量。 - -你可以看到容器因为 `reason:OOM killed` 而被终止,`OOM` 表示内存不足(Out Of Memory)。 +你接下来要做的或许是检查应用代码,看看是否存在内存泄露。 +如果你发现应用的行为与你所预期的相同,则可以考虑为该容器设置一个更高的内存约束 +(也可能需要设置请求值)。 ## {{% heading "whatsnext" %}} - * 获取[分配内存资源给容器和 Pod ](/zh/docs/tasks/configure-pod-container/assign-memory-resource/) 的实践经验 * 获取[分配 CPU 资源给容器和 Pod ](/zh/docs/tasks/configure-pod-container/assign-cpu-resource/) 的实践经验 -* 关于请求和约束之间的区别,细节信息可参见[资源服务质量](https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md) -* 阅读 API 参考文档中 [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) 部分。 -* 阅读 API 参考文档中 [ResourceRequirements](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcerequirements-v1-core) 部分。 -* 阅读 XFS 中关于[项目配额](https://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html) 的文档。 -* 阅读更多关于[kube-scheduler 策略参考 (v1)](/zh/docs/reference/config-api/kube-scheduler-policy-config.v1/) 的文档。 +* 阅读 API 参考中 [Container](/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container) + 和其[资源请求](/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources)定义。 +* 阅读 XFS 中[配额](https://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html)的文档 +* 进一步阅读 [kube-scheduler 配置参考 (v1beta3)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta3/) diff --git a/content/zh/docs/concepts/extend-kubernetes/operator.md b/content/zh/docs/concepts/extend-kubernetes/operator.md index 4812a272bd..c9734f4798 100644 --- a/content/zh/docs/concepts/extend-kubernetes/operator.md +++ b/content/zh/docs/concepts/extend-kubernetes/operator.md @@ -212,6 +212,7 @@ Operator. 
{{% thirdparty-content %}} * [Charmed Operator Framework](https://juju.is/) +* [Kopf](https://github.com/nolar/kopf) (Kubernetes Operator Pythonic Framework) * [kubebuilder](https://book.kubebuilder.io/) * [KubeOps](https://buehler.github.io/dotnet-operator-sdk/) (dotnet operator SDK) * [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator) @@ -225,6 +226,7 @@ you implement yourself {{% thirdparty-content %}} * [Charmed Operator Framework](https://juju.is/) +* [Kopf](https://github.com/nolar/kopf) (Kubernetes Operator Pythonic Framework) * [kubebuilder](https://book.kubebuilder.io/) * [KubeOps](https://buehler.github.io/dotnet-operator-sdk/) (dotnet operator SDK) * [KUDO](https://kudo.dev/) (Kubernetes 通用声明式 Operator) diff --git a/content/zh/docs/concepts/extend-kubernetes/service-catalog.md b/content/zh/docs/concepts/extend-kubernetes/service-catalog.md index 12bb99f2ed..fe5437c1ba 100644 --- a/content/zh/docs/concepts/extend-kubernetes/service-catalog.md +++ b/content/zh/docs/concepts/extend-kubernetes/service-catalog.md @@ -434,7 +434,6 @@ The following example describes how to map secret values into application enviro * If you are familiar with {{< glossary_tooltip text="Helm Charts" term_id="helm-chart" >}}, [install Service Catalog using Helm](/docs/tasks/service-catalog/install-service-catalog-using-helm/) into your Kubernetes cluster. Alternatively, you can [install Service Catalog using the SC tool](/docs/tasks/service-catalog/install-service-catalog-using-sc/). * View [sample service brokers](https://github.com/openservicebrokerapi/servicebroker/blob/master/gettingStarted.md#sample-service-brokers). * Explore the [kubernetes-sigs/service-catalog](https://github.com/kubernetes-sigs/service-catalog) project. -* View [svc-cat.io](https://svc-cat.io/docs/). --> * 如果你熟悉 {{< glossary_tooltip text="Helm Charts" term_id="helm-chart" >}}, 可以[使用 Helm 安装服务目录](/zh/docs/tasks/service-catalog/install-service-catalog-using-helm/) @@ -442,6 +441,5 @@ The following example describes how to map secret values into application enviro [使用 SC 工具安装服务目录](/zh/docs/tasks/service-catalog/install-service-catalog-using-sc/)。 * 查看[服务代理示例](https://github.com/openservicebrokerapi/servicebroker/blob/master/gettingStarted.md#sample-service-brokers) * 浏览 [kubernetes-sigs/service-catalog](https://github.com/kubernetes-sigs/service-catalog) 项目 -* 查看 [svc-cat.io](https://svc-cat.io/docs/) diff --git a/content/zh/docs/concepts/overview/components.md b/content/zh/docs/concepts/overview/components.md index a79418ed1d..fd359fc1ce 100644 --- a/content/zh/docs/concepts/overview/components.md +++ b/content/zh/docs/concepts/overview/components.md @@ -57,13 +57,13 @@ Control plane components can be run on any machine in the cluster. However, for simplicity, set up scripts typically start all control plane components on the same machine, and do not run user containers on this machine. See [Creating Highly Available clusters with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/) -for an example control plane setup that runs across multiple VMs. +for an example control plane setup that runs across multiple machines. 
--> 控制平面组件可以在集群中的任何节点上运行。 然而,为了简单起见,设置脚本通常会在同一个计算机上启动所有控制平面组件, 并且不会在此计算机上运行用户容器。 请参阅[使用 kubeadm 构建高可用性集群](/zh/docs/setup/production-environment/tools/kubeadm/high-availability/) -中关于多 VM 控制平面设置的示例。 +中关于跨多机器控制平面设置的示例。 ### kube-apiserver diff --git a/content/zh/docs/concepts/policy/pod-security-policy.md b/content/zh/docs/concepts/policy/pod-security-policy.md index 7be672cb0b..f1b099352d 100644 --- a/content/zh/docs/concepts/policy/pod-security-policy.md +++ b/content/zh/docs/concepts/policy/pod-security-policy.md @@ -3,6 +3,7 @@ title: Pod 安全策略 content_type: concept weight: 30 --- + ### AppArmor 通过 PodSecurityPolicy 上的注解来控制。 详情请参阅 -[AppArmor 文档](/zh/docs/tutorials/clusters/apparmor/#podsecuritypolicy-annotations)。 +[AppArmor 文档](/zh/docs/tutorials/policy/apparmor/#podsecuritypolicy-annotations)。 -1. 检查是否在使用 Kubernetes v1.11+,以便 NodeRestriction 功能可用。 -2. 确保你在使用[节点授权](/zh/docs/reference/access-authn-authz/node/)并且已经_启用_ +1. 确保你在使用[节点授权](/zh/docs/reference/access-authn-authz/node/)并且已经 _启用_ [NodeRestriction 准入插件](/zh/docs/reference/access-authn-authz/admission-controllers/#noderestriction)。 -3. 将 `node-restriction.kubernetes.io/` 前缀下的标签添加到 Node 对象, +2. 将 `node-restriction.kubernetes.io/` 前缀下的标签添加到 Node 对象, 然后在节点选择器中使用这些标签。 例如,`example.com.node-restriction.kubernetes.io/fips=true` 或 `example.com.node-restriction.kubernetes.io/pci-dss=true`。 @@ -216,7 +214,7 @@ feature, greatly expands the types of constraints you can express. The key enhan 3. you can constrain against labels on other pods running on the node (or other topological domain), rather than against labels on the node itself, which allows rules about which pods can and cannot be co-located --> -1. 语言更具表现力(不仅仅是“对完全匹配规则的 AND”) +1. 语言表达能力更强(不仅仅是“对完全匹配规则的 AND”) 2. 你可以发现规则是“软需求”/“偏好”,而不是硬性要求,因此, 如果调度器无法满足该要求,仍然调度该 Pod 3. 你可以使用节点上(或其他拓扑域中)的 Pod 的标签来约束,而不是使用 @@ -369,7 +367,7 @@ in the [scheduler configuration](/docs/reference/scheduling/config/). 
For exampl 例如: ```yaml -apiVersion: kubescheduler.config.k8s.io/v1beta1 +apiVersion: kubescheduler.config.k8s.io/v1beta3 kind: KubeSchedulerConfiguration profiles: diff --git a/content/zh/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/zh/docs/concepts/scheduling-eviction/kube-scheduler.md index c950fb1e20..30791e6249 100644 --- a/content/zh/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/zh/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -173,7 +173,7 @@ of the scheduler: * Read about [scheduler performance tuning](/docs/concepts/scheduling-eviction/scheduler-perf-tuning/) * Read about [Pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/) * Read the [reference documentation](/docs/reference/command-line-tools-reference/kube-scheduler/) for kube-scheduler -* Read the [kube-scheduler config (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta2/) reference +* Read the [kube-scheduler config (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/) reference * Learn about [configuring multiple schedulers](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) * Learn about [topology management policies](/docs/tasks/administer-cluster/topology-manager/) * Learn about [Pod Overhead](/docs/concepts/scheduling-eviction/pod-overhead/) @@ -181,7 +181,7 @@ of the scheduler: * 阅读关于 [调度器性能调优](/zh/docs/concepts/scheduling-eviction/scheduler-perf-tuning/) * 阅读关于 [Pod 拓扑分布约束](/zh/docs/concepts/workloads/pods/pod-topology-spread-constraints/) * 阅读关于 kube-scheduler 的 [参考文档](/zh/docs/reference/command-line-tools-reference/kube-scheduler/) -* 阅读 [kube-scheduler 配置参考 (v1beta1)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta2/) +* 阅读 [kube-scheduler 配置参考 (v1beta3)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta3/) * 了解关于 [配置多个调度器](/zh/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) 的方式 * 了解关于 [拓扑结构管理策略](/zh/docs/tasks/administer-cluster/topology-manager/) * 了解关于 [Pod 额外开销](/zh/docs/concepts/scheduling-eviction/pod-overhead/) diff --git a/content/zh/docs/concepts/security/pod-security-admission.md b/content/zh/docs/concepts/security/pod-security-admission.md index d84b7c3c7b..f55f058701 100644 --- a/content/zh/docs/concepts/security/pod-security-admission.md +++ b/content/zh/docs/concepts/security/pod-security-admission.md @@ -33,7 +33,7 @@ Kubernetes [Pod 安全性标准(Security Standards)](/zh/docs/concepts/secur 为 Pod 定义不同的隔离级别。这些标准能够让你以一种清晰、一致的方式定义如何限制 Pod 行为。 diff --git a/content/zh/docs/concepts/storage/ephemeral-volumes.md b/content/zh/docs/concepts/storage/ephemeral-volumes.md index 10e0015512..a0a2988030 100644 --- a/content/zh/docs/concepts/storage/ephemeral-volumes.md +++ b/content/zh/docs/concepts/storage/ephemeral-volumes.md @@ -33,7 +33,7 @@ services are often limited by memory size and can move infrequently used data into storage that is slower than memory with little impact on overall performance. 
--> -有些应用程序需要额外的存储,但并不关心数据在重启后仍然可用,既是否被持久地保存。 +有些应用程序需要额外的存储,但并不关心数据在重启后仍然可用。 例如,缓存服务经常受限于内存大小,将不常用的数据转移到比内存慢、但对总体性能的影响很小的存储中。 ### 通用临时卷 {#generic-ephemeral-volumes} -{{< feature-state for_k8s_version="v1.21" state="beta" >}} - - -这个特性需要启用 `GenericEphemeralVolume` -[特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/)。 -因为这是一个 beta 特性,默认情况下启用。 +{{< feature-state for_k8s_version="v1.23" state="stable" >}} ### 通用临时卷 {#generic-ephemeral-volumes} - 有关设计的更多信息,参阅 [Generic ephemeral inline volumes KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/1698-generic-ephemeral-volumes/README.md)。 -- 关于本特性下一步开发的更多信息,参阅 - [enhancement tracking issue #1698](https://github.com/kubernetes/enhancements/issues/1698)。 diff --git a/content/zh/docs/concepts/storage/persistent-volumes.md b/content/zh/docs/concepts/storage/persistent-volumes.md index 34341afd97..fdf9e6ee50 100644 --- a/content/zh/docs/concepts/storage/persistent-volumes.md +++ b/content/zh/docs/concepts/storage/persistent-volumes.md @@ -666,7 +666,7 @@ size that is within the capacity limits of underlying storage provider. You can @@ -810,7 +810,7 @@ Helper programs relating to the volume type may be required for consumption of a @@ -818,9 +818,9 @@ Currently, storage size is the only resource that can be set or requested. Futu 一般而言,每个 PV 卷都有确定的存储容量。 容量属性是使用 PV 对象的 `capacity` 属性来设置的。 -参考 Kubernetes -[资源模型(Resource Model)](https://git.k8s.io/community/contributors/design-proposals/scheduling/resources.md) -设计提案,了解 `capacity` 字段可以接受的单位。 +参考词汇表中的 +[量纲(Quantity)](/zh/docs/reference/glossary/?all=true#term-quantity) +词条,了解 `capacity` 字段可以接受的单位。 目前,存储大小是可以设置和请求的唯一资源。 未来可能会包含 IOPS、吞吐量等属性。 @@ -1038,19 +1038,19 @@ The following volume types support mount options: --> 以下卷类型支持挂载选项: -* AWSElasticBlockStore -* AzureDisk -* AzureFile -* CephFS -* Cinder (OpenStack 块存储) -* GCEPersistentDisk -* Glusterfs -* NFS -* Quobyte 卷 -* RBD (Ceph 块设备) -* StorageOS -* VsphereVolume -* iSCSI +* `awsElasticBlockStore` +* `azureDisk` +* `azureFile` +* `cephfs` +* `cinder` (**已弃用**于 v1.18) +* `gcePersistentDisk` +* `glusterfs` +* `iscsi` +* `nfs` +* `quobyte` (**已弃用**于 v1.22) +* `rbd` +* `storageos` (**已弃用**于 v1.22) +* `vsphereVolume` -Container 中的文件在磁盘上是临时存放的,这给 Container 中运行的较重要的应用 -程序带来一些问题。问题之一是当容器崩溃时文件丢失。kubelet 会重新启动容器, -但容器会以干净的状态重启。 +Container 中的文件在磁盘上是临时存放的,这给 Container 中运行的较重要的应用程序带来一些问题。 +问题之一是当容器崩溃时文件丢失。 +kubelet 会重新启动容器,但容器会以干净的状态重启。 第二个问题会在同一 `Pod` 中运行多个容器并共享文件时出现。 Kubernetes {{< glossary_tooltip text="卷(Volume)" term_id="volume" >}} 这一抽象概念能够解决这两个问题。 阅读本文前建议你熟悉一下 [Pods](/zh/docs/concepts/workloads/pods)。 @@ -59,15 +59,15 @@ Docker 提供卷驱动程序,但是其功能非常有限。 Kubernetes supports many types of volumes. A {{< glossary_tooltip term_id="pod" text="Pod" >}} can use any number of volume types simultaneously. Ephemeral volume types have a lifetime of a pod, but persistent volumes exist beyond -the lifetime of a pod. When a pod ceases to exist, Kubernetes destroys ephemeral volumes; -however, Kubernetes does not destroy persistent volumes. +the lifetime of a pod. When a pod ceases to exist, Kubernetes destroys ephemeral volumes; +however, Kubernetes does not destroy persistent volumes. For any kind of volume in a given pod, data is preserved across container restarts. 
--> Kubernetes 支持很多类型的卷。 {{< glossary_tooltip term_id="pod" text="Pod" >}} 可以同时使用任意数目的卷类型。 临时卷类型的生命周期与 Pod 相同,但持久卷可以比 Pod 的存活期长。 -当 Pod 不再存在时,Kubernetes 也会销毁临时卷;不过 Kubernetes 不会销毁 -持久卷。对于给定 Pod 中任何类型的卷,在容器重启期间数据都不会丢失。 +当 Pod 不再存在时,Kubernetes 也会销毁临时卷;不过 Kubernetes 不会销毁持久卷。 +对于给定 Pod 中任何类型的卷,在容器重启期间数据都不会丢失。 卷的核心是一个目录,其中可能存有数据,Pod 中的容器可以访问该目录中的数据。 -所采用的特定的卷类型将决定该目录如何形成的、使用何种介质保存数据以及目录中存放 -的内容。 +所采用的特定的卷类型将决定该目录如何形成的、使用何种介质保存数据以及目录中存放的内容。 使用卷时, 在 `.spec.volumes` 字段中设置为 Pod 提供的卷,并在 `.spec.containers[*].volumeMounts` 字段中声明卷在容器中的挂载位置。 -容器中的进程看到的是由它们的 Docker 镜像和卷组成的文件系统视图。 -[Docker 镜像](https://docs.docker.com/userguide/dockerimages/) -位于文件系统层次结构的根部。各个卷则挂载在镜像内的指定路径上。 -卷不能挂载到其他卷之上,也不能与其他卷有硬链接。 +容器中的进程看到的文件系统视图是由它们的 {{< glossary_tooltip text="容器镜像" term_id="image" >}} +的初始内容以及挂载在容器中的卷(如果定义了的话)所组成的。 +其中根文件系统同容器镜像的内容相吻合。 +任何在该文件系统下的写入操作,如果被允许的话,都会影响接下来容器中进程访问文件系统时所看到的内容。 + + +卷挂载在镜像中的[指定路径](#using-subpath)下。 Pod 配置中的每个容器必须独立指定各个卷的挂载位置。 +卷不能挂载到其他卷之上(不过存在一种[使用 subPath](#using-subpath) 的相关机制),也不能与其他卷有硬链接。 + `awsElasticBlockStore` 卷将 Amazon Web服务(AWS)[EBS 卷](https://aws.amazon.com/ebs/) -挂载到你的 Pod 中。与 `emptyDir` 在 Pod 被删除时也被删除不同,EBS 卷的内容在删除 Pod 时 -会被保留,卷只是被卸载掉了。 +挂载到你的 Pod 中。与 `emptyDir` 在 Pod 被删除时也被删除不同,EBS 卷的内容在删除 Pod +时会被保留,卷只是被卸载掉了。 这意味着 EBS 卷可以预先填充数据,并且该数据可以在 Pod 之间共享。 -如果启用了对 `awsElasticBlockStore` 的 `CSIMigration` 特性支持,所有插件操作都 -不再指向树内插件(In-Tree Plugin),转而指向 `ebs.csi.aws.com` 容器存储接口 -(Container Storage Interface,CSI)驱动。为了使用此特性,必须在集群中安装 +如果启用了对 `awsElasticBlockStore` 的 `CSIMigration` +特性支持,所有插件操作都不再指向树内插件(In-Tree Plugin),转而指向 +`ebs.csi.aws.com` 容器存储接口(Container Storage Interface,CSI)驱动。 +为了使用此特性,必须在集群中安装 [AWS EBS CSI 驱动](https://github.com/kubernetes-sigs/aws-ebs-csi-driver), 并确保 `CSIMigration` 和 `CSIMigrationAWS` Beta 功能特性被启用。 @@ -308,8 +323,9 @@ that data can be shared between Pods. The `cephfs` can be mounted by multiple writers simultaneously. --> `cephfs` 卷允许你将现存的 CephFS 卷挂载到 Pod 中。 -不像 `emptyDir` 那样会在 Pod 被删除的同时也会被删除,`cephfs` 卷的内容在 Pod 被删除 -时会被保留,只是卷被卸载了。这意味着 `cephfs` 卷可以被预先填充数据,且这些数据可以在 +不像 `emptyDir` 那样会在 Pod 被删除的同时也会被删除,`cephfs` +卷的内容在 Pod 被删除时会被保留,只是卷被卸载了。 +这意味着 `cephfs` 卷可以被预先填充数据,且这些数据可以在 Pod 之间共享。同一 `cephfs` 卷可同时被多个写者挂载。 -[`configMap`](/zh/docs/tasks/configure-pod-container/configure-pod-configmap/) 卷 -提供了向 Pod 注入配置数据的方法。 -ConfigMap 对象中存储的数据可以被 `configMap` 类型的卷引用,然后被 Pod 中运行的 -容器化应用使用。 +[`configMap`](/zh/docs/tasks/configure-pod-container/configure-pod-configmap/) +卷提供了向 Pod 注入配置数据的方法。 +ConfigMap 对象中存储的数据可以被 `configMap` 类型的卷引用,然后被 Pod 中运行的容器化应用使用。 -`log-config` ConfigMap 以卷的形式挂载,并且存储在 `log_level` 条目中的所有内容 -都被挂载到 Pod 的 `/etc/config/log_level` 路径下。 +`log-config` ConfigMap 以卷的形式挂载,并且存储在 `log_level` +条目中的所有内容都被挂载到 Pod 的 `/etc/config/log_level` 路径下。 请注意,这个路径来源于卷的 `mountPath` 和 `log_level` 键对应的 `path`。 {{< note >}} -当启用 `SizeMemoryBackedVolumes` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/)时, -你可以为基于内存提供的卷指定大小。 +当启用 `SizeMemoryBackedVolumes` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/) +时,你可以为基于内存提供的卷指定大小。 如果未指定大小,则基于内存的卷的大小为 Linux 主机上内存的 50%。 {{< /note>}} @@ -589,8 +604,8 @@ targetWWNs expect that those WWNs are from multi-path connections. You must configure FC SAN Zoning to allocate and mask those LUNs (volumes) to the target WWNs beforehand so that Kubernetes hosts can access them. 
--> {{< note >}} -你必须配置 FC SAN Zoning,以便预先向目标 WWN 分配和屏蔽这些 LUN(卷), -这样 Kubernetes 主机才可以访问它们。 +你必须配置 FC SAN Zoning,以便预先向目标 WWN 分配和屏蔽这些 LUN(卷),这样 +Kubernetes 主机才可以访问它们。 {{< /note >}} -[区域持久盘](https://cloud.google.com/compute/docs/disks/#repds) 功能允许你创建能在 -同一区域的两个可用区中使用的持久盘。 -要使用这个功能,必须以持久卷(PersistentVolume)的方式提供卷;直接从 Pod 引用这种卷 -是不可以的。 +[区域持久盘](https://cloud.google.com/compute/docs/disks/#repds) +功能允许你创建能在同一区域的两个可用区中使用的持久盘。 +要使用这个功能,必须以持久卷(PersistentVolume)的方式提供卷;直接从 +Pod 引用这种卷是不可以的。 #### 手动供应基于区域 PD 的 PersistentVolume {#manually-provisioning-regional-pd-pv} -使用[为 GCE PD 定义的存储类](/zh/docs/concepts/storage/storage-classes/#gce) 可以 -实现动态供应。在创建 PersistentVolume 之前,你首先要创建 PD。 +使用[为 GCE PD 定义的存储类](/zh/docs/concepts/storage/storage-classes/#gce) +可以实现动态供应。在创建 PersistentVolume 之前,你首先要创建 PD。 ```shell gcloud beta compute disks create --size=500GB my-data-disk @@ -824,8 +839,8 @@ and the kubelet, set the `InTreePluginGCEUnregister` flag to `true`. {{< feature-state for_k8s_version="v1.21" state="alpha" >}} -要禁止控制器管理器和 kubelet 加载 `gcePersistentDisk` 存储插件, -请将 `InTreePluginGCEUnregister` 标志设置为 `true`。 +要禁止控制器管理器和 kubelet 加载 `gcePersistentDisk` 存储插件,请将 +`InTreePluginGCEUnregister` 标志设置为 `true`。 {{< warning >}} `gitRepo` 卷类型已经被废弃。如果需要在容器中提供 git 仓库,请将一个 -[EmptyDir](#emptydir) 卷挂载到 InitContainer 中,使用 git 命令完成仓库的克隆操作, -然后将 [EmptyDir](#emptydir) 卷挂载到 Pod 的容器中。 +[EmptyDir](#emptydir) 卷挂载到 InitContainer 中,使用 git +命令完成仓库的克隆操作,然后将 [EmptyDir](#emptydir) 卷挂载到 Pod 的容器中。 {{< /warning >}} 当使用这种类型的卷时要小心,因为: -* HostPath 卷可能会暴露特权系统凭据(例如 Kubelet)或特权 API(例如容器运行时套接字), - 可用于容器逃逸或攻击集群的其他部分。 -* 具有相同配置(例如基于同一 PodTemplate 创建)的多个 Pod 会由于节点上文件的不同 - 而在不同节点上有不同的行为。 +* HostPath 卷可能会暴露特权系统凭据(例如 Kubelet)或特权 + API(例如容器运行时套接字),可用于容器逃逸或攻击集群的其他部分。 +* 具有相同配置(例如基于同一 PodTemplate 创建)的多个 Pod + 会由于节点上文件的不同而在不同节点上有不同的行为。 * 下层主机上创建的文件或目录只能由 root 用户写入。你需要在 [特权容器](/zh/docs/tasks/configure-pod-container/security-context/) 中以 root 身份运行进程,或者修改主机上的文件权限以便容器能够写入 `hostPath` 卷。 @@ -1078,8 +1093,8 @@ unmounted. This means that an iscsi volume can be pre-populated with data, and that data can be shared between pods. 
--> `iscsi` 卷能将 iSCSI (基于 IP 的 SCSI) 卷挂载到你的 Pod 中。 -不像 `emptyDir` 那样会在删除 Pod 的同时也会被删除,`iscsi` 卷的内容在删除 Pod 时 -会被保留,卷只是被卸载。 +不像 `emptyDir` 那样会在删除 Pod 的同时也会被删除,`iscsi` +卷的内容在删除 Pod 时会被保留,卷只是被卸载。 这意味着 `iscsi` 卷可以被预先填充数据,并且这些数据可以在 Pod 之间共享。 然而,`local` 卷仍然取决于底层节点的可用性,并不适合所有应用程序。 -如果节点变得不健康,那么`local` 卷也将变得不可被 Pod 访问。使用它的 Pod 将不能运行。 -使用 `local` 卷的应用程序必须能够容忍这种可用性的降低,以及因底层磁盘的耐用性特征 -而带来的潜在的数据丢失风险。 +如果节点变得不健康,那么 `local` 卷也将变得不可被 Pod 访问。使用它的 Pod 将不能运行。 +使用 `local` 卷的应用程序必须能够容忍这种可用性的降低,以及因底层磁盘的耐用性特征而带来的潜在的数据丢失风险。 下面是一个使用 `local` 卷和 `nodeAffinity` 的持久卷示例: @@ -1198,9 +1212,8 @@ such as node resource requirements, node selectors, Pod affinity, and Pod anti-a 使用 `local` 卷时,建议创建一个 StorageClass 并将其 `volumeBindingMode` 设置为 `WaitForFirstConsumer`。要了解更多详细信息,请参考 [local StorageClass 示例](/zh/docs/concepts/storage/storage-classes/#local)。 -延迟卷绑定的操作可以确保 Kubernetes 在为 PersistentVolumeClaim 作出绑定决策时, -会评估 Pod 可能具有的其他节点约束,例如:如节点资源需求、节点选择器、Pod -亲和性和 Pod 反亲和性。 +延迟卷绑定的操作可以确保 Kubernetes 在为 PersistentVolumeClaim 作出绑定决策时,会评估 +Pod 可能具有的其他节点约束,例如:如节点资源需求、节点选择器、Pod亲和性和 Pod 反亲和性。 -`persistentVolumeClaim` 卷用来将[持久卷](/zh/docs/concepts/storage/persistent-volumes/)(PersistentVolume) -挂载到 Pod 中。 -持久卷申领(PersistentVolumeClaim)是用户在不知道特定云环境细节的情况下"申领"持久存储 -(例如 GCE PersistentDisk 或者 iSCSI 卷)的一种方法。 +`persistentVolumeClaim` 卷用来将[持久卷](/zh/docs/concepts/storage/persistent-volumes/)(PersistentVolume)挂载到 Pod 中。 +持久卷申领(PersistentVolumeClaim)是用户在不知道特定云环境细节的情况下“申领”持久存储(例如 +GCE PersistentDisk 或者 iSCSI 卷)的一种方法。 `portworxVolume` 是一个可伸缩的块存储层,能够以超融合(hyperconverged)的方式与 Kubernetes 一起运行。 -[Portworx](https://portworx.com/use-case/kubernetes-storage/) 支持对服务器上存储的指纹处理、 -基于存储能力进行分层以及跨多个服务器整合存储容量。 +[Portworx](https://portworx.com/use-case/kubernetes-storage/) +支持对服务器上存储的指纹处理、基于存储能力进行分层以及跨多个服务器整合存储容量。 Portworx 可以以 in-guest 方式在虚拟机中运行,也可以在裸金属 Linux 节点上运行。 -`projected` 卷类型能将若干现有的卷来源映射到同一目录上。 - -目前,可以映射的卷来源类型如下: - -- [`secret`](#secret) -- [`downwardAPI`](#downwardapi) -- [`configMap`](#configmap) -- `serviceAccountToken` - - -所有的卷来源需要和 Pod 处于相同的命名空间。 -更多详情请参考[一体化卷设计文档](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/all-in-one-volume.md)。 - - - -#### 包含 Secret、downwardAPI 和 configMap 的 Pod 示例 {#example-configuration-secret-downwardapi-configmap} - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: volume-test -spec: - containers: - - name: container-test - image: busybox - volumeMounts: - - name: all-in-one - mountPath: "/projected-volume" - readOnly: true - volumes: - - name: all-in-one - projected: - sources: - - secret: - name: mysecret - items: - - key: username - path: my-group/my-username - - downwardAPI: - items: - - path: "labels" - fieldRef: - fieldPath: metadata.labels - - path: "cpu_limit" - resourceFieldRef: - containerName: container-test - resource: limits.cpu - - configMap: - name: myconfigmap - items: - - key: config - path: my-group/my-config -``` - - - -下面是一个带有非默认访问权限设置的多个 secret 的 Pod 示例: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: volume-test -spec: - containers: - - name: container-test - image: busybox - volumeMounts: - - name: all-in-one - mountPath: "/projected-volume" - readOnly: true - volumes: - - name: all-in-one - projected: - sources: - - secret: - name: mysecret - items: - - key: username - path: my-group/my-username - - secret: - name: mysecret2 - items: - - key: password - path: my-group/my-password - mode: 511 -``` - -每个被投射的卷来源都在规约中的 `sources` 内列出。参数几乎相同,除了两处例外: - -* 对于 `secret`,`secretName` 字段已被变更为 `name` 以便与 ConfigMap 命名一致。 -* `defaultMode` 只能在整个投射卷级别指定,而无法针对每个卷来源指定。 - 
不过,如上所述,你可以显式地为每个投射项设置 `mode` 值。 - - - -当开启 `TokenRequestProjection` 功能时,可以将当前 -[服务帐号](/zh/docs/reference/access-authn-authz/authentication/#service-account-tokens) -的令牌注入 Pod 中的指定路径。 -下面是一个例子: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: sa-token-test -spec: - containers: - - name: container-test - image: busybox - volumeMounts: - - name: token-vol - mountPath: "/service-account" - readOnly: true - volumes: - - name: token-vol - projected: - sources: - - serviceAccountToken: - audience: api - expirationSeconds: 3600 - path: token -``` - - -示例 Pod 具有包含注入服务帐户令牌的映射卷。 -该令牌可以被 Pod 中的容器用来访问 Kubernetes API 服务器。 -`audience` 字段包含令牌的预期受众。 -令牌的接收者必须使用令牌的受众中指定的标识符来标识自己,否则应拒绝令牌。 -此字段是可选的,默认值是 API 服务器的标识符。 - - -`expirationSeconds` 是服务帐户令牌的有效期时长。 -默认值为 1 小时,必须至少 10 分钟(600 秒)。 -管理员还可以通过设置 API 服务器的 `--service-account-max-token-expiration` 选项来 -限制其最大值。 -`path` 字段指定相对于映射卷的挂载点的相对路径。 - -{{< note >}} - -使用投射卷源作为 [subPath](#using-subpath) 卷挂载的容器将不会接收这些卷源的更新。 -{{< /note >}} +投射卷能将若干现有的卷来源映射到同一目录上。更多详情请参考[投射卷](/zh/docs/concepts/storage/projected-volumes/)。 ### quobyte (已弃用) {#quobyte} @@ -1542,32 +1375,32 @@ Quobyte 的 GitHub 项目包含以 CSI 形式部署 Quobyte 的 -`rbd` 卷允许将 [Rados 块设备](https://docs.ceph.com/en/latest/rbd/) 卷挂载到你的 Pod 中. -不像 `emptyDir` 那样会在删除 Pod 的同时也会被删除,`rbd` 卷的内容在删除 Pod 时 -会被保存,卷只是被卸载。 +`rbd` 卷允许将 [Rados 块设备](https://docs.ceph.com/en/latest/rbd/)卷挂载到你的 Pod 中。 +不像 `emptyDir` 那样会在删除 Pod 的同时也会被删除,`rbd` 卷的内容在删除 Pod 时会被保存,卷只是被卸载。 这意味着 `rbd` 卷可以被预先填充数据,并且这些数据可以在 Pod 之间共享。 -{{< caution >}} +{{< note >}} 在使用 RBD 之前,你必须安装运行 Ceph。 -{{< /caution >}} +{{< /note >}} RBD 的一个特性是它可以同时被多个用户以只读方式挂载。 这意味着你可以用数据集预先填充卷,然后根据需要在尽可能多的 Pod 中并行地使用卷。 @@ -1576,6 +1409,59 @@ RBD 的一个特性是它可以同时被多个用户以只读方式挂载。 更多详情请参考 [RBD 示例](https://github.com/kubernetes/examples/tree/master/volumes/rbd)。 + +#### RBD CSI 迁移 {#rbd-csi-migration} + +{{< feature-state for_k8s_version="v1.23" state="alpha" >}} + + +启用 RBD 的 `CSIMigration` 功能后,所有插件操作从现有的树内插件重定向到 +`rbd.csi.ceph.com` {{}} 驱动程序。 +要使用该功能,必须在集群内安装 +[Ceph CSI 驱动](https://github.com/ceph/ceph-csi),并启用 `CSIMigration` 和 `csiMigrationRBD` +[特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/)。 + + +{{< note >}} +作为一位管理存储的 Kubernetes 集群操作者,在尝试迁移到 RBD CSI 驱动前,你必须完成下列先决事项: + +* 你必须在集群中安装 v3.5.0 或更高版本的 Ceph CSI 驱动(`rbd.csi.ceph.com`)。 +* 因为 `clusterID` 是 CSI 驱动程序必需的参数,而树内存储类又将 `monitors` + 作为一个必需的参数,所以 Kubernetes 存储管理者需要根据 `monitors` + 的哈希值(例:`#echo -n '' | md5sum`)来创建 + `clusterID`,并保持该 `monitors` 存在于该 `clusterID` 的配置中。 +* 同时,如果树内存储类的 `adminId` 的值不是 `admin`,那么其 `adminSecretName` + 就需要被修改成 `adminId` 参数的 base64 编码值。 +{{< /note >}} + ### secret `secret` 卷用来给 Pod 传递敏感信息,例如密码。你可以将 Secret 存储在 Kubernetes API 服务器上,然后以文件的形式挂在到 Pod 中,无需直接与 Kubernetes 耦合。 -`secret` 卷由 tmpfs(基于 RAM 的文件系统)提供存储,因此它们永远不会被写入非易失性 -(持久化的)存储器。 +`secret` 卷由 tmpfs(基于 RAM 的文件系统)提供存储,因此它们永远不会被写入非易失性(持久化的)存储器。 +#### Portworx CSI 迁移 + +{{< feature-state for_k8s_version="v1.23" state="alpha" >}} + + +Kubernetes 1.23 中加入了 Portworx 的 `CSIMigration` 功能,但默认不会启用,因为该功能仍处于 alpha 阶段。 +该功能会将所有的插件操作从现有的树内插件重定向到 +`pxd.portworx.com` 容器存储接口(Container Storage Interface, CSI)驱动程序。 +集群中必须安装 +[Portworx CSI 驱动](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/csi/)。 +要启用此功能,请在 kube-controller-manager 和 kubelet 中设置 `CSIMigrationPortworx=true`。 + -## 使用 subPath {#using-path} +## 使用 subPath {#using-subpath} 有时,在单个 Pod 中共享卷以供多方使用是很有用的。 `volumeMounts.subPath` 属性可用于指定所引用的卷内的子路径,而不是其根路径。 @@ -1934,6 +1841,7 @@ spec: volumeMounts: - name: workdir1 mountPath: /logs + # 包裹变量名的是小括号,而不是大括号 subPathExpr: $(POD_NAME) 
restartPolicy: Never volumes: @@ -1953,10 +1861,9 @@ Pods. --> ## 资源 {#resources} -`emptyDir` 卷的存储介质(磁盘、SSD 等)是由保存 kubelet 数据的根目录 -(通常是 `/var/lib/kubelet`)的文件系统的介质确定。 -Kubernetes 对 `emptyDir` 卷或者 `hostPath` 卷可以消耗的空间没有限制, -容器之间或 Pod 之间也没有隔离。 +`emptyDir` 卷的存储介质(磁盘、SSD 等)是由保存 kubelet +数据的根目录(通常是 `/var/lib/kubelet`)的文件系统的介质确定。 +Kubernetes 对 `emptyDir` 卷或者 `hostPath` 卷可以消耗的空间没有限制,容器之间或 Pod 之间也没有隔离。 ## 树外(Out-of-Tree)卷插件 {#out-of-tree-volume-plugins} Out-of-Tree 卷插件包括 -{{< glossary_tooltip text="容器存储接口(CSI)" term_id="csi" >}} (CSI) -和 FlexVolume。 -它们使存储供应商能够创建自定义存储插件,而无需将它们添加到 Kubernetes 代码仓库。 +{{< glossary_tooltip text="容器存储接口(CSI)" term_id="csi" >}} +和 FlexVolume(已弃用)。 +它们使存储供应商能够创建自定义存储插件,而无需将插件源码添加到 Kubernetes 代码仓库。 -CSI 和 FlexVolume 都允许独立于 Kubernetes 代码库开发卷插件,并作为扩展部署 -(安装)在 Kubernetes 集群上。 +CSI 和 FlexVolume 都允许独立于 Kubernetes 代码库开发卷插件,并作为扩展部署(安装)在 Kubernetes 集群上。 对于希望创建树外(Out-Of-Tree)卷插件的存储供应商,请参考 [卷插件常见问题](https://github.com/kubernetes/community/blob/master/sig-storage/volume-plugin-faq.md)。 @@ -2053,8 +1958,8 @@ A `csi` volume can be used in a Pod in three different ways: * with a [CSI ephemeral volume](/docs/concepts/storage/ephemeral-volumes/#csi-ephemeral-volume) if the driver supports that (beta feature) --> -一旦在 Kubernetes 集群上部署了 CSI 兼容卷驱动程序,用户就可以使用 `csi` 卷类型来 -挂接、挂载 CSI 驱动所提供的卷。 +一旦在 Kubernetes 集群上部署了 CSI 兼容卷驱动程序,用户就可以使用 +`csi` 卷类型来挂接、挂载 CSI 驱动所提供的卷。 `csi` 卷可以在 Pod 中以三种方式使用: @@ -2078,10 +1983,10 @@ persistent volume: CSI driver components to identify which PV objects belong to the CSI driver. --> - `driver`:指定要使用的卷驱动名称的字符串值。 - 这个值必须与 CSI 驱动程序在 `GetPluginInfoResponse` 中返回的值相对应; - 该接口定义在 [CSI 规范](https://github.com/container-storage-interface/spec/blob/master/spec.md#getplugininfo)中。 - Kubernetes 使用所给的值来标识要调用的 CSI 驱动程序;CSI 驱动程序也使用该值来辨识 - 哪些 PV 对象属于该 CSI 驱动程序。 + 这个值必须与 CSI 驱动程序在 `GetPluginInfoResponse` 中返回的值相对应;该接口定义在 + [CSI 规范](https://github.com/container-storage-interface/spec/blob/master/spec.md#getplugininfo)中。 + Kubernetes 使用所给的值来标识要调用的 CSI 驱动程序;CSI + 驱动程序也使用该值来辨识哪些 PV 对象属于该 CSI 驱动程序。 - `volumeHandle`:唯一标识卷的字符串值。 - 该值必须与 CSI 驱动在 `CreateVolumeResponse` 的 `volume_id` 字段中返回的值相对应; - 接口定义在 [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume) 中。 + 该值必须与 CSI 驱动在 `CreateVolumeResponse` 的 `volume_id` 字段中返回的值相对应;接口定义在 + [CSI 规范](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume) 中。 在所有对 CSI 卷驱动程序的调用中,引用该 CSI 卷时都使用此值作为 `volume_id` 参数。 -- `readOnly`:一个可选的布尔值,指示通过 `ControllerPublished` 关联该卷时是否设置 - 该卷为只读。默认值是 false。 +- `readOnly`:一个可选的布尔值,指示通过 `ControllerPublished` 关联该卷时是否设置该卷为只读。默认值是 false。 该值通过 `ControllerPublishVolumeRequest` 中的 `readonly` 字段传递给 CSI 驱动。 -启用 `CSIMigration` 功能后,针对现有树内插件的操作会被重定向到相应的 CSI 插件 -(应已安装和配置)。 -因此,操作员在过渡到取代树内插件的 CSI 驱动时,无需对现有存储类、PV 或 PVC -(指树内插件)进行任何配置更改。 +启用 `CSIMigration` 功能后,针对现有树内插件的操作会被重定向到相应的 CSI 插件(应已安装和配置)。 +因此,操作员在过渡到取代树内插件的 CSI 驱动时,无需对现有存储类、PV 或 PVC(指树内插件)进行任何配置更改。 所支持的操作和功能包括:配备(Provisioning)/删除、挂接(Attach)/解挂(Detach)、 挂载(Mount)/卸载(Unmount)和调整卷大小。 @@ -2252,22 +2155,35 @@ are listed in [Types of Volumes](#volume-types). 
### flexVolume +{{< feature-state for_k8s_version="v1.23" state="deprecated" >}} + -FlexVolume 是一个自 1.2 版本(在 CSI 之前)以来在 Kubernetes 中一直存在的树外插件接口。 -它使用基于 exec 的模型来与驱动程序对接。 -用户必须在每个节点(在某些情况下是主控节点)上的预定义卷插件路径中安装 -FlexVolume 驱动程序可执行文件。 +FlexVolume 是一个使用基于 exec 的模型来与驱动程序对接的树外插件接口。 +用户必须在每个节点上的预定义卷插件路径中安装 FlexVolume +驱动程序可执行文件,在某些情况下,控制平面节点中也要安装。 -Pod 通过 `flexvolume` 树内插件与 Flexvolume 驱动程序交互。 -更多详情请参考 [FlexVolume](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-storage/flexvolume.md) 示例。 +Pod 通过 `flexvolume` 树内插件与 FlexVolume 驱动程序交互。 +更多详情请参考 FlexVolume [README](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-storage/flexvolume.md#readme) 文档。 + + +{{< note >}} +FlexVolume 已弃用。推荐使用树外 CSI 驱动来将外部存储整合进 Kubernetes。 + +FlexVolume 驱动的维护者应开发一个 CSI 驱动并帮助用户从 FlexVolume 驱动迁移到 CSI。 +FlexVolume 用户应迁移工作负载以使用对等的 CSI 驱动。 +{{< /note >}} ## 挂载卷的传播 {#mount-propagation} -挂载卷的传播能力允许将容器安装的卷共享到同一 Pod 中的其他容器, -甚至共享到同一节点上的其他 Pod。 +挂载卷的传播能力允许将容器安装的卷共享到同一 Pod 中的其他容器,甚至共享到同一节点上的其他 Pod。 卷的挂载传播特性由 `Container.volumeMounts` 中的 `mountPropagation` 字段控制。 它的值包括: @@ -2320,8 +2235,8 @@ Its values are: 换句话说,如果主机在此挂载卷中挂载任何内容,容器将能看到它被挂载在那里。 - 类似的,配置了 `Bidirectional` 挂载传播选项的 Pod 如果在同一卷上挂载了内容, - 挂载传播设置为 `HostToContainer` 的容器都将能看到这一变化。 + 类似的,配置了 `Bidirectional` 挂载传播选项的 Pod 如果在同一卷上挂载了内容,挂载传播设置为 + `HostToContainer` 的容器都将能看到这一变化。 该模式等同于 [Linux 内核文档](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) 中描述的 `rslave` 挂载传播选项。 @@ -2366,8 +2281,7 @@ Docker as shown below. --> ### 配置 {#configuration} -在某些部署环境中,挂载传播正常工作前,必须在 Docker 中正确配置挂载共享(mount share), -如下所示。 +在某些部署环境中,挂载传播正常工作前,必须在 Docker 中正确配置挂载共享(mount share),如下所示。 ## ReplicaSet 的工作原理 {#how-a-replicaset-works} -RepicaSet 是通过一组字段来定义的,包括一个用来识别可获得的 Pod +ReplicaSet 是通过一组字段来定义的,包括一个用来识别可获得的 Pod 的集合的选择算符、一个用来标明应该维护的副本个数的数值、一个用来指定应该创建新 Pod 以满足副本个数条件时要使用的 Pod 模板等等。 每个 ReplicaSet 都通过根据需要创建和 删除 Pod 以使得副本个数达到期望值, diff --git a/content/zh/docs/concepts/workloads/controllers/replicationcontroller.md b/content/zh/docs/concepts/workloads/controllers/replicationcontroller.md index 285215a3b2..dbadd4f88d 100644 --- a/content/zh/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/zh/docs/concepts/workloads/controllers/replicationcontroller.md @@ -331,7 +331,7 @@ kubectl 将 ReplicationController 缩放为 0 并等待以便在删除 Replicati You can delete a ReplicationController without affecting any of its pods. -Using kubectl, specify the `--cascade=false` option to [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete). +Using kubectl, specify the `--cascade=orphan` option to [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete). When using the REST API or Go client library, simply delete the ReplicationController object. 
--> @@ -339,7 +339,7 @@ When using the REST API or Go client library, simply delete the ReplicationContr 你可以删除一个 ReplicationController 而不影响它的任何 Pod。 -使用 kubectl,为 [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete) 指定 `--cascade=false` 选项。 +使用 kubectl,为 [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete) 指定 `--cascade=orphan` 选项。 当使用 REST API 或 Go 客户端库时,只需删除 ReplicationController 对象。 @@ -501,12 +501,12 @@ ReplicationController 永远被限制在这个狭隘的职责范围内。 我们甚至计划考虑批量创建 Pod 的机制(查阅 [#170](https://issue.k8s.io/170))。 ReplicationController 旨在成为可组合的构建基元。 我们希望在它和其他补充原语的基础上构建更高级别的 API 或者工具,以便于将来的用户使用。 kubectl 目前支持的 "macro" 操作(运行、缩放、滚动更新)就是这方面的概念示例。 -例如,我们可以想象类似于 [Asgard](https://techblog.netflix.com/2012/06/asgaard-web-based-cloud-management-and.html) +例如,我们可以想象类似于 [Asgard](https://netflixtechblog.com/asgard-web-based-cloud-management-and-deployment-2c9fc4e4d3a1) 的东西管理 ReplicationController、自动定标器、服务、调度策略、金丝雀发布等。 ### Deployment (推荐) -[`Deployment`](/zh/docs/concepts/workloads/controllers/deployment/) 是一种更高级别的 API 对象, -它以类似于 `kubectl rolling-update` 的方式更新其底层 ReplicaSet 及其 Pod。 -如果你想要这种滚动更新功能,那么推荐使用 Deployment,因为与 `kubectl rolling-update` 不同, -它们是声明式的、服务端的,并且具有其它特性。 +[`Deployment`](/zh/docs/concepts/workloads/controllers/deployment/) 是一种更高级别的 API 对象,用于更新其底层 ReplicaSet 及其 Pod。 +如果你想要这种滚动更新功能,那么推荐使用 Deployment,因为它们是声明式的、服务端的,并且具有其它特性。 -## 更多信息 +## {{% heading "whatsnext" %}} -请阅读[运行无状态的 ReplicationController](/zh/docs/tasks/run-application/run-stateless-application-deployment/)。 +- 了解 [Pods](/zh/docs/concepts/workloads/pods)。 +- 了解 [Depolyment](/zh/docs/concepts/workloads/controllers/deployment/),ReplicationController 的替代品。 +- `ReplicationController` 是 Kubernetes REST API 的一部分,阅读 {{< api-reference page="workload-resources/replication-controller-v1" >}} + 对象定义以了解 replication controllers 的 API。 diff --git a/content/zh/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/zh/docs/concepts/workloads/controllers/ttlafterfinished.md index 291b88e0d7..0d02b7a04b 100644 --- a/content/zh/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/zh/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -1,65 +1,54 @@ --- -title: 已完成资源的 TTL 控制器 +title: 已完成 Job 的自动清理 content_type: concept weight: 70 --- -{{< feature-state for_k8s_version="v1.21" state="beta" >}} +{{< feature-state for_k8s_version="v1.23" state="stable" >}} -TTL 控制器提供了一种 TTL 机制来限制已完成执行的资源对象的生命周期。 -TTL 控制器目前只处理 {{< glossary_tooltip text="Job" term_id="job" >}}, -可能以后会扩展以处理将完成执行的其他资源,例如 Pod 和自定义资源。 +TTL-after-finished {{}} 提供了一种 TTL 机制来限制已完成执行的资源对象的生命周期。 +TTL 控制器目前只处理 {{< glossary_tooltip text="Job" term_id="job" >}}。 - -此功能目前是 Beta 版而自动启用,并且可以通过 `kube-apiserver` 和 -`kube-controller-manager` 上的 -[特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/) -`TTLAfterFinished` 禁用。 -## TTL 控制器 +## TTL-after-finished 控制器 -TTL 控制器现在只支持 Job。集群操作员可以通过指定 Job 的 `.spec.ttlSecondsAfterFinished` +TTL-after-finished 控制器只支持 Job。集群操作员可以通过指定 Job 的 `.spec.ttlSecondsAfterFinished` 字段来自动清理已结束的作业(`Complete` 或 `Failed`),如 [示例](/zh/docs/concepts/workloads/controllers/job/#clean-up-finished-jobs-automatically) 所示。 -TTL 控制器假设资源能在执行完成后的 TTL 秒内被清理,也就是当 TTL 过期后。 -当 TTL 控制器清理资源时,它将做级联删除操作,即删除资源对象的同时也删除其依赖对象。 +TTL-after-finished 控制器假设作业能在执行完成后的 TTL 秒内被清理,也就是当 TTL 过期后。 +当 TTL 控制器清理作业时,它将做级联删除操作,即删除资源对象的同时也删除其依赖对象。 注意,当资源被删除时,由该资源的生命周期保证其终结器(Finalizers)等被执行。 -* 在资源清单(manifest)中指定此字段,以便 Job 在完成后的某个时间被自动清除。 -* 将此字段设置为现有的、已完成的资源,以采用此新功能。 -* 在创建资源时使用 [mutating admission 
webhook](/zh/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) - 动态设置该字段。集群管理员可以使用它对完成的资源强制执行 TTL 策略。 +* 在作业清单(manifest)中指定此字段,以便 Job 在完成后的某个时间被自动清除。 +* 将此字段设置为现有的、已完成的作业,以采用此新功能。 +* 在创建作业时使用 [mutating admission webhook](/zh/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) + 动态设置该字段。集群管理员可以使用它对完成的作业强制执行 TTL 策略。 * 使用 [mutating admission webhook](/zh/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) - 在资源完成后动态设置该字段,并根据资源状态、标签等选择不同的 TTL 值。 + 在作业完成后动态设置该字段,并根据作业状态、标签等选择不同的 TTL 值。 ## 警告 -### 更新 TTL 秒 +### 更新 TTL 秒数 -请注意,在创建资源或已经执行结束后,仍可以修改其 TTL 周期,例如 Job 的 +请注意,在创建 Job 或已经执行结束后,仍可以修改其 TTL 周期,例如 Job 的 `.spec.ttlSecondsAfterFinished` 字段。 但是一旦 Job 变为可被删除状态(当其 TTL 已过期时),即使您通过 API 增加其 TTL 时长得到了成功的响应,系统也不保证 Job 将被保留。 @@ -111,25 +100,21 @@ returns a successful API response. ### 时间偏差 {#time-skew} -由于 TTL 控制器使用存储在 Kubernetes 资源中的时间戳来确定 TTL 是否已过期, -因此该功能对集群中的时间偏差很敏感,这可能导致 TTL 控制器在错误的时间清理资源对象。 +由于 TTL-after-finished 控制器使用存储在 Kubernetes 资源中的时间戳来确定 TTL 是否已过期, +因此该功能对集群中的时间偏差很敏感,这可能导致 TTL-after-finished 控制器在错误的时间清理资源对象。 -在 Kubernetes 中,需要在所有节点上运行 NTP(参见 -[#6159](https://github.com/kubernetes/kubernetes/issues/6159#issuecomment-93844058)) -以避免时间偏差。时钟并不总是如此正确,但差异应该很小。 +时钟并不总是如此正确,但差异应该很小。 设置非零 TTL 时请注意避免这种风险。 ## {{% heading "whatsnext" %}} diff --git a/content/zh/docs/contribute/advanced.md b/content/zh/docs/contribute/advanced.md index 3a69c1f97f..5fe6df4098 100644 --- a/content/zh/docs/contribute/advanced.md +++ b/content/zh/docs/contribute/advanced.md @@ -44,7 +44,7 @@ the documentation, the website style, the processes for reviewing and merging pull requests, or other aspects of the documentation. For maximum transparency, these types of proposals need to be discussed in a SIG Docs meeting or on the [kubernetes-sig-docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs). -In addition, it can really help to have some context about the way things +In addition, it can help to have some context about the way things currently work and why past decisions have been made before proposing sweeping changes. The quickest way to get answers to questions about how the documentation currently works is to ask in the `#sig-docs` Slack channel on @@ -55,7 +55,7 @@ currently works is to ask in the `#sig-docs` Slack channel on 评审和合并 PR 的流程或者文档的其他方面产生改进的想法。 为了尽可能透明化,这些提议都需要在 SIG Docs 会议或 [kubernetes-sig-docs 邮件列表](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)上讨论。 -此外,在提出全面的改进之前,这些讨论能真正帮助我们了解有关“当前工作如何运作”和“以往的决定是为何做出”的背景。 +此外,在提出全面的改进之前,这些讨论能帮助我们了解有关“当前工作如何运作”和“以往的决定是为何做出”的背景。 想了解文档的当前运作方式,最快的途径是咨询 [kubernetes.slack.com](https://kubernetes.slack.com) 中的 `#sig-docs` 聊天群组。 @@ -96,7 +96,7 @@ refer to The SIG Docs representative for a given release coordinates the following tasks: - Monitor the feature-tracking spreadsheet for new or changed features with an - impact on documentation. If documentation for a given feature won't be ready + impact on documentation. If the documentation for a given feature won't be ready for the release, the feature may not be allowed to go into the release. - Attend sig-release meetings regularly and give updates on the status of the docs for the release. 
@@ -151,19 +151,21 @@ SIG Docs [批准人(Approvers)](/zh/docs/contribute/participating/#approvers - 监听 [Kubernetes #sig-docs 频道](https://kubernetes.slack.com) 上新贡献者的 Issue。 -- 与 PR 管理者合作为新参与者寻找合适的第一个 issues。 +- 与 PR 管理者合作为新参与者寻找[合适的第一个 issues](https://kubernetes.dev/docs/guide/help-wanted/#good-first-issue) 。 - 通过前几个 PR 指导新贡献者为文档存储库作贡献。 - 帮助新的贡献者创建成为 Kubernetes 成员所需的更复杂的 PR。 - [为贡献者提供保荐](#sponsor-a-new-contributor),使其成为 Kubernetes 成员。 +- 每月召开一次会议,帮助和指导新的贡献者。 当前新贡献者大使将在每次 SIG 文档会议上以及 [Kubernetes #sig-docs 频道](https://kubernetes.slack.com)中宣布。 @@ -205,37 +207,37 @@ membership in the Kubernetes organization. ## 担任 SIG 联合主席 -SIG Docs [批准人(Approvers)](/zh/docs/contribute/participate/roles-and-responsibilities/#approvers) +SIG Docs [成员(Members)](/zh/docs/contribute/participate/roles-and-responsibilities/#members) 可以担任 SIG Docs 的联合主席。 ### 前提条件 -Approvers 必须满足以下要求才能成为联合主席: +Kubernetes 成员必须满足以下要求才能成为联合主席: -- 已维持 SIG Docs approver 身份至少 6 个月 -- [曾领导 Kubernetes 文档发布](/zh/docs/contribute/advanced/#coordinate-docs-for-a-kubernetes-release) - 或者在两个版本发布中有实习经历 - 理解 SIG Docs 工作流程和工具:git、Hugo、本地化、博客子项目 - 理解其他 Kubernetes SIG 和仓库会如何影响 SIG Docs 工作流程,包括: [k/org 中的团队](https://github.com/kubernetes/org/blob/master/config/kubernetes/sig-docs/teams.yaml)、 [k/community 中的流程](https://github.com/kubernetes/community/tree/master/sig-docs)、 [k/test-infra](https://github.com/kubernetes/test-infra/) 中的插件、 [SIG Architecture](https://github.com/kubernetes/community/tree/master/sig-architecture) 中的角色。 + 此外,了解 [Kubernetes 文档发布流程](/docs/contribute/advanced/#coordinate-docs-for-a-kubernetes-release) 的工作原理。 +- 由 SIG Docs 社区直接或通过惰性共识批准。 - 在至少 6 个月的时段内,确保每周至少投入 5 个小时(通常更多) 描述 | 网址 -----|----- @@ -500,7 +501,7 @@ Site strings | [All site strings](#Site-strings-in-i18n) in a new localized TOML 安装 | [所有标题和副标题网址](/zh/docs/setup/) 教程 | [Kubernetes 基础](/zh/docs/tutorials/kubernetes-basics/), [Hello Minikube](/zh/docs/tutorials/hello-minikube/) 网站字符串 | [所有网站字符串](#Site-strings-in-i18n) - +发行版本 | [所有标题和副标题 URL](/releases) @@ -616,6 +617,24 @@ Some language teams have their own language-specific style guide and glossary. 
F 一些语言团队有自己的特定语言样式指南和词汇表。 例如,请参见[中文本地化指南](/zh/docs/contribute/localization_zh/)。 + + +### 特定语言的 Zoom 会议 + +如果本地化项目需要单独的会议时间, +请联系 SIG Docs 联合主席或技术主管以创建新的重复 Zoom 会议和日历邀请。 +仅当团队维持在足够大的规模并需要单独的会议时才需要这样做。 + +根据 CNCF 政策,本地化团队必须将他们的会议上传到 SIG Docs YouTube 播放列表。 +SIG Docs 联合主席或技术主管可以帮助完成该过程,直到 SIG Docs 实现自动化。 + ## 官方支持的客户端库 @@ -71,19 +71,19 @@ client libraries: - [Kubernetes Python 语言客户端库](https://github.com/kubernetes-client/python) - [Kubernetes Java 语言客户端库](https://github.com/kubernetes-client/java) - [Kubernetes JavaScript 语言客户端库](https://github.com/kubernetes-client/javascript) -- [Kubernetes Dotnet 语言客户端库](https://github.com/kubernetes-client/csharp) +- [Kubernetes C# 语言客户端库](https://github.com/kubernetes-client/csharp) - [Kubernetes Haskell 语言客户端库](https://github.com/kubernetes-client/haskell) ## CLI -* [kubectl](/zh/docs/reference/kubectl/overview/) - 主要的 CLI 工具,用于运行命令和管理 Kubernetes 集群。 +* [kubectl](/zh/docs/reference/kubectl/) - 主要的 CLI 工具,用于运行命令和管理 Kubernetes 集群。 * [JSONPath](/zh/docs/reference/kubectl/jsonpath/) - 通过 kubectl 使用 [JSONPath 表达式](https://goessner.net/articles/JsonPath/) 的语法指南。 * [kubeadm](/zh/docs/reference/setup-tools/kubeadm/) - 此 CLI 工具可轻松配置安全的 Kubernetes 集群。 @@ -105,6 +105,8 @@ client libraries: * [Scheduler Policies](/docs/reference/scheduling/policies) * [Scheduler Profiles](/docs/reference/scheduling/config#profiles) + * List of [ports and protocols](/docs/reference/ports-and-protocols/) that + should be open on control plane and worker nodes --> ## 组件 @@ -121,6 +123,8 @@ client libraries: * [调度策略](/zh/docs/reference/scheduling/policies) * [调度配置](/zh/docs/reference/scheduling/config#profiles) + * 应该在控制平面和工作节点上打开的 [端口和协议](/zh/docs/reference/ports-and-protocols/) - + 列表 ## 配置 API @@ -144,14 +154,32 @@ operator to use or manage a cluster. 
尽管这些 API 对于用户或操作者使用或管理集群来说是必不可少的, 它们大都没有以 RESTful 的方式在 API 服务器上公开。 -* [kubelet 配置 (v1beta1)](/zh/docs/reference/config-api/kubelet-config.v1beta1/) -* [kube-scheduler 配置 (v1beta1)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta1/) -* [kube-scheduler 策略参考 (v1)](/zh/docs/reference/config-api/kube-scheduler-policy-config.v1/) +* [kube-apiserver 配置 (v1alpha1)](/zh/docs/reference/config-api/apiserver-config.v1alpha1/) +* [kube-apiserver 配置 (v1)](/zh/docs/reference/config-api/apiserver-config.v1/) +* [kube-apiserver 加密 (v1)](/zh/docs/reference/config-api/apiserver-encryption.v1/) +* [kubelet 配置 (v1alpha1)](/zh/docs/reference/config-api/kubelet-config.v1alpha1/) 和 + [kubelet 配置 (v1beta1)](/zh/docs/reference/config-api/kubelet-config.v1beta1/) +* [kubelet 凭据驱动 (v1alpha1)](/zh/docs/reference/config-api/kubelet-credentialprovider.v1alpha1/) +* [kube-scheduler 配置 (v1beta2)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta2/) 和 + [kube-scheduler 配置 (v1beta3)](/zh/docs/reference/config-api/kube-scheduler-config.v1beta3/) * [kube-proxy 配置 (v1alpha1)](/zh/docs/reference/config-api/kube-proxy-config.v1alpha1/) * [`audit.k8s.io/v1` API](/zh/docs/reference/config-api/apiserver-audit.v1/) -* [客户端认证 API (v1beta1)](/zh/docs/reference/config-api/client-authentication.v1beta1/) +* [客户端认证 API (v1beta1)](/zh/docs/reference/config-api/client-authentication.v1beta1/) 和 + [客户端认证 API (v1)](/zh/docs/reference/config-api/client-authentication.v1/) * [WebhookAdmission 配置 (v1)](/zh/docs/reference/config-api/apiserver-webhookadmission.v1/) + + +## kubeadm 的配置 API + +* [v1beta2](/zh/docs/reference/config-api/kubeadm-config.v1beta2/) +* [v1beta3](/zh/docs/reference/config-api/kubeadm-config.v1beta3/) + 以下 HTTP 头部字段可用来执行伪装请求: @@ -1841,7 +1841,8 @@ Certificates)。 -作为一种可选方案,响应中还可以包含以 RFC3339 时间戳格式给出的证书到期时间。 +作为一种可选方案,响应中还可以包含以 +[RFC 3339](https://datatracker.ietf.org/doc/html/rfc3339) +时间戳格式给出的证书到期时间。 证书到期时间的有无会有如下影响: - 如果响应中包含了到期时间,持有者令牌和 TLS 凭据会被缓存,直到到期期限到来、 diff --git a/content/zh/docs/reference/access-authn-authz/bootstrap-tokens.md b/content/zh/docs/reference/access-authn-authz/bootstrap-tokens.md index 08b79a5d8d..0960a96716 100644 --- a/content/zh/docs/reference/access-authn-authz/bootstrap-tokens.md +++ b/content/zh/docs/reference/access-authn-authz/bootstrap-tokens.md @@ -113,7 +113,7 @@ controller on the controller manager. Each valid token is backed by a secret in the `kube-system` namespace. You can find the full design doc -[here](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md). +[here](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md). Here is what the secret looks like. --> @@ -121,7 +121,7 @@ Here is what the secret looks like. 每个合法的令牌背后对应着 `kube-system` 名字空间中的某个 Secret 对象。 你可以从 -[这里](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md). 
+[这里](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md) 找到完整设计文档。 这是 Secret 看起来的样子。 diff --git a/content/zh/docs/reference/access-authn-authz/certificate-signing-requests.md b/content/zh/docs/reference/access-authn-authz/certificate-signing-requests.md index cd82025d19..0fc00ced00 100644 --- a/content/zh/docs/reference/access-authn-authz/certificate-signing-requests.md +++ b/content/zh/docs/reference/access-authn-authz/certificate-signing-requests.md @@ -3,7 +3,7 @@ title: 证书签名请求 content_type: concept weight: 20 --- - -证书 API 支持 -[X.509](https://www.itu.int/rec/T-REC-X.509) +证书 API 支持 +[X.509](https://www.itu.int/rec/T-REC-X.509) 的自动化配置, 它为 Kubernetes API 的客户端提供一个编程接口, -用于从证书颁发机构(CA)请求并获取 X.509 +用于从证书颁发机构(CA)请求并获取 X.509 {{< glossary_tooltip term_id="certificate" text="证书" >}}。 CertificateSigningRequest(CSR)资源用来向指定的签名者申请证书签名, @@ -41,7 +41,7 @@ CertificateSigningRequest(CSR)资源用来向指定的签名者申请证书 - @@ -83,13 +83,13 @@ which tells the configured signer that it must not sign the request. 这就相当于通知了指定的签名者,这个证书不能签名。 @@ -100,8 +100,8 @@ The CertificateSigningRequest `status.certificate` field is empty until the sign 此时,字段 `status.certificate` 要么为空,要么包含一个用 PEM 编码的 X.509 证书。 直到签名完成前,CertificateSigningRequest 的字段 `status.certificate` 都为空。 - @@ -147,18 +147,18 @@ This includes: 以便客户端可以预期到他们的 CSR 将发生什么。 此类信息包括: - 1. **信任分发**:信任(CA 证书包)是如何分发的。 @@ -171,7 +171,7 @@ This includes: 以及签名者决定的过期时间与 CSR `spec.expirationSeconds` 字段不同时的应对手段。 6. **允许/不允许 CA 位**:当 CSR 包含一个签名者并不允许的 CA 证书的请求时,相应的应对手段。 - ### 创建 CertificateSigningRequest {#create-certificatesigningrequest} @@ -480,7 +480,7 @@ spec: EOF ``` - 批准 CSR: @@ -521,7 +521,7 @@ Approve the CSR: kubectl certificate approve myuser ``` - 下面是为这个新用户创建 RoleBinding 的示例命令: @@ -576,7 +576,7 @@ This is a sample command to create a RoleBinding for this new user: kubectl create rolebinding developer-binding-myuser --role=developer --user=myuser ``` - 然后,你需要添加上下文: @@ -604,7 +604,7 @@ Then, you need to add the context: kubectl config set-context myuser --cluster=kubernetes --user=myuser ``` - 来测试一下,把上下文切换为 `myuser`: @@ -613,7 +613,7 @@ To test it, change the context to `myuser`: kubectl config use-context myuser ``` - 同样地,驳回一个 CSR: @@ -663,7 +663,7 @@ Likewise, to deny a CSR: kubectl certificate deny ``` - 驳回(`Denied`)的 CRS: @@ -717,7 +717,7 @@ status: type: Denied ``` - -* 对于基于 TLS 的启动引导过程时使用的 certificationsigningrequests API 的读/写权限 -* 为委派的身份验证/授权检查创建 tokenreviews 和 subjectaccessreviews 的能力 +* 对于基于 TLS 的启动引导过程时使用的 + [certificationsigningrequests API](/zh/docs/reference/access-authn-authz/certificate-signing-requests/) + 的读/写权限 +* 为委派的身份验证/授权检查创建 TokenReview 和 SubjectAccessReview 的能力 在将来的版本中,节点鉴权器可能会添加或删除权限,以确保 kubelet 具有正确操作所需的最小权限集。 diff --git a/content/zh/docs/reference/access-authn-authz/webhook.md b/content/zh/docs/reference/access-authn-authz/webhook.md index 8cfdc91592..0ef1e6a18d 100644 --- a/content/zh/docs/reference/access-authn-authz/webhook.md +++ b/content/zh/docs/reference/access-authn-authz/webhook.md @@ -266,6 +266,7 @@ to the REST api. 
-更多信息可以参考 authorization.v1beta1 API 对象和[webhook.go](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go)。 +更多信息可以参考 authorization.v1beta1 API 对象和 [webhook.go](https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go)。 + diff --git a/content/zh/docs/reference/kubectl/conventions.md b/content/zh/docs/reference/kubectl/conventions.md index d8f372086e..bddbec7e03 100644 --- a/content/zh/docs/reference/kubectl/conventions.md +++ b/content/zh/docs/reference/kubectl/conventions.md @@ -70,58 +70,6 @@ You can use the `--dry-run=client` flag to preview the object that would be sent --> 你可以使用 `--dry-run=client` 参数来预览而不真正提交即将下发到集群的对象实例: -{{< note >}} - -所有的 `kubectl run` 生成器已弃用。 -查阅 Kubernetes v1.17 文档中的生成器[列表](https://v1-17.docs.kubernetes.io/docs/reference/kubectl/conventions/#generators)以及它们的用法。 -{{< /note >}} - - -#### 生成器 - -你可以使用 kubectl 命令生成以下资源, `kubectl create --dry-run=client -o yaml`: - -* `clusterrole`: 创建 ClusterRole。 -* `clusterrolebinding`: 为特定的 ClusterRole 创建 ClusterRoleBinding。 -* `configmap`: 使用本地文件、目录或文本值创建 Configmap。 -* `cronjob`: 使用指定的名称创建 Cronjob。 -* `deployment`: 使用指定的名称创建 Deployment。 -* `job`: 使用指定的名称创建 Job。 -* `namespace`: 使用指定的名称创建名称空间。 -* `poddisruptionbudget`: 使用指定名称创建 Pod 干扰预算。 -* `priorityclass`: 使用指定的名称创建 Priorityclass。 -* `quota`: 使用指定的名称创建配额。 -* `role`: 使用单一规则创建角色。 -* `rolebinding`: 为特定角色或 ClusterRole 创建 RoleBinding。 -* `secret`: 使用指定的子命令创建 Secret。 -* `service`: 使用指定的子命令创建服务。 -* `serviceaccount`: 使用指定的名称创建服务帐户。 - - ### `kubectl apply` diff --git a/content/zh/docs/setup/learning-environment/_index.md b/content/zh/docs/setup/learning-environment/_index.md index f68e52a439..e7d409c1b6 100644 --- a/content/zh/docs/setup/learning-environment/_index.md +++ b/content/zh/docs/setup/learning-environment/_index.md @@ -11,41 +11,3 @@ weight: 20 {{/* If you're localizing this page, you only need to copy the front matter */}} {{/* and add a redirect into "/static/_redirects", for YOUR localization. 
*/}} --> - -## kind - - -你可以使用 [`kind`](https://kind.sigs.k8s.io/docs/) 来在本地计算机上运行 Kubernetes。 -此工具要求你已经安装并配置了 [Docker](https://docs.docker.com/get-docker/)。 - -kind [快速入门](https://kind.sigs.k8s.io/docs/user/quick-start/)页面 -为你展示了如何开始使用 kind 的相关信息。 - -## minikube - - -与 `kind` 类似,[`minikube`](https://minikube.sigs.k8s.io/) 是一个允许你在 -本地运行 Kubernetes 的工具。`minikube` 在你的个人计算机上运行一个单节点的 -Kubernetes 集群(包括 Windows、macOS 和 Linux PC 机),这样你可以尝试 -Kubernetes 或者执行每天的开发工作。 - -如果你所关注的是如何安装该工具,可以查阅官方的 -[Get Started!](https://minikube.sigs.k8s.io/docs/start/) -文档。 - diff --git a/content/zh/docs/tasks/access-application-cluster/access-cluster.md b/content/zh/docs/tasks/access-application-cluster/access-cluster.md index 706afa0779..1dd18f0c90 100644 --- a/content/zh/docs/tasks/access-application-cluster/access-cluster.md +++ b/content/zh/docs/tasks/access-application-cluster/access-cluster.md @@ -1,4 +1,4 @@ ---- +--- title: 访问集群 weight: 20 content_type: concept @@ -50,10 +50,10 @@ kubectl config view 有许多 [例子](/zh/docs/reference/kubectl/cheatsheet/) 介绍了如何使用 kubectl, -可以在 [kubectl手册](/zh/docs/reference/kubectl/overview/) 中找到更完整的文档。 +可以在 [kubectl 参考](/zh/docs/reference/kubectl/overview/) 中找到更完整的文档。 + ### 不使用 kubectl proxy 在 Kubernetes 1.3 或更高版本中,`kubectl config view` 不再显示 token。 -使用 `kubectl describe secret ...` 来获取默认服务帐户的 token,如下所示: - +使用 `kubectl apply` 和 `kubectl describe secret ...` 及 grep 和剪切操作来为 default 服务帐户创建令牌,如下所示: `grep/cut` 方法实现: +首先,创建 Secret,请求默认 ServiceAccount 的令牌: +```shell +kubectl apply -f - < +接下来,等待令牌控制器使用令牌填充 Secret: +```shell +while ! kubectl describe secret default-token | grep -E '^token' >/dev/null; do + echo "waiting for token..." >&2 + sleep 1 +done +``` + +捕获并使用生成的令牌: ```shell APISERVER=$(kubectl config view | grep server | cut -f 2- -d ":" | tr -d " ") -TOKEN=$(kubectl describe secret $(kubectl get secrets | grep default | cut -f1 -d ' ') | grep -E '^token' | cut -f2 -d':' | tr -d ' ') +TOKEN=$(kubectl describe secret default-token | grep -E '^token' | cut -f2 -d':' | tr -d ' ') curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure ``` ```json @@ -172,7 +201,7 @@ curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure ```shell APISERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}') -TOKEN=$(kubectl get secret $(kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode ) +TOKEN=$(kubectl get secret default-token -o jsonpath='{.data.token}' | base64 --decode ) curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure ``` diff --git a/content/zh/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/zh/docs/tasks/access-application-cluster/web-ui-dashboard.md index 70becf1797..e19888913f 100644 --- a/content/zh/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/content/zh/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -9,7 +9,9 @@ card: --- #### 手动构建 API 服务器代理 URLs {#manually-constructing-apiserver-proxy-urls} @@ -160,6 +170,15 @@ If you haven't specified a name for your port, you don't have to specify *port_n `http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`service_name[:port_name]`*`/proxy` 如果还没有为你的端口指定名称,你可以不用在 URL 中指定 *port_name*。 +对于命名和未命名端口,你还可以使用端口号代替 *port_name*。 + +默认情况下,API 服务器使用 HTTP 为你的服务提供代理。 要使用 HTTPS,请在服务名称前加上 `https:`: +`http:///api/v1/namespaces//services//proxy` +URL 的 `` 段支持的格式为: +* `` - 使用 http 代理到默认或未命名端口 +* `:` - 使用 http 代理到指定的端口名称或端口号 +* `https::` - 使用 
https 代理到默认或未命名端口(注意尾随冒号) +* `https::` - 使用 https 代理到指定的端口名称或端口号 +## 升级 etcd 集群 +有关 etcd 升级的更多详细信息,请参阅 [etcd 升级](https://etcd.io/docs/latest/upgrades/)文档。 +{{< note >}} +在开始升级之前,请先备份你的 etcd 集群。 +{{< /note >}} + diff --git a/content/zh/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md b/content/zh/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md index bce123e306..4e08ce2c60 100644 --- a/content/zh/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md +++ b/content/zh/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md @@ -289,10 +289,10 @@ Install-WindowsFeature -Name containers ``` 安装 Docker -操作指南在 [Install Docker Engine - Enterprise on Windows Servers](https://hub.docker.com/editions/enterprise/docker-ee-server-windows)。 +操作指南在 [Install Docker Engine - Enterprise on Windows Servers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/quick-start/set-up-environment?tabs=Windows-Server#install-docker)。 -标志设置的值是一个按 NUMA 节点所给的内存预留的值的列表,用逗号分开。 +标志设置的值是一个按 NUMA 节点的不同内存类型所给的内存预留的值的列表,用逗号分开。 +可以使用分号作为分隔符来指定跨多个 NUMA 节点的内存预留。 只有在内存管理器特性被启用的语境下,这个参数才有意义。 内存管理器不会使用这些预留的内存来为容器负载分配内存。 @@ -426,7 +428,7 @@ Here is an example of a correct configuration: --kube-reserved=cpu=4,memory=4Gi --system-reserved=cpu=1,memory=1Gi --memory-manager-policy=Static ---reserved-memory 0:memory=3Gi --reserved-memory 1:memory=2148Mi +--reserved-memory '0:memory=3Gi;1:memory=2148Mi' ``` 自从 Kubernetes 1.20 宣布 [弃用 dockershim](/zh/blog/2020/12/08/kubernetes-1-20-release-announcement/#dockershim-deprecation), 各类疑问随之而来:这对各类工作负载和 Kubernetes 部署会产生什么影响。 -你会发现这篇博文对于更好地理解此问题非常有用: -[弃用 Dockershim 常见问题](/zh/blog/2020/12/02/dockershim-faq/) +我们的[弃用 Dockershim 常见问题](/blog/2022/02/17/dockershim-faq/)可以帮助你更好地理解这个问题。 本页讲解你的集群把 Docker 用作容器运行时的运作机制, 并提供使用 `dockershim` 时,它所扮演角色的详细信息, -继而展示了一组验证步骤,可用来检查弃用 `dockershim` 对你的工作负载的影响。 +继而展示了一组操作,可用来检查弃用 `dockershim` 对你的工作负载是否有影响。 -虽然你通过 Docker 创建了应用容器,但这些容器却可以运行于所有容器运行时。 -所以这种使用 Docker 容器运行时的方式并不构成对 Docker 的依赖。 +即使你是通过 Docker 创建的应用容器,也不妨碍你在其他任何容器运行时上运行这些容器。 +这种使用 Docker 的方式并不构成对 Docker 作为一个容器运行时的依赖。 -当用了替代的容器运行时之后,Docker 命令可能不工作,甚至产生意外的输出。 -这才是判定你是否依赖于 Docker 的方法。 +当用了别的容器运行时之后,Docker 命令可能不工作,或者产生意外的输出。 +下面是判定你是否依赖于 Docker 的方法。 1. 确认没有特权 Pod 执行 Docker 命令(如 `docker ps`)、重新启动 Docker - 服务(如 `systemctl restart docker.service`)或修改 - Docker 配置文件 `/etc/docker/daemon.json`。 + 服务(如 `systemctl restart docker.service`)或修改 Docker 配置文件 + `/etc/docker/daemon.json`。 2. 检查 Docker 配置文件(如 `/etc/docker/daemon.json`)中容器镜像仓库的镜像(mirror)站点设置。 这些配置通常需要针对不同容器运行时来重新设置。 -3. 检查确保在 Kubernetes 基础设施之外的节点上运行的脚本和应用程序没有执行Docker命令。 +3. 检查确保在 Kubernetes 基础设施之外的节点上运行的脚本和应用程序没有执行 Docker 命令。 可能的情况如: - SSH 到节点排查故障; - 节点启动脚本; - 直接安装在节点上的监控和安全代理。 -4. 检查执行上述特权操作的第三方工具。详细操作请参考: - [从 dockershim 迁移遥测和安全代理](/zh/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents) +4. 检查执行上述特权操作的第三方工具。详细操作请参考 + [从 dockershim 迁移遥测和安全代理](/zh/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents)。 5. 
确认没有对 dockershim 行为的间接依赖。这是一种极端情况,不太可能影响你的应用。 - 一些工具很可能被配置为使用了 Docker 特性,比如,基于特定指标发警报,或者在故障排查指令的一个环节中搜索特定的日志信息。 - 如果你有此类配置的工具,需要在迁移之前,在测试集群上完成功能验证。 + 一些工具很可能被配置为使用了 Docker 特性,比如,基于特定指标发警报, + 或者在故障排查指令的一个环节中搜索特定的日志信息。 + 如果你有此类配置的工具,需要在迁移之前,在测试集群上测试这类行为。 [容器运行时](/zh/docs/concepts/containers/#container-runtimes)是一个软件,用来运行组成 Kubernetes Pod 的容器。 -Kubernetes 负责编排和调度 Pod;在每一个节点上, -{{< glossary_tooltip text="kubelet" term_id="kubelet" >}} +Kubernetes 负责编排和调度 Pod;在每一个节点上,{{< glossary_tooltip text="kubelet" term_id="kubelet" >}} 使用抽象的容器运行时接口,所以你可以任意选用兼容的容器运行时。 你可以阅读博文 -[Kubernetes 容器集成功能的正式发布](/zh/blog/2018/05/24/kubernetes-containerd-integration-goes-ga/) +[Kubernetes 正式支持集成 Containerd](/zh/blog/2018/05/24/kubernetes-containerd-integration-goes-ga/)。 ![Dockershim 和 Containerd CRI 的实现对比图](/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/cri-containerd.png) @@ -138,8 +138,8 @@ So any Docker tooling or fancy UI you might have used before to check on these containers is no longer available. --> 切换到容器运行时 Containerd 可以消除掉中间环节。 -所有以前遗留的容器可由 Containerd 这类容器运行时来运行和管理,操作体验也和以前一样。 -但是现在,由于直接用容器运行时调度容器,所以它们对 Docker 来说是不可见的。 +所有相同的容器都可由 Containerd 这类容器运行时来运行。 +但是现在,由于直接用容器运行时调度容器,它们对 Docker 是不可见的。 因此,你以前用来检查这些容器的 Docker 工具或漂亮的 UI 都不再可用。 {{< note >}} - -如果你用 Kubernetes 运行工作负载,最好通过 Kubernetes API停止容器,而不是通过容器运行时 +如果你在用 Kubernetes 运行工作负载,最好通过 Kubernetes API 停止容器, +而不是通过容器运行时来停止它们 (此建议适用于所有容器运行时,不仅仅是针对 Docker)。 - {{< /note >}} 你仍然可以下载镜像,或者用 `docker build` 命令创建它们。 但用 Docker 创建、下载的镜像,对于容器运行时和 Kubernetes,均不可见。 -为了在 Kubernetes 中使用,需要把镜像推送(push)到某注册中心。 +为了在 Kubernetes 中使用,需要把镜像推送(push)到某镜像仓库。 + diff --git a/content/zh/docs/tasks/configure-pod-container/create-hostprocess-pod.md b/content/zh/docs/tasks/configure-pod-container/create-hostprocess-pod.md index 01116183e7..61942b93fc 100644 --- a/content/zh/docs/tasks/configure-pod-container/create-hostprocess-pod.md +++ b/content/zh/docs/tasks/configure-pod-container/create-hostprocess-pod.md @@ -46,7 +46,7 @@ as Windows server containers, meaning that the version of the base images does n to match that of the host. It is, however, recommended that you use the same base image version as your Windows Server container workloads to ensure you do not have any unused images taking up space on the node. HostProcess containers also support -[volume mounts](./create-hostprocess-pod#volume-mounts) within the container volume. +[volume mounts](#volume-mounts) within the container volume. --> 类似于安装安全补丁、事件日志收集等这类管理性质的任务可以在不需要集群操作员登录到每个 Windows 节点的前提下执行。HostProcess 容器可以以主机上存在的任何用户账户来运行, @@ -58,7 +58,7 @@ Windows 节点的前提下执行。HostProcess 容器可以以主机上存在的 这意味着基础镜像的版本不必与主机操作系统的版本匹配。 不过,仍然建议你像使用 Windows 服务器容器负载那样,使用相同的基础镜像版本, 这样你就不会有一些未使用的镜像占用节点上的存储空间。HostProcess 容器也支持 -在容器卷内执行[卷挂载](./create-hostprocess-pod#volume-mounts)。 +在容器卷内执行[卷挂载](#volume-mounts)。 - HostProcess 容器需要 containerd 1.6 或更高版本的 {{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}}。 @@ -145,13 +145,14 @@ filesystem or Hyper-V isolation are supported for HostProcess containers. 
用户账号所实施的资源约束外,不提供任何形式的隔离。HostProcess 容器不支持文件系统或 Hyper-V 隔离。 - 卷挂载是被支持的,并且要花在到容器卷下。参见[卷挂载](#volume-mounts)。 - 默认情况下有一组主机用户账户可供 HostProcess 容器使用。 diff --git a/content/zh/docs/tasks/configure-pod-container/security-context.md b/content/zh/docs/tasks/configure-pod-container/security-context.md index a131d450c1..7a226f37f9 100644 --- a/content/zh/docs/tasks/configure-pod-container/security-context.md +++ b/content/zh/docs/tasks/configure-pod-container/security-context.md @@ -1,5 +1,5 @@ --- -title: 为 Pod 或容器配置安全性上下文 +title: 为 Pod 或容器配置安全上下文 content_type: task weight: 80 --- @@ -21,7 +21,8 @@ a Pod or Container. Security context settings include, but are not limited to: * Discretionary Access Control: Permission to access an object, like a file, is based on [user ID (UID) and group ID (GID)](https://wiki.archlinux.org/index.php/users_and_groups). -* [Security Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux): Objects are assigned security labels. +* [Security Enhanced Linux (SELinux)](https://en.wikipedia.org/wiki/Security-Enhanced_Linux): + Objects are assigned security labels. * Running as privileged or unprivileged. * [Linux Capabilities](https://linux-audit.com/linux-capabilities-hardening-linux-binaries-by-removing-setuid/): Give a process some privileges, but not all the privileges of the root user. @@ -29,8 +30,8 @@ a Pod or Container. Security context settings include, but are not limited to: 安全上下文(Security Context)定义 Pod 或 Container 的特权与访问控制设置。 安全上下文包括但不限于: -* 自主访问控制(Discretionary Access Control):基于 - [用户 ID(UID)和组 ID(GID)](https://wiki.archlinux.org/index.php/users_and_groups). +* 自主访问控制(Discretionary Access Control): + 基于[用户 ID(UID)和组 ID(GID)](https://wiki.archlinux.org/index.php/users_and_groups) 来判定对对象(例如文件)的访问权限。 * [安全性增强的 Linux(SELinux)](https://zh.wikipedia.org/wiki/%E5%AE%89%E5%85%A8%E5%A2%9E%E5%BC%BA%E5%BC%8FLinux): 为对象赋予安全性标签。 @@ -38,21 +39,31 @@ a Pod or Container. Security context settings include, but are not limited to: * [Linux 权能](https://linux-audit.com/linux-capabilities-hardening-linux-binaries-by-removing-setuid/): 为进程赋予 root 用户的部分特权而非全部特权。 -* [AppArmor](/zh/docs/tutorials/clusters/apparmor/):使用程序框架来限制个别程序的权能。 -* [Seccomp](/zh/docs/tutorials/clusters/seccomp/):过滤进程的系统调用。 -* AllowPrivilegeEscalation:控制进程是否可以获得超出其父进程的特权。 +* [AppArmor](/zh/docs/tutorials/security/apparmor/):使用程序配置来限制个别程序的权能。 +* [Seccomp](/zh/docs/tutorials/security/seccomp/):过滤进程的系统调用。 +* `allowPrivilegeEscalation`:控制进程是否可以获得超出其父进程的特权。 此布尔值直接控制是否为容器进程设置 [`no_new_privs`](https://www.kernel.org/doc/Documentation/prctl/no_new_privs.txt)标志。 - 当容器以特权模式运行或者具有 `CAP_SYS_ADMIN` 权能时,AllowPrivilegeEscalation 总是为 true。 + 当容器满足一下条件之一时,`allowPrivilegeEscalation` 总是为 true: + + - 以特权模式运行,或者 + - 具有 `CAP_SYS_ADMIN` 权能 + * readOnlyRootFilesystem:以只读方式加载容器的根文件系统。 +输出类似于: + +```none uid=1000 gid=3000 groups=2000 ``` -你会看到 `gid` 值为 3000,也就是 `runAsGroup` 字段的值。 +从输出中你会看到 `gid` 值为 3000,也就是 `runAsGroup` 字段的值。 如果 `runAsGroup` 被忽略,则 `gid` 会取值 0(root),而进程就能够与 root 用户组所拥有以及要求 root 用户组访问权限的文件交互。 @@ -251,18 +267,21 @@ slowing Pod startup. You can use the `fsGroupChangePolicy` field inside a `secur to control the way that Kubernetes checks and manages ownership and permissions for a volume. 
--> -默认情况下,Kubernetes 在挂载一个卷时,会递归地更改每个卷中的内容的属主和访问权限,使之与 Pod -的 `securityContext` 中指定的 `fsGroup` 匹配。 +默认情况下,Kubernetes 在挂载一个卷时,会递归地更改每个卷中的内容的属主和访问权限, +使之与 Pod 的 `securityContext` 中指定的 `fsGroup` 匹配。 对于较大的数据卷,检查和变更属主与访问权限可能会花费很长时间,降低 Pod 启动速度。 你可以在 `securityContext` 中使用 `fsGroupChangePolicy` 字段来控制 Kubernetes 检查和管理卷属主和访问权限的方式。 {{< note >}} -此字段对于[`secret`](/zh/docs/concepts/storage/volumes/#secret)、 +此字段对于 [`secret`](/zh/docs/concepts/storage/volumes/#secret)、 [`configMap`](/zh/docs/concepts/storage/volumes/#configmap) 和 [`emptydir`](/zh/docs/concepts/storage/volumes/#emptydir) 这类临时性存储无效。 @@ -316,23 +335,24 @@ ownership and permission change, `fsGroupChangePolicy` does not take effect, and as specified by CSI, the driver is expected to mount the volume with the provided `fsGroup`, resulting in a volume that is readable/writable by the `fsGroup`. +--> +如果你部署了一个[容器存储接口 (CSI)](https://github.com/container-storage-interface/spec/blob/master/spec.md) +驱动,而该驱动支持 `VOLUME_MOUNT_GROUP` `NodeServiceCapability`, +在 `securityContext` 中指定 `fsGroup` 来设置文件所有权和权限的过程将由 CSI +驱动而不是 Kubernetes 来执行,前提是 Kubernetes 的 `DelegateFSGroupToCSIDriver` +特性门控已启用。在这种情况下,由于 Kubernetes 不执行任何所有权和权限更改, +`fsGroupChangePolicy` 不会生效,并且按照 CSI 的规定,CSI 驱动应该使用所指定的 +`fsGroup` 来挂载卷,从而生成了一个对 `fsGroup` 可读/可写的卷. + -如果你部署了一个[容器存储接口 (CSI)](https://github.com/container-storage-interface/spec/blob/master/spec.md) -驱动支持 `VOLUME_MOUNT_GROUP` `NodeServiceCapability`, -在 `securityContext` 中指定 `fsGroup` 来设置文件所有权和权限的过程将由 CSI 驱动 -而不是 Kubernetes 来执行,前提是 Kubernetes 的 `DelegateFSGroupToCSIDriver` -特性门控已启用。在这种情况下,由于 Kubernetes 不执行任何 -所有权和权限更改,`fsGroupChangePolicy` 不会生效,并且 -按照 CSI 的规定,CSI 驱动应该使用所指定的 `fsGroup` 来挂载卷,从而生成了一个对 `fsGroup` 可读/可写的卷. - 更多的信息请参考 [KEP](https://github.com/gnufied/enhancements/blob/master/keps/sig-storage/2317-fsgroup-on-mount/README.md) -和 [CSI 规范](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume) 中的字 -段 `VolumeCapability.MountVolume.volume_mount_group` 的描述 。 +和 [CSI 规范](https://github.com/container-storage-interface/spec/blob/master/spec.md#createvolume) +中的字段 `VolumeCapability.MountVolume.volume_mount_group` 的描述。 -输出显示进程以用户 2000 账号运行。该值是在 Container 的 `runAsUser` 中设置的。 +输出显示进程以用户 2000 运行。该值是在 Container 的 `runAsUser` 中设置的。 该设置值重载了 Pod 层面所设置的值 1000。 ``` @@ -434,12 +453,12 @@ Here is configuration file that does not add or remove any Container capabilitie --> ## 为 Container 设置权能 {#set-capabilities-for-a-container} -使用 [Linux 权能](https://man7.org/linux/man-pages/man7/capabilities.7.html),你可以 -赋予进程 root 用户所拥有的某些特权,但不必赋予其全部特权。 -要为 Container 添加或移除 Linux 权能,可以在 Container 清单的 `securityContext` 节 -包含 `capabilities` 字段。 +使用 [Linux 权能](https://man7.org/linux/man-pages/man7/capabilities.7.html), +你可以赋予进程 root 用户所拥有的某些特权,但不必赋予其全部特权。 +要为 Container 添加或移除 Linux 权能,可以在 Container 清单的 `securityContext` +节包含 `capabilities` 字段。 -首先,查看不包含 `capabilities` 字段时候会发生什么。 +首先,看一下不包含 `capabilities` 字段时候会发生什么。 下面是一个配置文件,其中没有添加或移除容器的权能: {{< codenew file="pods/security/security-context-3.yaml" >}} @@ -598,12 +617,15 @@ for definitions of the capability constants. 
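Relating back to the `fsGroupChangePolicy` discussion above, a short sketch of how the field sits inside `securityContext` may be useful. The Pod name, image, and PVC name are assumptions; note that, as stated above, the policy has no effect on ephemeral volume types such as `secret`, `configMap`, and `emptyDir`.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: fsgroup-demo                        # hypothetical name
spec:
  securityContext:
    fsGroup: 2000
    fsGroupChangePolicy: "OnRootMismatch"   # only change ownership when the volume root does not already match fsGroup
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: my-claim                   # assumes an existing PVC
  containers:
  - name: demo
    image: busybox                          # illustrative image
    command: ["sh", "-c", "ls -ln /data && sleep 3600"]
    volumeMounts:
    - name: data
      mountPath: /data
```

The other accepted value is `"Always"`, which keeps the default recursive ownership and permission change on every mount.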
了解权能常数的定义。 {{< note >}} Linux 权能常数定义的形式为 `CAP_XXX`。但是你在 Container 清单中列举权能时, -要将权能名称中的 `CAP_` 部分去掉。例如,要添加 `CAP_SYS_TIME`,可在权能 -列表中添加 `SYS_TIME`。 +要将权能名称中的 `CAP_` 部分去掉。例如,要添加 `CAP_SYS_TIME`, +可在权能列表中添加 `SYS_TIME`。 {{< /note >}} -## 为容器设置 Seccomp 样板 +## 为容器设置 Seccomp 配置 -若要为容器设置 Seccomp 样板(Profile),可在你的 Pod 或 Container 清单的 +若要为容器设置 Seccomp 配置(Profile),可在你的 Pod 或 Container 清单的 `securityContext` 节中包含 `seccompProfile` 字段。该字段是一个 [SeccompProfile](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#seccompprofile-v1-core) 对象,包含 `type` 和 `localhostProfile` 属性。 `type` 的合法选项包括 `RuntimeDefault`、`Unconfined` 和 `Localhost`。 -`localhostProfile` 只能在 `type: Localhost` 配置下才需要设置。 -该字段标明节点上预先配置的样板的路径,路径是相对于 kubelet 所配置的 -Seccomp 样板路径(使用 `--root-dir` 配置)而言的。 +`localhostProfile` 只能在 `type: Localhost` 配置下才可以设置。 +该字段标明节点上预先设定的配置的路径,路径是相对于 kubelet 所配置的 +Seccomp 配置路径(使用 `--root-dir` 设置)而言的。 -下面是一个例子,设置容器使用节点上容器运行时的默认样板作为 Seccomp 样板: +下面是一个例子,设置容器使用节点上容器运行时的默认配置作为 Seccomp 配置: ```yaml ... @@ -704,15 +726,15 @@ Pod 的安全上下文适用于 Pod 中的容器,也适用于 Pod 所挂载的 * `fsGroup`:支持属主管理的卷会被修改,将其属主变更为 `fsGroup` 所指定的 GID, 并且对该 GID 可写。进一步的细节可参阅 @@ -763,9 +785,9 @@ kubectl delete pod security-context-demo-4 --> * [PodSecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritycontext-v1-core) API 定义 * [SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core) API 定义 -* [使用最新的安全性增强来调优 Docker](https://github.com/containerd/containerd/blob/main/docs/cri/config.md) -* [安全性上下文的设计文档](https://git.k8s.io/community/contributors/design-proposals/auth/security_context.md) -* [属主管理的设计文档](https://git.k8s.io/community/contributors/design-proposals/storage/volume-ownership-management.md) +* [使用最新的安全性增强来调优 Docker(英文)](https://github.com/containerd/containerd/blob/main/docs/cri/config.md) +* [安全上下文的设计文档(英文)](https://git.k8s.io/community/contributors/design-proposals/auth/security_context.md) +* [属主管理的设计文档(英文)](https://git.k8s.io/community/contributors/design-proposals/storage/volume-ownership-management.md) * [Pod 安全策略](/zh/docs/concepts/policy/pod-security-policy/) -* [AllowPrivilegeEscalation 的设计文档](https://git.k8s.io/community/contributors/design-proposals/auth/no-new-privs.md) +* [AllowPrivilegeEscalation 的设计文档(英文)](https://git.k8s.io/community/contributors/design-proposals/auth/no-new-privs.md) diff --git a/content/zh/docs/tasks/inject-data-application/define-command-argument-container.md b/content/zh/docs/tasks/inject-data-application/define-command-argument-container.md index 6688e6fab1..3372c8ca26 100644 --- a/content/zh/docs/tasks/inject-data-application/define-command-argument-container.md +++ b/content/zh/docs/tasks/inject-data-application/define-command-argument-container.md @@ -46,8 +46,7 @@ with your new arguments. 
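As a quick illustration of the command/args behavior described here, a sketch of a Pod that overrides both the image's `Entrypoint` and `Cmd` follows; the Pod name and image are illustrative and the invoked program is just an example.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: command-demo-sketch           # hypothetical name
spec:
  restartPolicy: OnFailure
  containers:
  - name: command-demo-container
    image: debian                     # illustrative image
    # command replaces the image's default entrypoint; args replaces its default arguments
    command: ["printenv"]
    args: ["HOSTNAME", "KUBERNETES_PORT"]
```

With only `args` set and `command` omitted, the image's own entrypoint would run with these new arguments instead.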
如果在配置文件中设置了容器启动时要执行的命令及其参数,那么容器镜像中自带的命令与参数将会被覆盖而不再执行。如果配置文件中只是设置了参数,却没有设置其对应的命令,那么容器镜像中自带的命令会使用该新参数作为其执行时的参数。 {{< note >}} 在有些容器运行时中,`command` 字段对应 `entrypoint`,请参阅下面的 @@ -161,73 +160,6 @@ command: ["/bin/sh"] args: ["-c", "while true; do echo hello; sleep 10;done"] ``` - -## 说明事项 {#notes} - -下表给出了 Docker 与 Kubernetes 中对应的字段名称。 - -| 描述 | Docker 字段名称 | Kubernetes 字段名称 | -|--------------------|--------------------|-----------------------| -| 容器执行的命令 | Entrypoint | command | -| 传给命令的参数 | Cmd | args | - - -如果要覆盖默认的 Entrypoint 与 Cmd,需要遵循如下规则: - -* 如果在容器配置中没有设置 `command` 或者 `args`,那么将使用 Docker 镜像自带的命令及其参数。 - -* 如果在容器配置中只设置了 `command` 但是没有设置 `args`,那么容器启动时只会执行该命令, - Docker 镜像中自带的命令及其参数会被忽略。 - -* 如果在容器配置中只设置了 `args`,那么 Docker 镜像中自带的命令会使用该新参数作为其执行时的参数。 - -* 如果在容器配置中同时设置了 `command` 与 `args`,那么 Docker 镜像中自带的命令及其参数会被忽略。 - 容器启动时只会执行配置中设置的命令,并使用配置中设置的参数作为命令的参数。 - - -下面是一些例子: - -| 镜像 Entrypoint | 镜像 Cmd | 容器 command | 容器 args | 命令执行 | -|--------------------|------------------|---------------------|--------------------|------------------| -| `[/ep-1]` | `[foo bar]` | <not set> | <not set> | `[ep-1 foo bar]` | -| `[/ep-1]` | `[foo bar]` | `[/ep-2]` | <not set> | `[ep-2]` | -| `[/ep-1]` | `[foo bar]` | <not set> | `[zoo boo]` | `[ep-1 zoo boo]` | -| `[/ep-1]` | `[foo bar]` | `[/ep-2]` | `[zoo boo]` | `[ep-2 zoo boo]` | - ## {{% heading "whatsnext" %}} diff --git a/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md index 0f2f5e90b6..731d560c5d 100644 --- a/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -30,6 +30,7 @@ Cron jobs can also schedule individual tasks for a specific time, such as if you --> 在Kubernetes v1.21 版本中,CronJob 被提升为通用版本。如果你使用的是旧版本的 Kubernetes,请参考你正在使用的 Kubernetes 版本的文档,这样你就能看到准确的信息。旧的 Kubernetes 版本不支持`batch/v1` CronJob API。 + 你可以利用 [CronJobs](/zh/docs/concepts/workloads/controllers/cron-jobs) 执行基于时间调度的任务。这些自动化任务和 Linux 或者 Unix 系统的 [Cron](https://en.wikipedia.org/wiki/Cron) 任务类似。 CronJobs 在创建周期性以及重复性的任务时很有帮助,例如执行备份操作或者发送邮件。CronJobs 也可以在特定时间调度单个任务,例如你想调度低活跃周期的任务。 @@ -43,6 +44,7 @@ For more limitations, see [CronJobs](/docs/concepts/workloads/controllers/cron-j CronJobs 有一些限制和特点。 例如,在特定状况下,同一个 CronJob 可以创建多个任务。 因此,任务应该是幂等的。 + 查看更多限制,请参考 [CronJobs](/zh/docs/concepts/workloads/controllers/cron-jobs)。 ## {{% heading "prerequisites" %}} @@ -134,16 +136,14 @@ hello */1 * * * * False 0 50s 75s ``` -你应该能看到 “hello” CronJob 在 `LAST-SCHEDULE` 声明的时间点成功的调度了一次任务。 +你应该能看到 `hello` CronJob 在 `LAST SCHEDULE` 声明的时间点成功的调度了一次任务。 有 0 个活跃的任务意味着任务执行完毕或者执行失败。 -现在,找到最后一次调度任务创建的 Pod 并查看一个 Pod 的标准输出。请注意任务名称和 Pod 名称是不同的。 +现在,找到最后一次调度任务创建的 Pod 并查看一个 Pod 的标准输出。 +输出与此类似: ``` Fri Feb 22 11:02:09 UTC 2019 @@ -359,6 +363,6 @@ By default, they are set to 3 and 1 respectively. 
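Since the history-limit fields are easiest to see in context, here is a sketch of a CronJob in the spirit of the `hello` example on this page, with both limits spelled out at their default values; treat the container image and command as illustrative assumptions.

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  successfulJobsHistoryLimit: 3    # default: keep the last 3 successful Jobs
  failedJobsHistoryLimit: 1        # default: keep the last failed Job
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: OnFailure
          containers:
          - name: hello
            image: busybox         # illustrative image
            command: ["/bin/sh", "-c", "date; echo Hello from the Kubernetes cluster"]
```

Setting either limit to `0` would keep no completed Jobs of that type at all.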
Setting a limit to `0` corres `.spec.successfulJobsHistoryLimit` 和 `.spec.failedJobsHistoryLimit`是可选的。 这两个字段指定应保留多少已完成和失败的任务。 -默认设置为3和1。限制设置为0代表相应类型的任务完成后不会保留。 +默认设置为3和1。限制设置为 `0` 代表相应类型的任务完成后不会保留。 diff --git a/content/zh/docs/tasks/job/parallel-processing-expansion.md b/content/zh/docs/tasks/job/parallel-processing-expansion.md index 9e2c0a2c86..2f5a8e13c3 100644 --- a/content/zh/docs/tasks/job/parallel-processing-expansion.md +++ b/content/zh/docs/tasks/job/parallel-processing-expansion.md @@ -279,13 +279,13 @@ First, copy and paste the following template of a Job object, into a file called 首先,复制下面的 Job 对象模板到一个名为 `job.yaml.jinja2` 的文件。 ```liquid -{%- set params = [{ "name": "apple", "url": "http://dbpedia.org/resource/Apple", }, +{% set params = [{ "name": "apple", "url": "http://dbpedia.org/resource/Apple", }, { "name": "banana", "url": "http://dbpedia.org/resource/Banana", }, { "name": "cherry", "url": "http://dbpedia.org/resource/Cherry" }] %} -{%- for p in params %} -{%- set name = p["name"] %} -{%- set url = p["url"] %} +{% for p in params %} +{% set name = p["name"] %} +{% set url = p["url"] %} --- apiVersion: batch/v1 kind: Job @@ -305,7 +305,7 @@ spec: image: busybox command: ["sh", "-c", "echo Processing URL {{ url }} && sleep 5"] restartPolicy: Never -{%- endfor %} +{% endfor %} ``` 你可能想设置 [`.spec.updateStrategy.rollingUpdate.maxUnavailable`](/zh/docs/concepts/workloads/controllers/deployment/#max-unavailable) (默认为 1), [`.spec.minReadySeconds`](/zh/docs/concepts/workloads/controllers/deployment/#min-ready-seconds) (默认为 0) 和 -[`.spec.maxSurge`](/zh/docs/concepts/workloads/controllers/deployment/#max-surge) (一种 Beta 阶段的特性,默认为 25%) +[`.spec.updateStrategy.rollingUpdate.maxSurge`](/zh/docs/reference/kubernetes-api/workload-resources/daemon-set-v1/#DaemonSetSpec) +(一种 Beta 阶段的特性,默认为 0)。 修补你的 Deployment: -{{< tabs name="kubectl_retainkeys_example" >}} -{{{< tab name="Bash" codelang="bash" >}} -kubectl patch deployment retainkeys-demo --type merge --patch "$(cat patch-file-no-retainkeys.yaml)" -{{< /tab >}} -{{< tab name="PowerShell" codelang="posh" >}} -kubectl patch deployment retainkeys-demo --type merge --patch $(Get-Content patch-file-no-retainkeys.yaml -Raw) -{{< /tab >}}} -{{< /tabs >}} +```shell +kubectl patch deployment patch-demo --patch-file patch-file.yaml +``` 检查 Deployment 的内容: @@ -625,10 +615,10 @@ The following commands are equivalent: 以下命令是等价的: ```shell -kubectl patch deployment patch-demo --patch "$(cat patch-file.yaml)" +kubectl patch deployment patch-demo --patch-file patch-file.yaml" kubectl patch deployment patch-demo --patch 'spec:\n template:\n spec:\n containers:\n - name: patch-demo-ctr-2\n image: redis' -kubectl patch deployment patch-demo --patch "$(cat patch-file.json)" +kubectl patch deployment patch-demo --patch-file patch-file.json" kubectl patch deployment patch-demo --patch '{"spec": {"template": {"spec": {"containers": [{"name": "patch-demo-ctr-2","image": "redis"}]}}}}' ``` @@ -681,6 +671,3 @@ Strategic merge patch is not supported for custom resources. 
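Because strategic merge patch is not supported for custom resources, a JSON merge patch is the usual fallback. A sketch follows; the `crontab` resource, object name, and `spec.replicas` field are hypothetical placeholders, so substitute your own custom resource.

```shell
# Apply a JSON merge patch to a custom resource.
kubectl patch crontab my-new-cron-object --type merge --patch '{"spec": {"replicas": 3}}'

# The same patch can be read from a file, matching the --patch-file style used above.
kubectl patch crontab my-new-cron-object --type merge --patch-file cr-patch.yaml
```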
* [使用配置文件执行 Kubernetes 对象的指令式管理](/zh/docs/tasks/manage-kubernetes-objects/imperative-config) * [使用配置文件对 Kubernetes 对象进行声明式管理](/zh/docs/tasks/manage-kubernetes-objects/declarative-config/) - - - diff --git a/content/zh/docs/tasks/tools/install-kubectl-windows.md b/content/zh/docs/tasks/tools/install-kubectl-windows.md index a52e7bd375..617ca3f3ce 100644 --- a/content/zh/docs/tasks/tools/install-kubectl-windows.md +++ b/content/zh/docs/tasks/tools/install-kubectl-windows.md @@ -106,17 +106,23 @@ The following methods exist for installing kubectl on Windows: ``` -1. 将可执行文件的路径添加到 `PATH`。 +1. 将 kubectl 二进制文件夹附加或添加到你的 `PATH` 环境变量中。 1. 测试一下,确保此 `kubectl` 的版本和期望版本一致: ```cmd kubectl version --client ``` + 或者使用下面命令来查看版本的详细信息: + ```cmd + kubectl version --client --output=yaml + ``` + -1. 将可执行文件添加到你的 `PATH` 环境变量。 +1. 将 kubectl 二进制文件夹附加或添加到你的 `PATH` 环境变量中。 1. 验证插件是否安装成功 diff --git a/content/zh/docs/tutorials/_index.md b/content/zh/docs/tutorials/_index.md index a4440f7ba7..2feb4bd56d 100644 --- a/content/zh/docs/tutorials/_index.md +++ b/content/zh/docs/tutorials/_index.md @@ -23,11 +23,10 @@ each of which has a sequence of steps. Before walking through each tutorial, you may want to bookmark the [Standardized Glossary](/docs/reference/glossary/) page for later references. --> -Kubernetes 文档的这一部分包含教程。每个教程展示了如何完成一个比单个 -[任务](/zh/docs/tasks/)更大的目标。 +Kubernetes 文档的这一部分包含教程。 +每个教程展示了如何完成一个比单个[任务](/zh/docs/tasks/)更大的目标。 通常一个教程有几个部分,每个部分都有一系列步骤。在浏览每个教程之前, -您可能希望将[标准化术语表](/zh/docs/reference/glossary/)页面添加到书签,供以后参考。 - +你可能希望将[标准化术语表](/zh/docs/reference/glossary/)页面添加到书签,供以后参考。 ## 基础知识 {#basics} -* [Kubernetes 基础知识](/zh/docs/tutorials/Kubernetes-Basics/)是一个深入的 - 交互式教程,帮助您理解 Kubernetes 系统,并尝试一些基本的 Kubernetes 特性。 - -* [介绍 Kubernetes (edx)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) - +* [Kubernetes 基础知识](/zh/docs/tutorials/Kubernetes-Basics/) + 是一个深入的交互式教程,帮助你理解 Kubernetes 系统,并尝试一些基本的 Kubernetes 特性。 +* [Kubernetes 介绍 (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) * [你好 Minikube](/zh/docs/tutorials/hello-minikube/) -## 集群 {#clusters} - -* [AppArmor](/zh/docs/tutorials/clusters/apparmor/) - -* [seccomp](/zh/docs/tutorials/clusters/seccomp/) - ## 安全 {#security} * [在集群级别应用 Pod 安全标准](/zh/docs/tutorials/security/cluster-level-pss/) * [在名字空间级别应用 Pod 安全标准](/zh/docs/tutorials/security/ns-level-pss/) +* [AppArmor](/zh/docs/tutorials/security/apparmor/) +* [seccomp](/zh/docs/tutorials/security/seccomp/) ## {{% heading "whatsnext" %}} @@ -136,6 +118,6 @@ If you would like to write a tutorial, see [Content Page Types](/docs/contribute/style/page-content-types/) for information about the tutorial page. --> -如果您想编写教程,请参阅[内容页面类型](/zh/docs/contribute/style/page-content-types/) +如果你要编写教程,请参阅[内容页面类型](/zh/docs/contribute/style/page-content-types/) 以获取有关教程页面类型的信息。 diff --git a/content/zh/docs/tutorials/hello-minikube.md b/content/zh/docs/tutorials/hello-minikube.md index 445f806c33..f9c15568c9 100644 --- a/content/zh/docs/tutorials/hello-minikube.md +++ b/content/zh/docs/tutorials/hello-minikube.md @@ -244,10 +244,10 @@ Pod runs a Container based on the provided Docker image. {{< note >}} -有关 `kubectl` 命令的更多信息,请参阅 [kubectl 概述](/zh/docs/reference/kubectl/overview/)。 +有关 `kubectl` 命令的更多信息,请参阅 [kubectl 概述](/zh/docs/reference/kubectl/)。 {{< /note >}}
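For the PATH step in the kubectl-on-Windows changes above, one possible PowerShell sketch is shown below. The `C:\kubectl` folder is an assumed install location rather than anything this page prescribes; adjust it to wherever you placed the binary.

```powershell
# Append the folder that contains kubectl.exe to the current user's PATH.
[Environment]::SetEnvironmentVariable(
  "Path",
  [Environment]::GetEnvironmentVariable("Path", "User") + ";C:\kubectl",
  "User"
)

# Open a new terminal so the updated PATH takes effect, then confirm the client version:
kubectl version --client --output=yaml
```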