Mark AnyVolumeDataSource as stable

pull/49535/head
Sunny 2025-01-30 08:23:13 -08:00 committed by sunnylovestiramisu
commit 11b65ee347
1233 changed files with 76300 additions and 5061 deletions

1
.gitignore vendored
View File

@ -32,7 +32,6 @@ nohup.out
.hugo_build.lock
# Netlify Functions build output
package-lock.json
/functions/
/node_modules/

4
.gitmodules vendored
View File

@ -1,7 +1,3 @@
[submodule "themes/docsy"]
path = themes/docsy
url = https://github.com/google/docsy.git
branch = v0.3.0
[submodule "api-ref-generator"]
path = api-ref-generator
url = https://github.com/kubernetes-sigs/reference-docs

View File

@ -4,7 +4,7 @@
# change is that the Hugo version is now an overridable argument rather than a fixed
# environment variable.
FROM docker.io/library/golang:1.23.0-alpine3.20
FROM docker.io/library/golang:1.23.1-alpine3.20
RUN apk add --no-cache \
curl \
@ -22,11 +22,12 @@ RUN mkdir $HOME/src && \
cd "hugo-${HUGO_VERSION}" && \
go install --tags extended
FROM docker.io/library/golang:1.23.0-alpine3.20
FROM docker.io/library/golang:1.23.1-alpine3.20
RUN apk add --no-cache \
runuser \
git \
gcompat \
openssh-client \
rsync \
npm

View File

@ -7,11 +7,24 @@ NETLIFY_FUNC = $(NODE_BIN)/netlify-lambda
# CONTAINER_ENGINE=podman make container-image
CONTAINER_ENGINE ?= docker
IMAGE_REGISTRY ?= gcr.io/k8s-staging-sig-docs
IMAGE_VERSION=$(shell scripts/hash-files.sh Dockerfile Makefile | cut -c 1-12)
IMAGE_VERSION=$(shell scripts/hash-files.sh Dockerfile Makefile package.json package-lock.json | cut -c 1-12)
CONTAINER_IMAGE = $(IMAGE_REGISTRY)/k8s-website-hugo:v$(HUGO_VERSION)-$(IMAGE_VERSION)
# Mount read-only to allow use with tools like Podman in SELinux mode
# Container targets don't need to write into /src
CONTAINER_RUN = "$(CONTAINER_ENGINE)" run --rm --interactive --tty --volume "$(CURDIR):/src:ro,Z"
CONTAINER_RUN_TTY = "$(CONTAINER_ENGINE)" run --rm --interactive --tty
CONTAINER_HUGO_MOUNTS = \
--read-only \
--mount type=bind,source=$(CURDIR)/.git,target=/src/.git,readonly \
--mount type=bind,source=$(CURDIR)/archetypes,target=/src/archetypes,readonly \
--mount type=bind,source=$(CURDIR)/assets,target=/src/assets,readonly \
--mount type=bind,source=$(CURDIR)/content,target=/src/content,readonly \
--mount type=bind,source=$(CURDIR)/data,target=/src/data,readonly \
--mount type=bind,source=$(CURDIR)/i18n,target=/src/i18n,readonly \
--mount type=bind,source=$(CURDIR)/layouts,target=/src/layouts,readonly \
--mount type=bind,source=$(CURDIR)/static,target=/src/static,readonly \
--mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 \
--mount type=bind,source=$(CURDIR)/hugo.toml,target=/src/hugo.toml,readonly
CCRED=\033[0;31m
CCEND=\033[0m
@ -98,15 +111,13 @@ docker-push: ## Build a multi-architecture image and push that into the registry
container-build: module-check
mkdir -p public
$(CONTAINER_RUN) --read-only \
--mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 \
--mount type=bind,source=$(CURDIR)/public,target=/src/public $(CONTAINER_IMAGE) \
hugo --cleanDestinationDir --buildDrafts --buildFuture --environment preview --noBuildLock
$(CONTAINER_RUN_TTY) $(CONTAINER_HUGO_MOUNTS) $(CONTAINER_IMAGE) \
hugo --destination /tmp/public --cleanDestinationDir --buildDrafts --buildFuture --environment preview --noBuildLock
# no build lock to allow for read-only mounts
container-serve: module-check ## Boot the development server using container.
$(CONTAINER_RUN) --cap-drop=ALL --cap-add=AUDIT_WRITE --read-only \
--mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) \
$(CONTAINER_RUN_TTY) --cap-drop=ALL --cap-add=AUDIT_WRITE $(CONTAINER_HUGO_MOUNTS) \
-p 1313:1313 $(CONTAINER_IMAGE) \
hugo server --buildDrafts --buildFuture --environment development --bind 0.0.0.0 --destination /tmp/public --cleanDestinationDir --noBuildLock
test-examples:

View File

@ -1,11 +1,13 @@
aliases:
sig-docs-blog-owners: # Approvers for blog content
- mrbobbytables
- natalisucks
- nate-double-u
- sftim
sig-docs-blog-reviewers: # Reviewers for blog content
- Gauravpadam
- mrbobbytables
- natalisucks
- nate-double-u
- sftim
sig-docs-website-owners: # Admins for overall website
@ -60,11 +62,11 @@ aliases:
- katcosgrove
- natalisucks
- nate-double-u
- rayandas # RT 1.33 Docs Lead
- reylejano
- salaxander
- sftim
- tengqm
- chanieljdan # RT 1.32 Docs Lead
sig-docs-en-reviews: # PR reviews for English content
- dipesh-rawat
- divya-mohan0209
@ -156,10 +158,10 @@ aliases:
- ysyukr
sig-docs-leads: # Website chairs and tech leads
- divya-mohan0209
- katcosgrove
- natalisucks
- reylejano
- salaxander
- sftim
- tengqm
sig-docs-zh-owners: # Admins for Chinese content
- chenrui333

View File

@ -30,7 +30,7 @@ git clone https://github.com/kubernetes/website.git
cd website
```
The Kubernetes website uses the [Docsy Hugo theme](https://github.com/google/docsy#readme). Even if you plan to run the website in a container, we strongly recommend pulling in the submodule and other development dependencies by running the following:
The Kubernetes website uses git submodules. Even if you plan to run the website in a container, we strongly recommend pulling in the submodule and other development dependencies by running the following:
### Windows

View File

Image file changed (31 KiB before and after; preview not shown).

View File

@ -21,21 +21,25 @@ $(document).ready(function() {
/* Check the presence of a cookie */
let announcement = document.querySelector("#announcement");
let token = `announcement_ack_${announcement.getAttribute('data-announcement-name').replace(/\s/g, '_')}`; // Generate the unique token for announcement
let acknowledged = getCookie(token);
if (acknowledged === "true") {
announcement.remove(); // Remove the announcement if the cookie is set
}
else {
announcement.classList.add('display-announcement') // Display the announcement if the cookie is not set
}
if (announcement) {
let token = `announcement_ack_${announcement.getAttribute('data-announcement-name').replace(/\s/g, '_')}`; // Generate the unique token for announcement
let acknowledged = getCookie(token);
if (acknowledged === "true") {
announcement.remove(); // Remove the announcement if the cookie is set
}
else {
announcement.classList.add('display-announcement') // Display the announcement if the cookie is not set
}
}
/* Driver code to set the cookie */
let button = document.querySelector('#banner-dismiss');
button.removeAttribute('style');
button.addEventListener('click', function() {
setCookie(token, "true",
button.getAttribute('data-ttl')); // Set a cookie with time to live parameter
announcement.remove();
});
if (button) {
button.removeAttribute('style');
button.addEventListener('click', function() {
setCookie(token, "true",
button.getAttribute('data-ttl')); // Set a cookie with time to live parameter
announcement.remove();
});
}
});

File diff suppressed because one or more lines are too long

2
assets/js/toastr-2.1.4.min.js vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -545,16 +545,27 @@ section#cncf {
margin-bottom: 20px;
}
#desktopKCButton {
position: absolute;
// All #desktopKCButton blocks are left for backward compatibility and
// should be removed later, leaving .desktopKCButton only
// See https://github.com/kubernetes/website/pull/49167 for details
#desktopKCButton, .desktopKCButton {
display: inline-block;
font-size: 18px;
background-color: $dark-grey;
border-radius: 8px;
color: $white;
padding: 2px 8px;
margin: 5px;
}
#desktopKCButton {
display: inline;
position: absolute;
padding: 20px 10px 20px 10px;
margin: 0;
}
#desktopKCButton:hover{
#desktopKCButton:hover, .desktopKCButton:hover{
background-color: #ffffff;
color: #326ce5;
transition: 150ms;
@ -567,7 +578,7 @@ section#cncf {
border-radius: 8px;
color: $primary;
padding: 15px 30px 15px 80px;
margin-bottom: 15px;
margin-bottom: 35px;
&:before {
content: "";

View File

@ -98,10 +98,7 @@ body.td-home main[role="main"] > section:first-of-type .content p:first-child {
body.td-404 main .error-details {
max-width: 1100px;
margin-left: auto;
margin-right: auto;
margin-top: 4em;
margin-bottom: 0;
margin: 0 auto;
}
/* Global - Mermaid.js diagrams */
@ -1138,7 +1135,7 @@ body.cid-community #cncf-code-of-conduct h2:after {
color: #fff;
padding: 0;
margin: 0;
width: 100vw;
width: 100%;
}
#caseStudies body > #deprecation-warning, body.cid-casestudies > #deprecation-warning {
padding-top: 32px;
@ -1591,7 +1588,7 @@ html.no-js body div#announcement {
// default background is blue; overrides are possible
color: #fff;
display: none; // When javascript is available, Let javascript handle the state of the announcement
.announcement-main {
margin-left: auto;
margin-right: auto;
@ -1828,7 +1825,7 @@ div.alert > em.javascript-required {
align-items: center;
background-color: #fff;
border: 1px solid #4c4c4c;
border-radius: 20px;
border-radius: 20px;
vertical-align: middle;
flex-grow: 1;
overflow-x: hidden;
@ -1857,7 +1854,7 @@ div.alert > em.javascript-required {
#search .pagefind-ui form input {
background-color: #fff;
border: 1px solid #4c4c4c;
border-radius: 20px;
border-radius: 20px;
overflow-x: hidden;
width: auto;
padding: 6px 10px !important;
@ -1912,7 +1909,7 @@ body.td-search #search {
}
/* CSS for 'figure' full-screen display */
/* Define styles for full-screen overlay */
.figure-fullscreen-overlay {
position: fixed;
@ -1941,7 +1938,7 @@ body.td-search #search {
/* Define styles for close button */
.figure-close-button {
position: absolute;
top: 1%;
top: 1%;
right: 2%;
cursor: pointer;
font-size: calc(5vw + 10px);
@ -1950,10 +1947,24 @@ body.td-search #search {
.code-sample > .copy-code-icon {
cursor: pointer;
text-align: right;
display: flex;
gap:1rem;
justify-content: right;
padding: 0.2rem;
}
.includecode .highlight {
margin-top: 0;
margin-bottom: 0;
}
#toast-container > *{
background: $primary;
}
.toast-success {
background-color: $primary !important;
color: #ffffff !important;
}
// handle main page features on narrow viewports
@media screen and (max-width: 768px) {

View File

@ -1,6 +1,6 @@
$main-max-width: 1200px;
$vendor-strip-height: 44px;
$video-section-height: 550px;
$video-section-height: 580px;
@media screen and (min-width: 1024px) {
@ -96,4 +96,4 @@ $video-section-height: 550px;
min-height: 260px;
}
}
}
}

View File

@ -136,7 +136,7 @@ $video-section-height: 400px;
#video {
height: $video-section-height;
display: block;
height: 500px;
height: 550px;
& > .light-text {
display: block;

View File

@ -41,7 +41,7 @@ In the summer of 2014, Box was feeling the pain of a decade's worth of hardware
<p>Box's cloud native journey accelerated that June, when Ghods attended <a href="https://www.docker.com/events/dockercon">DockerCon</a>. The company had come to the realization that it could no longer run its applications only off bare metal, and was researching containerizing with Docker, virtualizing with OpenStack, and supporting public cloud.</p>
<p>At that conference, Google announced the release of its Kubernetes container management system, and Ghods was won over. "We looked at a lot of different options, but Kubernetes really stood out, especially because of the incredibly strong team of <a href="https://research.google.com/pubs/pub43438.html">Borg</a> veterans and the vision of having a completely infrastructure-agnostic way of being able to run cloud software," he says, referencing Google's internal container orchestrator Borg. "The fact that on day one it was designed to run on bare metal just as well as <a href="https://cloud.google.com/">Google Cloud</a> meant that we could actually migrate to it inside of our data centers, and then use those same tools and concepts to run across public cloud providers as well."</p>
<p>At that conference, Google announced the release of its Kubernetes container management system, and Ghods was won over. "We looked at a lot of different options, but Kubernetes really stood out, especially because of the incredibly strong team of <a href="https://research.google/pubs/large-scale-cluster-management-at-google-with-borg/">Borg</a> veterans and the vision of having a completely infrastructure-agnostic way of being able to run cloud software," he says, referencing Google's internal container orchestrator Borg. "The fact that on day one it was designed to run on bare metal just as well as <a href="https://cloud.google.com/">Google Cloud</a> meant that we could actually migrate to it inside of our data centers, and then use those same tools and concepts to run across public cloud providers as well."</p>
<p>Another plus: Ghods liked that <a href="https://kubernetes.io/">Kubernetes</a> has a universal set of API objects like pod, service, replica set and deployment object, which created a consistent surface to build tooling against. "Even PaaS layers like <a href="https://www.openshift.com/">OpenShift</a> or <a href="http://deis.io/">Deis</a> that build on top of Kubernetes still treat those objects as first-class principles," he says. "We were excited about having these abstractions shared across the entire ecosystem, which would result in a lot more momentum than we saw in other potential solutions."</p>

View File

@ -392,7 +392,7 @@ init কন্টেইনারগুলির জন্য আপনাকে
কুবারনেটিস কেন অন্যান্য রিসোর্সগুলিতে মোড়ানোর প্রসঙ্গটি বোঝার জন্য একটি সাধারণ পড API (যেমন {{< glossary_tooltip text="স্টেটফুল সেট" term_id="statefulset" >}}) বা {{< glossary_tooltip text="ডিপলয়মেন্ট" term_id="deployment">}}) তে , আপনি পূর্ববর্তী আর্ট সম্পর্কে পড়তে পারেন, যার মধ্যে রয়েছে:
* [Aurora](https://aurora.apache.org/documentation/latest/reference/configuration/#job-schema)
* [Borg](https://research.google.com/pubs/pub43438.html)
* [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html)
* [Borg](https://research.google/pubs/large-scale-cluster-management-at-google-with-borg/)
* [Marathon](https://github.com/d2iq-archive/marathon)
* [Omega](https://research.google/pubs/pub41684/)
* [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/)।

View File

@ -16,7 +16,7 @@ no_list: true
- [Kubespray](https://kubespray.io/):
[Ansible](https://docs.ansible.com/) প্লেবুকের একটি রচনা,
[ইনভেন্টরি](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md#inventory),
[ইনভেন্টরি](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible/inventory.md),
প্রভিশনিং টুলস, এবং জেনেরিক ওস/কুবারনেটিস ক্লাস্টার কনফিগারেশন ব্যবস্থাপনা কাজের
জন্য ডোমেন জ্ঞান । আপনি স্ল্যাক চ্যানেলে সম্প্রদায়ের সাথে যোগাযোগ করতে পারেন
[#kubespray](https://kubernetes.slack.com/messages/kubespray/)।

View File

@ -25,7 +25,6 @@ content_type: ধারণা
## কনফিগারেশন
* [উদাহরণ: একটি জাভা মাইক্রোসার্ভিস কনফিগার কর](/docs/tutorials/configuration/configure-java-microservice/)
* [কনফিগার ম্যাপ ব্যবহার করে রেডিস কনফিগার কর](/docs/tutorials/configuration/configure-redis-using-configmap/)
## স্টেটলেস অ্যাপ্লিকেশন

View File

@ -41,14 +41,13 @@ Kubernetes ist Open Source und bietet Dir die Freiheit, die Infrastruktur vor Or
<h2>Die Herausforderungen bei der Migration von über 150 Microservices auf Kubernetes</h2>
<p>Von Sarah Wells, technische Direktorin für Betrieb und Zuverlässigkeit, Financial Times</p>
<button id="desktopShowVideoButton" onclick="kub.showVideo()">Video ansehen</button>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/" button id="desktopKCButton">Besuche die KubeCon + CloudNativeCon Europe vom 19. bis 22. M&auml;rz 2024</a>
<br>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america-2024/" button id="desktopKCButton">Besuche die KubeCon + CloudNativeCon North America vom 12. bis 15. November 2024</a>
<h3>Nehmen Sie an der kommenden KubeCon + CloudNativeCon teil</h3>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/" class="desktopKCButton"><strong>Europe</strong> (London, Apr 1-4)</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-china/" class="desktopKCButton"><strong>China</strong> (Hongkong, Jun 10-11)</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-japan/" class="desktopKCButton"><strong>Japan</strong> (Tokio, Jun 16-17)</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-india/" class="desktopKCButton"><strong>India</strong> (Hyderabad, Aug 6-7)</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america-2025/" class="desktopKCButton"><strong>North America</strong> (Atlanta, Nov 10-13)</a>
</div>
<div id="videoPlayer">
<iframe data-url="https://www.youtube.com/embed/H06qrNmGqyE?autoplay=1" frameborder="0" allowfullscreen></iframe>

View File

@ -363,7 +363,7 @@ andere Ressourcen, wie z. B.
oder {{< glossary_tooltip text="Deployments" term_id="deployment" >}} einbindet,
kannst du Artikel zu früheren Technologien lesen, unter anderem:
* [Aurora](https://aurora.apache.org/documentation/latest/reference/configuration/#job-schema)
* [Borg](https://research.google.com/pubs/pub43438.html)
* [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html)
* [Borg](https://research.google/pubs/large-scale-cluster-management-at-google-with-borg/)
* [Marathon](https://github.com/d2iq-archive/marathon)
* [Omega](https://research.google/pubs/pub41684/)
* [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/).

View File

@ -42,20 +42,16 @@ To download Kubernetes, visit the [download](/releases/download/) section.
{{< blocks/section id="video" background-image="kub_video_banner_homepage" >}}
<div class="light-text">
<h2>The Challenges of Migrating 150+ Microservices to Kubernetes</h2>
<h2>The Challenges of Migrating 150+ Microservices to Kubernetes</h2>
<p>By Sarah Wells, Technical Director for Operations and Reliability, Financial Times</p>
<button id="desktopShowVideoButton" onclick="kub.showVideo()">Watch Video</button>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/" button id="desktopKCButton">Attend KubeCon + CloudNativeCon North America on November 12-15</a>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-india/" button id="desktopKCButton">Attend KubeCon + CloudNativeCon India on December 11-12</a>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/" button id="desktopKCButton">Attend KubeCon + CloudNativeCon Europe on April 1-4, 2025</a>
<h3>Attend upcoming KubeCon + CloudNativeCon events</h3>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/" class="desktopKCButton"><strong>Europe</strong> (London, Apr 1-4)</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-china/" class="desktopKCButton"><strong>China</strong> (Hong Kong, Jun 10-11)</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-japan/" class="desktopKCButton"><strong>Japan</strong> (Tokyo, Jun 16-17)</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-india/" class="desktopKCButton"><strong>India</strong> (Hyderabad, Aug 6-7)</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america-2025/" class="desktopKCButton"><strong>North America</strong> (Atlanta, Nov 10-13)</a>
</div>
<div id="videoPlayer">
<iframe data-url="https://www.youtube.com/embed/H06qrNmGqyE?autoplay=1" frameborder="0" allowfullscreen></iframe>

View File

@ -19,7 +19,7 @@ Real time visualization is a strength that UIs have over CLIs, and with 1.
**Logs**
Based on user research with Kubernetes predecessor&nbsp;[Borg](http://research.google.com/pubs/pub43438.html)&nbsp;and continued community feedback, we know logs are tremendously important to users. For this reason we're constantly looking for ways to improve these features in Dashboard. This release includes a fix for an issue wherein large numbers of logs would crash the system, as well as the introduction of the ability to view logs by date.
Based on user research with Kubernetes predecessor&nbsp;[Borg](https://research.google/pubs/large-scale-cluster-management-at-google-with-borg/)&nbsp;and continued community feedback, we know logs are tremendously important to users. For this reason we're constantly looking for ways to improve these features in Dashboard. This release includes a fix for an issue wherein large numbers of logs would crash the system, as well as the introduction of the ability to view logs by date.
**Showing More Resources**
The previous release brought all workloads to Dashboard: Pods, Pet Sets, Daemon Sets, Replication Controllers, Replica Set, Services, & Deployments. With 1.4, we expand upon that set of objects by including Services, Ingresses, Persistent Volume Claims, Secrets, & ConfigMaps. Weve also introduced an “Admin” section with the Namespace-independent global objects of Namespaces, Nodes, and Persistent Volumes. With the addition of roles, these will be shown only to cluster operators, and developers side nav will begin with the Namespace dropdown.

View File

@ -42,7 +42,7 @@ NAMESPACE=kubeflow
kubectl create namespace ${NAMESPACE}
VERSION=v0.1.3
# Initialize a ksonnet app. Set the namespace for it's default environment.
# Initialize a ksonnet app. Set the namespace for its default environment.
APP_NAME=my-kubeflow
ks init ${APP_NAME}
cd ${APP_NAME}

View File

@ -41,7 +41,7 @@ spec:
If the `service.kubernetes.io/service-proxy-name` label is defined the
`kube-proxy` will ignore the service. A custom controller can watch
services with the label set to it's own name, "kpng-example" in
services with the label set to its own name, "kpng-example" in
this example, and setup specialized load-balancing.
The `service.kubernetes.io/service-proxy-name` label is [not

View File

@ -396,8 +396,8 @@ happily, [Mike Morris] has returned to the role. We're grateful for everything
Keith has done, and excited to have Mattia and Mike on board.
[Mattia Lavacca]: https://github.com/mlavacca
[Keith Mattix]: https://github.com/@keithmattix
[Mike Morris]: https://github.com/@mikemorris
[Keith Mattix]: https://github.com/keithmattix
[Mike Morris]: https://github.com/mikemorris
## Try it out

View File

@ -30,7 +30,7 @@ each release cycle is a journey, and just like Penelope, in "The Odyssey",
weaved for 10 years -- each night removing parts of what she had done during the day --
so does each release add new features and removes others, albeit here with a much
clearer purpose of constantly improving Kubernetes.
With v1.32 being the last release in the year Kubernetes marks it's first decade anniversary,
With v1.32 being the last release in the year Kubernetes marks its first decade anniversary,
we wanted to honour all of those that have been part of the global Kubernetes crew
that roams the cloud-native seas through perils and challenges:
may we continue to weave the future of Kubernetes together.
@ -478,7 +478,7 @@ Antigua Guatemala, Guatemala
## Upcoming release webinar
Join members of the Kubernetes v1.32 release team on **Thursday, January 9th 2024 at 5:00 PM (UTC)**, to learn about the
Join members of the Kubernetes v1.32 release team on **Thursday, January 9th 2025 at 5:00 PM (UTC)**, to learn about the
release highlights of this release, as well as deprecations and removals to help plan for upgrades.
For more information and registration, visit the [event
page](https://community.cncf.io/events/details/cncf-cncf-online-programs-presents-cncf-live-webinar-kubernetes-132-release/)

View File

@ -12,7 +12,7 @@ component that selects the nodes on which new Pods run. The scheduler processes
these new Pods **one by one**. Therefore, the larger your clusters, the more important
the throughput of the scheduler becomes.
Over the years, the Kubernetes project (and SIG Scheduling in particular) has improved the throughput
Over the years, Kubernetes SIG Scheduling has improved the throughput
of the scheduler in multiple enhancements. This blog post describes a major improvement to the
scheduler in Kubernetes v1.32: a
[scheduling context element](/docs/concepts/scheduling-eviction/scheduling-framework/#extension-points)

View File

@ -19,7 +19,7 @@ This can be explained by the differing nature of resource consumption by a singl
This situation poses a genuine risk, potentially overwhelming and crashing any kube-apiserver within seconds due to out-of-memory (OOM) conditions. To better visualize the issue, let's consider the below graph.
{{< figure src="kube-apiserver-memory_usage.png" alt="Monitoring graph showing kube-apiserver memory usage" >}}
{{< figure src="kube-apiserver-memory_usage.png" alt="Monitoring graph showing kube-apiserver memory usage" class="diagram-large" clicktozoom="true" >}}
The graph shows the memory usage of a kube-apiserver during a synthetic test.
(see the [synthetic test](#the-synthetic-test) section for more details).

Binary image file changed (75 KiB before, 65 KiB after; preview not shown).

View File

@ -0,0 +1,153 @@
---
layout: blog
title: "Spotlight on SIG Architecture: Enhancements"
slug: sig-architecture-enhancements
canonicalUrl: https://www.kubernetes.dev/blog/2025/01/21/sig-architecture-enhancements
date: 2025-01-21
author: "Frederico Muñoz (SAS Institute)"
---
_This is the fourth interview of a SIG Architecture Spotlight series that will cover the different
subprojects, and we will be covering [SIG Architecture:
Enhancements](https://github.com/kubernetes/community/blob/master/sig-architecture/README.md#enhancements)._
In this SIG Architecture spotlight we talked with [Kirsten
Garrison](https://github.com/kikisdeliveryservice), lead of the Enhancements subproject.
## The Enhancements subproject
**Frederico (FSM): Hi Kirsten, very happy to have the opportunity to talk about the Enhancements
subproject. Let's start with some quick information about yourself and your role.**
**Kirsten Garrison (KG)**: I'm a lead of the Enhancements subproject of SIG-Architecture and
currently work at Google. I first got involved by contributing to the service-catalog project with
the help of [Carolyn Van Slyck](https://github.com/carolynvs). With time, [I joined the Release
team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.17/release_team.md),
eventually becoming the Enhancements Lead and a Release Lead shadow. While on the release team, I
worked on some ideas to make the process better for the SIGs and Enhancements team (the opt-in
process) based on my team's experiences. Eventually, I started attending Subproject meetings and
contributing to the Subproject's work.
**FSM: You mentioned the Enhancements subproject: how would you describe its main goals and areas of
intervention?**
**KG**: The [Enhancements
Subproject](https://github.com/kubernetes/community/blob/master/sig-architecture/README.md#enhancements)
primarily concerns itself with the [Kubernetes Enhancement
Proposal](https://github.com/kubernetes/enhancements/blob/master/keps/sig-architecture/0000-kep-process/README.md)
(_KEP_ for short)—the "design" documents required for all features and significant changes
to the Kubernetes project.
## The KEP and its impact
**FSM: The improvement of the KEP process was (and is) one in which SIG Architecture was heavily
involved. Could you explain the process to those that aren't aware of it?**
**KG**: [Every release](https://kubernetes.io/releases/release/#the-release-cycle), the SIGs let the
Release Team know which features they intend to work on to be put into the release. As mentioned
above, the prerequisite for these changes is a KEP - a standardized design document that all authors
must fill out and approve in the first weeks of the release cycle. Most features [will move
through 3
phases](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-stages):
alpha, beta, and finally GA, so approving a feature represents a significant commitment for the SIG.
The KEP serves as the full source of truth of a feature. The [KEP
template](https://github.com/kubernetes/enhancements/blob/master/keps/NNNN-kep-template/README.md)
has different requirements based on what stage a feature is in, but it generally requires a detailed
discussion of the design and the impact as well as providing artifacts of stability and
performance. The KEP takes quite a bit of iterative work between authors, SIG reviewers, API review
team and the Production Readiness Review team[^1] before it is approved. Each set of reviewers is
looking to make sure that the proposal meets their standards in order to have a stable and
performant Kubernetes release. Only after all approvals are secured can an author go forth and
merge their feature in the Kubernetes code base.
**FSM: I see, quite a bit of additional structure was added. Looking back, what were the most
significant improvements of that approach?**
**KG**: In general, I think that the improvements with the most impact had to do with focusing on
the core intent of the KEP. KEPs exist not just to memorialize designs, but provide a structured way
to discuss and come to an agreement about different facets of the change. At the core of the KEP
process is communication and consideration.
To that end, some of the significant changes revolve around a more detailed and accessible KEP
template. A significant amount of work was put in over time to get the
[k/enhancements](https://github.com/kubernetes/enhancements) repo into its current form -- a
directory structure organized by SIG with the contours of the modern KEP template (with
Proposal/Motivation/Design Details subsections). We might take that basic structure for granted
today, but it really represents the work of many people trying to get the foundation of this process
in place over time.
As Kubernetes matures, we've needed to think about more than just the end goal of getting a single
feature merged. We need to think about things like: stability, performance, setting and meeting user
expectations. And as we've thought about those things, the template has grown more detailed. The
addition of the Production Readiness Review was major, as well as the enhanced testing requirements
(varying at different stages of a KEP's lifecycle).
## Current areas of focus
**FSM: Speaking of maturing, we've [recently released Kubernetes
v1.31](https://kubernetes.io/blog/2024/08/13/kubernetes-v1-31-release/), and work on v1.32 [has
started](https://github.com/fsmunoz/sig-release/tree/release-1.32/releases/release-1.32). Are there
any areas that the Enhancements sub-project is currently addressing that might change the way things
are done?**
**KG**: We're currently working on two things:
1) _Creating a Process KEP template._ Sometimes people want to harness the KEP process for
significant changes that are more process oriented rather than feature oriented. We want to
support this because memorializing changes is important and giving people a better tool to do so
will only encourage more discussion and transparency.
2) _KEP versioning._ While our template changes aim to be as non-disruptive as possible, we
believe that it will be easier to track and communicate those changes to the community with
a versioned KEP template and the policies that go alongside such versioning.
Both features will take some time to get right and fully roll out (just like a KEP feature) but we
believe that they will both provide improvements that will benefit the community at large.
**FSM: You mentioned improvements: I remember when project boards for Enhancement tracking were
introduced in recent releases, to great effect and unanimous applause from release team members. Was
this a particular area of focus for the subproject?**
**KG**: The Subproject provided support to the Release Team's Enhancement team in the migration away
from using the spreadsheet to a project board. The collection and tracking of enhancements has
always been a logistical challenge. During my time on the Release Team, I helped with the transition
to an opt-in system of enhancements, whereby the SIG leads "opt-in" KEPs for release tracking. This
helped to enhance communication between authors and SIGs before any significant work was undertaken
on a KEP and removed toil from the Enhancements team. This change used the existing tools to avoid
introducing too many changes at once to the community. Later, the Release Team approached the
Subproject with an idea of leveraging GitHub Project Boards to further improve the collection
process. This was to be a move away from the use of complicated spreadsheets to using repo-native
labels on [k/enhancement](https://github.com/kubernetes/enhancements) issues and project boards.
**FSM: That surely adds an impact on simplifying the workflow...**
**KG**: Removing sources of friction and promoting clear communication is very important to the
Enhancements Subproject. At the same time, it's important to give careful
consideration to decisions that impact the community as a whole. We want to make sure that changes are
balanced so that they give an upside without causing regressions or pain in the rollout. We supported the
Release Team in ideation as well as through the actual migration to the project boards. It was a
great success and exciting to see the team make high impact changes that helped everyone involved in
the KEP process!
## Getting involved
**FSM: For those reading that might be curious and interested in helping, how would you describe the
required skills for participating in the sub-project?**
**KG**: Familiarity with KEPs either via experience or taking time to look through the
kubernetes/enhancements repo is helpful. All are welcome to participate if interested - we can take
it from there.
**FSM: Excellent! Many thanks for your time and insight -- any final comments you would like to
share with our readers?**
**KG**: The Enhancements process is one of the most important parts of Kubernetes and requires
enormous amounts of coordination and collaboration of people and teams across the project to make it
successful. I'm thankful and inspired by everyone's continued hard work and dedication to making the
project great. This is truly a wonderful community.
[^1]: For more information, check the [Production Readiness Review spotlight
interview](https://kubernetes.io/blog/2023/11/02/sig-architecture-production-readiness-spotlight-2023/)
in this series.

View File

@ -41,7 +41,7 @@ In the summer of 2014, Box was feeling the pain of a decade's worth of hardware
<p>Box's cloud native journey accelerated that June, when Ghods attended <a href="https://www.docker.com/events/dockercon">DockerCon</a>. The company had come to the realization that it could no longer run its applications only off bare metal, and was researching containerizing with Docker, virtualizing with OpenStack, and supporting public cloud.</p>
<p>At that conference, Google announced the release of its Kubernetes container management system, and Ghods was won over. "We looked at a lot of different options, but Kubernetes really stood out, especially because of the incredibly strong team of <a href="https://research.google.com/pubs/pub43438.html">Borg</a> veterans and the vision of having a completely infrastructure-agnostic way of being able to run cloud software," he says, referencing Google's internal container orchestrator Borg. "The fact that on day one it was designed to run on bare metal just as well as <a href="https://cloud.google.com/">Google Cloud</a> meant that we could actually migrate to it inside of our data centers, and then use those same tools and concepts to run across public cloud providers as well."</p>
<p>At that conference, Google announced the release of its Kubernetes container management system, and Ghods was won over. "We looked at a lot of different options, but Kubernetes really stood out, especially because of the incredibly strong team of <a href="https://research.google/pubs/large-scale-cluster-management-at-google-with-borg/">Borg</a> veterans and the vision of having a completely infrastructure-agnostic way of being able to run cloud software," he says, referencing Google's internal container orchestrator Borg. "The fact that on day one it was designed to run on bare metal just as well as <a href="https://cloud.google.com/">Google Cloud</a> meant that we could actually migrate to it inside of our data centers, and then use those same tools and concepts to run across public cloud providers as well."</p>
<p>Another plus: Ghods liked that <a href="https://kubernetes.io/">Kubernetes</a> has a universal set of API objects like pod, service, replica set and deployment object, which created a consistent surface to build tooling against. "Even PaaS layers like <a href="https://www.openshift.com/">OpenShift</a> or <a href="http://deis.io/">Deis</a> that build on top of Kubernetes still treat those objects as first-class principles," he says. "We were excited about having these abstractions shared across the entire ecosystem, which would result in a lot more momentum than we saw in other potential solutions."</p>

View File

@ -49,7 +49,7 @@ Existence of kube-apiserver leases enables future capabilities that may require
each kube-apiserver.
You can inspect Leases owned by each kube-apiserver by checking for lease objects in the `kube-system` namespace
with the name `kube-apiserver-<sha256-hash>`. Alternatively you can use the label selector `apiserver.kubernetes.io/identity=kube-apiserver`:
with the name `apiserver-<sha256-hash>`. Alternatively you can use the label selector `apiserver.kubernetes.io/identity=kube-apiserver`:
```shell
kubectl -n kube-system get lease -l apiserver.kubernetes.io/identity=kube-apiserver

View File

@ -89,7 +89,7 @@ installation instructions. The list does not try to be exhaustive.
## Service Discovery
* [CoreDNS](https://coredns.io) is a flexible, extensible DNS server which can
be [installed](https://github.com/coredns/deployment/tree/master/kubernetes)
be [installed](https://github.com/coredns/helm)
as the in-cluster DNS for pods.
## Visualization &amp; Control

View File

@ -5,25 +5,27 @@ weight: 10
---
<!-- overview -->
In a Kubernetes cluster, a {{< glossary_tooltip text="node" term_id="node" >}}
can be shutdown in a planned graceful way or unexpectedly because of reasons such
can be shut down in a planned graceful way or unexpectedly because of reasons such
as a power outage or something else external. A node shutdown could lead to workload
failure if the node is not drained before the shutdown. A node shutdown can be
either **graceful** or **non-graceful**.
<!-- body -->
## Graceful node shutdown {#graceful-node-shutdown}
{{< feature-state feature_gate_name="GracefulNodeShutdown" >}}
The kubelet attempts to detect node system shutdown and terminates pods running on the node.
Kubelet ensures that pods follow the normal
kubelet ensures that pods follow the normal
[pod termination process](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)
during the node shutdown. During node shutdown, the kubelet does not accept new
Pods (even if those Pods are already bound to the node).
The Graceful node shutdown feature depends on systemd since it takes advantage of
The graceful node shutdown feature depends on systemd since it takes advantage of
[systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/) to
delay the node shutdown with a given duration.
@ -32,12 +34,12 @@ Graceful node shutdown is controlled with the `GracefulNodeShutdown`
enabled by default in 1.21.
Note that by default, both configuration options described below,
`shutdownGracePeriod` and `shutdownGracePeriodCriticalPods` are set to zero,
`shutdownGracePeriod` and `shutdownGracePeriodCriticalPods`, are set to zero,
thus not activating the graceful node shutdown functionality.
To activate the feature, the two kubelet config settings should be configured appropriately and
To activate the feature, both options should be configured appropriately and
set to non-zero values.
Once systemd detects or notifies node shutdown, the kubelet sets a `NotReady` condition on
Once systemd detects or is notified of a node shutdown, the kubelet sets a `NotReady` condition on
the Node, with the `reason` set to `"node is shutting down"`. The kube-scheduler honors this condition
and does not schedule any Pods onto the affected node; other third-party schedulers are
expected to follow the same logic. This means that new Pods won't be scheduled onto that node
@ -48,26 +50,29 @@ node shutdown has been detected, so that even Pods with a
{{< glossary_tooltip text="toleration" term_id="toleration" >}} for
`node.kubernetes.io/not-ready:NoSchedule` do not start there.
At the same time when kubelet is setting that condition on its Node via the API,
When kubelet is setting that condition on its Node via the API,
the kubelet also begins terminating any Pods that are running locally.
During a graceful shutdown, kubelet terminates pods in two phases:
1. Terminate regular pods running on the node.
2. Terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)
1. Terminate [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)
running on the node.
Graceful node shutdown feature is configured with two
The graceful node shutdown feature is configured with two
[`KubeletConfiguration`](/docs/tasks/administer-cluster/kubelet-config-file/) options:
* `shutdownGracePeriod`:
* Specifies the total duration that the node should delay the shutdown by. This is the total
grace period for pod termination for both regular and
[critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical).
* `shutdownGracePeriodCriticalPods`:
* Specifies the duration used to terminate
[critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)
during a node shutdown. This value should be less than `shutdownGracePeriod`.
- `shutdownGracePeriod`:
Specifies the total duration that the node should delay the shutdown by. This is the total
grace period for pod termination for both regular and
[critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical).
- `shutdownGracePeriodCriticalPods`:
Specifies the duration used to terminate
[critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)
during a node shutdown. This value should be less than `shutdownGracePeriod`.
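For illustration only, the two settings above might be combined in a kubelet configuration file like this (a sketch, not part of this change; the durations are arbitrary examples):

```yaml
# Sketch: enable graceful node shutdown with a 30s total delay,
# of which the final 10s are reserved for critical pods.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
```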
{{< note >}}
@ -122,22 +127,22 @@ Assuming the following custom pod
[priority classes](/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass)
in a cluster,
|Pod priority class name|Pod priority class value|
|-------------------------|------------------------|
|`custom-class-a` | 100000 |
|`custom-class-b` | 10000 |
|`custom-class-c` | 1000 |
|`regular/unset` | 0 |
| Pod priority class name | Pod priority class value |
| ----------------------- | ------------------------ |
| `custom-class-a` | 100000 |
| `custom-class-b` | 10000 |
| `custom-class-c` | 1000 |
| `regular/unset` | 0 |
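As a hedged aside (not taken from this page), one of the custom classes in the table above could be defined like this:

```yaml
# Sketch: a PriorityClass matching the first row of the table above.
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: custom-class-a
value: 100000
globalDefault: false
description: "Illustrative priority class for graceful-shutdown examples."
```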
Within the [kubelet configuration](/docs/reference/config-api/kubelet-config.v1beta1/)
the settings for `shutdownGracePeriodByPodPriority` could look like:
|Pod priority class value|Shutdown period|
|------------------------|---------------|
| 100000 |10 seconds |
| 10000 |180 seconds |
| 1000 |120 seconds |
| 0 |60 seconds |
| Pod priority class value | Shutdown period |
| ------------------------ | --------------- |
| 100000 | 10 seconds |
| 10000 | 180 seconds |
| 1000 | 120 seconds |
| 0 | 60 seconds |
The corresponding kubelet config YAML configuration would be:
@ -154,18 +159,18 @@ shutdownGracePeriodByPodPriority:
```
The above table implies that any pod with `priority` value >= 100000 will get
just 10 seconds to stop, any pod with value >= 10000 and < 100000 will get 180
seconds to stop, any pod with value >= 1000 and < 10000 will get 120 seconds to stop.
Finally, all other pods will get 60 seconds to stop.
just 10 seconds to shut down, any pod with value >= 10000 and < 100000 will get 180
seconds to shut down, any pod with value >= 1000 and < 10000 will get 120 seconds to shut down.
Finally, all other pods will get 60 seconds to shut down.
One doesn't have to specify values corresponding to all of the classes. For
example, you could instead use these settings:
|Pod priority class value|Shutdown period|
|------------------------|---------------|
| 100000 |300 seconds |
| 1000 |120 seconds |
| 0 |60 seconds |
| Pod priority class value | Shutdown period |
| ------------------------ | --------------- |
| 100000 | 300 seconds |
| 1000 | 120 seconds |
| 0 | 60 seconds |
In the above case, the pods with `custom-class-b` will go into the same bucket
as `custom-class-c` for shutdown.
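For reference, a sketch of a kubelet configuration fragment that would match the second table above (illustrative only; field names follow the KubeletConfiguration v1beta1 format):

```yaml
# Sketch: three shutdown buckets corresponding to the table above.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
shutdownGracePeriodByPodPriority:
- priority: 100000
  shutdownGracePeriodSeconds: 300
- priority: 1000
  shutdownGracePeriodSeconds: 120
- priority: 0
  shutdownGracePeriodSeconds: 60
```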
@ -225,14 +230,16 @@ on a different node.
During a non-graceful shutdown, Pods are terminated in two phases:
1. Force delete the Pods that do not have matching `out-of-service` tolerations.
2. Immediately perform detach volume operation for such pods.
1. Immediately perform detach volume operation for such pods.
{{< note >}}
- Before adding the taint `node.kubernetes.io/out-of-service`, it should be verified
that the node is already in shutdown or power off state (not in the middle of restarting).
- The user is required to manually remove the out-of-service taint after the pods are
moved to a new node and the user has checked that the shutdown node has been
recovered since the user was the one who originally added the taint.
{{< /note >}}
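A sketch of how the `node.kubernetes.io/out-of-service` taint described above appears on the Node object once applied (the node name and taint value are placeholders):

```yaml
# Sketch: Node carrying the out-of-service taint used for non-graceful shutdown handling.
apiVersion: v1
kind: Node
metadata:
  name: example-node            # placeholder node name
spec:
  taints:
  - key: node.kubernetes.io/out-of-service
    value: nodeshutdown
    effect: NoExecute
```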
### Forced storage detach on timeout {#storage-force-detach-on-timeout}
@ -256,39 +263,41 @@ its associated
[VolumeAttachment](/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1/)
deleted.
After this setting has been applied, unhealthy pods still attached to a volumes must be recovered
After this setting has been applied, unhealthy pods still attached to volumes must be recovered
via the [Non-Graceful Node Shutdown](#non-graceful-node-shutdown) procedure mentioned above.
{{< note >}}
- Caution must be taken while using the [Non-Graceful Node Shutdown](#non-graceful-node-shutdown) procedure.
- Deviation from the steps documented above can result in data corruption.
{{< /note >}}
{{< /note >}}
## Windows Graceful node shutdown {#windows-graceful-node-shutdown}
{{< feature-state feature_gate_name="WindowsGracefulNodeShutdown" >}}
The Windows graceful node shutdown feature depends on kubelet running as a Windows service,
it will then have a registered [service control handler](https://learn.microsoft.com/en-us/windows/win32/services/service-control-handler-function)
to delay the presshutdown event with a given duration.
The Windows graceful node shutdown feature depends on kubelet running as a Windows service;
it will then have a registered [service control handler](https://learn.microsoft.com/en-us/windows/win32/services/service-control-handler-function)
to delay the preshutdown event with a given duration.
Windows graceful node shutdown is controlled with the `WindowsGracefulNodeShutdown`
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
Windows graceful node shutdown is controlled with the `WindowsGracefulNodeShutdown`
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
which is introduced in 1.32 as an alpha feature.
Windows graceful node shutdown can not be cancelled.
If Kubelet is not running as a Windows service, it will not be able to set and monitor
If kubelet is not running as a Windows service, it will not be able to set and monitor
the [Preshutdown](https://learn.microsoft.com/en-us/windows/win32/api/winsvc/ns-winsvc-service_preshutdown_info) event,
the node will have to go through the [Non-Graceful Node Shutdown](#non-graceful-node-shutdown) procedure mentioned above.
In the case where the Windows graceful node shutdown feature is enabled, but the kubelet is not
running as a Windows service, the kubelet will continue running instead of failing. However,
In the case where the Windows graceful node shutdown feature is enabled, but the kubelet is not
running as a Windows service, the kubelet will continue running instead of failing. However,
it will log an error indicating that it needs to be run as a Windows service.
## {{% heading "whatsnext" %}}
Learn more about the following:
* Blog: [Non-Graceful Node Shutdown](/blog/2023/08/16/kubernetes-1-28-non-graceful-node-shutdown-ga/).
* Cluster Architecture: [Nodes](/docs/concepts/architecture/nodes/).
- Blog: [Non-Graceful Node Shutdown](/blog/2023/08/16/kubernetes-1-28-non-graceful-node-shutdown-ga/).
- Cluster Architecture: [Nodes](/docs/concepts/architecture/nodes/).

View File

@ -170,9 +170,12 @@ Once a pod reaches completion (has a `restartPolicy` of `Never` or `OnFailure` a
the series is no longer reported since the scheduler is now free to schedule other pods to run.
The two metrics are called `kube_pod_resource_request` and `kube_pod_resource_limit`.
The metrics are exposed at the HTTP endpoint `/metrics/resources` and require the same
authorization as the `/metrics` endpoint on the scheduler. You must use the
`--show-hidden-metrics-for-version=1.20` flag to expose these alpha stability metrics.
The metrics are exposed at the HTTP endpoint `/metrics/resources`. They require
authorization for the `/metrics/resources` endpoint, usually granted by a
ClusterRole with the `get` verb for the `/metrics/resources` non-resource URL.
On Kubernetes 1.21 you must use the `--show-hidden-metrics-for-version=1.20`
flag to expose these alpha stability metrics.
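A sketch of the kind of authorization the paragraph above refers to; the ClusterRole name is illustrative:

```yaml
# Sketch: grant read access to the scheduler's /metrics/resources endpoint.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: scheduler-resource-metrics-reader   # illustrative name
rules:
- nonResourceURLs: ["/metrics/resources"]
  verbs: ["get"]
```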
## Disabling metrics

View File

@ -14,7 +14,7 @@ weight: 90
System component traces record the latency of and relationships between operations in the cluster.
Kubernetes components emit traces using the
[OpenTelemetry Protocol](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#opentelemetry-protocol-specification)
[OpenTelemetry Protocol](https://opentelemetry.io/docs/specs/otlp/)
with the gRPC exporter and can be collected and routed to tracing backends using an
[OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector#-opentelemetry-collector).

View File

@ -114,7 +114,7 @@ resource requests/limits of that type for each container in the Pod.
{{< feature-state feature_gate_name="PodLevelResources" >}}
Starting in Kubernetes 1.32, you can also specify resource requests and limits at
the Pod level. the Pod level. At Pod level, Kubernetes {{< skew currentVersion >}}
the Pod level. At the Pod level, Kubernetes {{< skew currentVersion >}}
only supports resource requests or limits for specific resource types: `cpu` and /
or `memory`. This feature is currently in alpha and with the feature enabled,
Kubernetes allows you to declare an overall resource budget for the Pod, which is
@ -931,4 +931,4 @@ memory limit (and possibly request) for that container.
and its [resource requirements](/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources)
* Read about [project quotas](https://www.linux.org/docs/man8/xfs_quota.html) in XFS
* Read more about the [kube-scheduler configuration reference (v1)](/docs/reference/config-api/kube-scheduler-config.v1/)
* Read more about [Quality of Service classes for Pods](/docs/concepts/workloads/pods/pod-qos/)
* Read more about [Quality of Service classes for Pods](/docs/concepts/workloads/pods/pod-qos/)
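To illustrate the pod-level `resources` field described earlier on this page, here is a sketch; the field shape follows the v1.32 alpha feature, and names and values are illustrative:

```yaml
# Sketch: a Pod declaring an overall resource budget at the Pod level
# (requires the PodLevelResources feature gate).
apiVersion: v1
kind: Pod
metadata:
  name: pod-level-resources-demo   # illustrative name
spec:
  resources:                       # budget shared by all containers in the Pod
    requests:
      cpu: "1"
      memory: 512Mi
    limits:
      cpu: "2"
      memory: 1Gi
  containers:
  - name: app
    image: registry.k8s.io/pause:3.8
```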

View File

@ -66,7 +66,7 @@ Enable the `PodLifecycleSleepActionAllowZero` feature gate if you want to set a
When a Container lifecycle management hook is called,
the Kubernetes management system executes the handler according to the hook action,
`httpGet` , `tcpSocket` and `sleep` are executed by the kubelet process, and `exec` is executed in the container.
`httpGet`, `tcpSocket` ([deprecated](/docs/reference/generated/kubernetes-api/v1.31/#lifecyclehandler-v1-core)) and `sleep` are executed by the kubelet process, and `exec` is executed in the container.
The `PostStart` hook handler call is initiated when a container is created,
meaning the container ENTRYPOINT and the `PostStart` hook are triggered simultaneously.
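A sketch showing two of the handler types mentioned above, an `exec` postStart hook and a `sleep` preStop hook (the image and commands are illustrative):

```yaml
# Sketch: the postStart exec runs inside the container; the preStop sleep is handled by the kubelet.
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-hooks-demo   # illustrative name
spec:
  containers:
  - name: app
    image: nginx               # illustrative image
    lifecycle:
      postStart:
        exec:
          command: ["sh", "-c", "echo started > /tmp/started"]
      preStop:
        sleep:
          seconds: 5
```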

View File

@ -69,7 +69,7 @@ metadata:
spec:
containers:
- name: demo-container-1
image: registry.k8s.io/pause:2.0
image: registry.k8s.io/pause:3.8
resources:
limits:
hardware-vendor.example/foo: 2

View File

@ -86,7 +86,7 @@ Without indicating the resource type using the `Accept` header, the default
response for the `/api` and `/apis` endpoint is an unaggregated discovery
document.
The [discovery document](https://github.com/kubernetes/kubernetes/blob/release-{{< skew currentVersion >}}/api/discovery/aggregated_v2beta1.json)
The [discovery document](https://github.com/kubernetes/kubernetes/blob/release-{{< skew currentVersion >}}/api/discovery/aggregated_v2.json)
for the built-in resources can be found in the Kubernetes GitHub repository.
This Github document can be used as a reference of the base set of the available resources
if a Kubernetes cluster is not available to query.
@ -192,6 +192,12 @@ request headers as follows:
</tbody>
</table>
{{< warning >}}
The validation rules published as part of OpenAPI schemas may not be complete, and usually aren't.
Additional validation occurs within the API server. If you want precise and complete verification,
a `kubectl apply --dry-run=server` runs all the applicable validation (and also activates admission-time
checks).
{{< /warning >}}
### OpenAPI V3

View File

@ -34,7 +34,7 @@ In cases when objects represent a physical entity, like a Node representing a ph
The server may generate a name when `generateName` is provided instead of `name` in a resource create request.
When `generateName` is used, the provided value is used as a name prefix, which server appends a generated suffix
to. Even though the name is generated, it may conflict with existing names resulting in a HTTP 409 resopnse. This
to. Even though the name is generated, it may conflict with existing names resulting in a HTTP 409 response. This
became far less likely to happen in Kubernetes v1.31 and later, since the server will make up to 8 attempts to generate a
unique name before returning a HTTP 409 response.
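A sketch of a manifest that relies on server-side name generation; submit it with a create request (for example `kubectl create -f`), since `generateName` only applies on create:

```yaml
# Sketch: the API server appends a random suffix to the prefix, e.g. "demo-7k2xq".
apiVersion: v1
kind: ConfigMap
metadata:
  generateName: demo-   # name prefix; the generated name may still collide (HTTP 409)
data:
  example: "value"
```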

View File

@ -9,16 +9,18 @@ weight: 50
<!-- overview -->
In order to support latency-critical and high-throughput workloads, Kubernetes offers a suite of Resource Managers. The managers aim to co-ordinate and optimise node's resources alignment for pods configured with a specific requirement for CPUs, devices, and memory (hugepages) resources.
In order to support latency-critical and high-throughput workloads, Kubernetes offers a suite of
Resource Managers. The managers aim to co-ordinate and optimise the alignment of a node's resources for pods
configured with a specific requirement for CPUs, devices, and memory (hugepages) resources.
<!-- body -->
## Hardware topology alignment policies
_Topology Manager_ is a kubelet component that aims to coordinate the set of components that are
responsible for these optimizations. The the overall resource management process is governed using
the policy you specify.
To learn more, read [Control Topology Management Policies on a Node](/docs/tasks/administer-cluster/topology-manager/).
responsible for these optimizations. The overall resource management process is governed using
the policy you specify. To learn more, read
[Control Topology Management Policies on a Node](/docs/tasks/administer-cluster/topology-manager/).
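As an illustrative sketch (not taken from this page), the policy is typically selected through the kubelet configuration:

```yaml
# Sketch: choose a Topology Manager policy and scope in the kubelet configuration file.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
topologyManagerPolicy: best-effort   # none | best-effort | restricted | single-numa-node
topologyManagerScope: container      # or "pod"
```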
## Policies for assigning CPUs to Pods
@ -29,27 +31,30 @@ hardware (for example, sharing CPUs across multiple Pods) or allocate hardware b
resource (for example, assigning one of more CPUs for a Pod's exclusive use).
By default, the kubelet uses [CFS quota](https://en.wikipedia.org/wiki/Completely_Fair_Scheduler)
to enforce pod CPU limits.  When the node runs many CPU-bound pods, the workload can move to different CPU cores depending on
whether the pod is throttled and which CPU cores are available at scheduling time. Many workloads are not sensitive to this migration and thus
to enforce pod CPU limits.  When the node runs many CPU-bound pods, the workload can move to
different CPU cores depending on whether the pod is throttled and which CPU cores are available
at scheduling time. Many workloads are not sensitive to this migration and thus
work fine without any intervention.
However, in workloads where CPU cache affinity and scheduling latency significantly affect workload performance, the kubelet allows alternative CPU
However, in workloads where CPU cache affinity and scheduling latency significantly affect
workload performance, the kubelet allows alternative CPU
management policies to determine some placement preferences on the node.
This is implemented using the _CPU Manager_ and its policy.
There are two available policies:
- `none`: the `none` policy explicitly enables the existing default CPU
affinity scheme, providing no affinity beyond what the OS scheduler does
automatically.  Limits on CPU usage for
[Guaranteed pods](/docs/concepts/workloads/pods/pod-qos/) and
[Burstable pods](/docs/concepts/workloads/pods/pod-qos/)
are enforced using CFS quota.
affinity scheme, providing no affinity beyond what the OS scheduler does
automatically. Limits on CPU usage for
[Guaranteed pods](/docs/concepts/workloads/pods/pod-qos/) and
[Burstable pods](/docs/concepts/workloads/pods/pod-qos/)
are enforced using CFS quota.
- `static`: the `static` policy allows containers in `Guaranteed` pods with integer CPU
`requests` access to exclusive CPUs on the node. This exclusivity is enforced
using the [cpuset cgroup controller](https://www.kernel.org/doc/Documentation/cgroup-v2.txt).
`requests` access to exclusive CPUs on the node. This exclusivity is enforced
using the [cpuset cgroup controller](https://www.kernel.org/doc/Documentation/cgroup-v2.txt).
{{< note >}}
System services such as the container runtime and the kubelet itself can continue to run on these exclusive CPUs.  The exclusivity only extends to other pods.
System services such as the container runtime and the kubelet itself can continue to run on
these exclusive CPUs. The exclusivity only extends to other pods.
{{< /note >}}
CPU Manager doesn't support offlining and onlining of CPUs at runtime.
@ -64,12 +69,12 @@ CPUs reserved by these options are taken, in integer quantity, from the initial
core ID. This shared pool is the set of CPUs on which any containers in
`BestEffort` and `Burstable` pods run. Containers in `Guaranteed` pods with fractional
CPU `requests` also run on CPUs in the shared pool. Only containers that are
both part of a `Guaranteed` pod and have integer CPU `requests` are assigned
part of a `Guaranteed` pod and have integer CPU `requests` are assigned
exclusive CPUs.
{{< note >}}
The kubelet requires a CPU reservation greater than zero when the static policy is enabled.
This is because zero CPU reservation would allow the shared pool to become empty.
This is because a zero CPU reservation would allow the shared pool to become empty.
{{< /note >}}
As `Guaranteed` pods whose containers fit the requirements for being statically
@ -144,7 +149,6 @@ The pod above runs in the `Guaranteed` QoS class because `requests` are equal to
And the container's resource limit for the CPU resource is an integer greater than
or equal to one. The `nginx` container is granted 2 exclusive CPUs.
```yaml
spec:
containers:
@ -163,7 +167,6 @@ The pod above runs in the `Guaranteed` QoS class because `requests` are equal to
But the container's resource limit for the CPU resource is a fraction. It runs in
the shared pool.
```yaml
spec:
containers:
@ -182,27 +185,38 @@ equal to one. The `nginx` container is granted 2 exclusive CPUs.
#### Static policy options {#cpu-policy-static--options}
The behavior of the static policy can be fine-tuned using the CPU Manager policy options.
The following policy options exist for the static CPU management policy:
{{/* options in alphabetical order */}}
Here are the available policy options for the static CPU management policy,
listed in alphabetical order:
`align-by-socket` (alpha, hidden by default)
: Align CPUs by physical package / socket boundary, rather than logical NUMA boundaries (available since Kubernetes v1.25)
: Align CPUs by physical package / socket boundary, rather than logical NUMA boundaries
(available since Kubernetes v1.25)
`distribute-cpus-across-cores` (alpha, hidden by default)
: Allocate virtual cores, sometimes called hardware threads, across different physical cores (available since Kubernetes v1.31)
: Allocate virtual cores, sometimes called hardware threads, across different physical cores
(available since Kubernetes v1.31)
`distribute-cpus-across-numa` (alpha, hidden by default)
: Spread CPUs across different NUMA domains, aiming for an even balance between the selected domains (available since Kubernetes v1.23)
: Spread CPUs across different NUMA domains, aiming for an even balance between the selected domains
(available since Kubernetes v1.23)
`full-pcpus-only` (beta, visible by default)
: Always allocate full physical cores (available since Kubernetes v1.22)
`strict-cpu-reservation` (alpha, hidden by default)
: Prevent all the pods regardless of their Quality of Service class to run on reserved CPUs (available since Kubernetes v1.32)
: Prevent all pods, regardless of their Quality of Service class, from running on reserved CPUs
(available since Kubernetes v1.32)
`prefer-align-cpus-by-uncorecache` (alpha, hidden by default)
: Align CPUs by uncore (Last-Level) cache boundary on a best-effort way (available since Kubernetes v1.32)
: Align CPUs by uncore (Last-Level) cache boundary on a best-effort basis
(available since Kubernetes v1.32)
You can toggle groups of options on and off based upon their maturity level
using the following feature gates:
* `CPUManagerPolicyBetaOptions` (default enabled). Disable to hide beta-level options.
* `CPUManagerPolicyAlphaOptions` (default disabled). Enable to show alpha-level options.
You will still have to enable each option using the `cpuManagerPolicyOptions` field in the
kubelet configuration file.
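As a hedged sketch (the values shown are illustrative, not recommendations), a kubelet configuration that selects the static policy and turns on one beta-level option might look like this:
```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cpuManagerPolicy: static
cpuManagerPolicyOptions:
  full-pcpus-only: "true"   # beta option, visible by default
featureGates:
  CPUManagerPolicyBetaOptions: true   # shown for clarity; enabled by default anyway
```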
@ -253,10 +267,10 @@ than number of NUMA nodes.
If the `distribute-cpus-across-cores` policy option is specified, the static policy
will attempt to allocate virtual cores (hardware threads) across different physical cores.
By default, the `CPUManager` tends to pack cpus onto as few physical cores as possible,
which can lead to contention among cpus on the same physical core and result
By default, the `CPUManager` tends to pack CPUs onto as few physical cores as possible,
which can lead to contention among CPUs on the same physical core and result
in performance bottlenecks. By enabling the `distribute-cpus-across-cores` policy,
the static policy ensures that cpus are distributed across as many physical cores
the static policy ensures that CPUs are distributed across as many physical cores
as possible, reducing the contention on the same physical core and thereby
improving overall performance. However, it is important to note that this strategy
might be less effective when the system is heavily loaded. Under such conditions,
@ -268,11 +282,11 @@ better performance under high load conditions.
The `reservedSystemCPUs` parameter in [KubeletConfiguration](/docs/reference/config-api/kubelet-config.v1beta1/),
or the deprecated kubelet command line option `--reserved-cpus`, defines an explicit CPU set for OS system daemons
and kubernetes system daemons. More details of this parameter can be found on the
and kubernetes system daemons. More details of this parameter can be found on the
[Explicitly Reserved CPU List](/docs/tasks/administer-cluster/reserve-compute-resources/#explicitly-reserved-cpu-list) page.
By default this isolation is implemented only for guaranteed pods with integer CPU requests not for burstable and best-effort pods
(and guaranteed pods with fractional CPU requests). Admission is only comparing the cpu requests against the allocatable cpus.
Since the cpu limit is higher than the request, the default behaviour allows burstable and best-effort pods to use up the capacity
By default, this isolation is implemented only for guaranteed pods with integer CPU requests, not for burstable and best-effort pods
(and guaranteed pods with fractional CPU requests). Admission only compares the CPU requests against the allocatable CPUs.
Since the CPU limit is higher than the request, the default behaviour allows burstable and best-effort pods to use up the capacity
of `reservedSystemCPUs` and cause host OS services to starve in real-life deployments.
If the `strict-cpu-reservation` policy option is enabled, the static policy will not allow
any workload to use the CPU cores specified in `reservedSystemCPUs`.
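For example, a sketch of a kubelet configuration that reserves CPUs 0-3 (an illustrative CPU list) and enforces that reservation strictly could look like this:
```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cpuManagerPolicy: static
reservedSystemCPUs: "0-3"          # CPUs set aside for OS and Kubernetes system daemons
cpuManagerPolicyOptions:
  strict-cpu-reservation: "true"   # alpha option; needs CPUManagerPolicyAlphaOptions
featureGates:
  CPUManagerPolicyAlphaOptions: true
```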
@ -280,21 +294,20 @@ any workload to use the CPU cores specified in `reservedSystemCPUs`.
##### `prefer-align-cpus-by-uncorecache`
If the `prefer-align-cpus-by-uncorecache` policy is specified, the static policy
will allocate CPU resources for individual containers such that all CPUs assigned
to a container share the same uncore cache block (also known as the Last-Level Cache
or LLC). By default, the `CPUManager` will tightly pack CPU assignments which can
result in containers being assigned CPUs from multiple uncore caches. This option
enables the `CPUManager` to allocate CPUs in a way that maximizes the efficient use
of the uncore cache. Allocation is performed on a best-effort basis, aiming to
affine as many CPUs as possible within the same uncore cache. If the container's
CPU requirement exceeds the CPU capacity of a single uncore cache, the `CPUManager`
minimizes the number of uncore caches used in order to maintain optimal uncore
cache alignment. Specific workloads can benefit in performance from the reduction
of inter-cache latency and noisy neighbors at the cache level. If the `CPUManager`
cannot align optimally while the node has sufficient resources, the container will
will allocate CPU resources for individual containers such that all CPUs assigned
to a container share the same uncore cache block (also known as the Last-Level Cache
or LLC). By default, the `CPUManager` will tightly pack CPU assignments which can
result in containers being assigned CPUs from multiple uncore caches. This option
enables the `CPUManager` to allocate CPUs in a way that maximizes the efficient use
of the uncore cache. Allocation is performed on a best-effort basis, aiming to
affine as many CPUs as possible within the same uncore cache. If the container's
CPU requirement exceeds the CPU capacity of a single uncore cache, the `CPUManager`
minimizes the number of uncore caches used in order to maintain optimal uncore
cache alignment. Specific workloads can benefit in performance from the reduction
of inter-cache latency and noisy neighbors at the cache level. If the `CPUManager`
cannot align optimally while the node has sufficient resources, the container will
still be admitted using the default packed behavior.
## Memory Management Policies
{{< feature-state feature_gate_name="MemoryManager" >}}

View File

@ -52,7 +52,7 @@ allocation to Pods. This is similar to how you can reserve CPU, memory, or other
resources for use by the operating system and other facilities outside of Pods
and their containers.
PID limiting is a an important sibling to [compute
PID limiting is an important sibling to [compute
resource](/docs/concepts/configuration/manage-resources-containers/) requests
and limits. However, you specify it in a different way: rather than defining a
Pod's resource limit in the `.spec` for a Pod, you configure the limit as a

View File

@ -36,7 +36,7 @@ Resource quotas work like this:
- If creating or updating a resource violates a quota constraint, the request will fail with HTTP
status code `403 FORBIDDEN` with a message explaining the constraint that would have been violated.
- If quota is enabled in a namespace for compute resources like `cpu` and `memory`, users must specify
- If quotas are enabled in a namespace for compute resources like `cpu` and `memory`, users must specify
requests or limits for those values; otherwise, the quota system may reject pod creation. Hint: Use
the `LimitRanger` admission controller to force defaults for pods that don't specify compute resource requirements.
@ -222,8 +222,8 @@ Resources specified on the quota outside of the allowed set results in a validat
| Scope | Description |
| ----- | ----------- |
| `Terminating` | Match pods where `.spec.activeDeadlineSeconds >= 0` |
| `NotTerminating` | Match pods where `.spec.activeDeadlineSeconds is nil` |
| `Terminating` | Match pods where `.spec.activeDeadlineSeconds` >= `0` |
| `NotTerminating` | Match pods where `.spec.activeDeadlineSeconds` is `nil` |
| `BestEffort` | Match pods that have best effort quality of service. |
| `NotBestEffort` | Match pods that do not have best effort quality of service. |
| `PriorityClass` | Match pods that reference the specified [priority class](/docs/concepts/scheduling-eviction/pod-priority-preemption). |
@ -308,60 +308,14 @@ works as follows:
- Pods in the cluster have one of the three priority classes, "low", "medium", "high".
- One quota object is created for each priority.
Save the following YAML to a file `quota.yml`.
Save the following YAML to a file `quota.yaml`.
```yaml
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: ResourceQuota
metadata:
name: pods-high
spec:
hard:
cpu: "1000"
memory: 200Gi
pods: "10"
scopeSelector:
matchExpressions:
- operator : In
scopeName: PriorityClass
values: ["high"]
- apiVersion: v1
kind: ResourceQuota
metadata:
name: pods-medium
spec:
hard:
cpu: "10"
memory: 20Gi
pods: "10"
scopeSelector:
matchExpressions:
- operator : In
scopeName: PriorityClass
values: ["medium"]
- apiVersion: v1
kind: ResourceQuota
metadata:
name: pods-low
spec:
hard:
cpu: "5"
memory: 10Gi
pods: "10"
scopeSelector:
matchExpressions:
- operator : In
scopeName: PriorityClass
values: ["low"]
```
{{% code_sample file="policy/quota.yaml" %}}
Apply the YAML using `kubectl create`.
```shell
kubectl create -f ./quota.yml
kubectl create -f ./quota.yaml
```
```
@ -405,33 +359,14 @@ pods 0 10
```
Create a pod with priority "high". Save the following YAML to a
file `high-priority-pod.yml`.
file `high-priority-pod.yaml`.
```yaml
apiVersion: v1
kind: Pod
metadata:
name: high-priority
spec:
containers:
- name: high-priority
image: ubuntu
command: ["/bin/sh"]
args: ["-c", "while true; do echo hello; sleep 10;done"]
resources:
requests:
memory: "10Gi"
cpu: "500m"
limits:
memory: "10Gi"
cpu: "500m"
priorityClassName: high
```
{{% code_sample file="policy/high-priority-pod.yaml" %}}
Apply it with `kubectl create`.
```shell
kubectl create -f ./high-priority-pod.yml
kubectl create -f ./high-priority-pod.yaml
```
Verify that "Used" stats for "high" priority quota, `pods-high`, has changed and that
@ -550,9 +485,9 @@ metadata:
spec:
hard:
requests.cpu: "1"
requests.memory: 1Gi
requests.memory: "1Gi"
limits.cpu: "2"
limits.memory: 2Gi
limits.memory: "2Gi"
requests.nvidia.com/gpu: 4
EOF
```

View File

@ -453,7 +453,7 @@ spec:
# tenant is running.
labelSelector:
# We have to have the labelSelector which selects only Pods with the tenant label,
# otherwise this Pod would hate Pods from daemonsets as well, for example,
# otherwise this Pod would have Pods from daemonsets as well, for example,
# which aren't supposed to have the tenant label.
matchExpressions:
- key: tenant

View File

@ -79,6 +79,7 @@ Huawei Cloud | https://www.huaweicloud.com/intl/en-us/securecenter/overallsafety
IBM Cloud | https://www.ibm.com/cloud/security |
Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security |
Oracle Cloud Infrastructure | https://www.oracle.com/security |
Tencent Cloud | https://www.tencentcloud.com/solutions/data-security-and-information-protection |
VMware vSphere | https://www.vmware.com/security/hardening-guides |
{{< /table >}}

View File

@ -77,7 +77,7 @@ depending on the IP family or families of the Service, with a name of the form
of the Service.
[Headless Services](/docs/concepts/services-networking/service/#headless-services)
(without a cluster IP) Services are also assigned DNS A and/or AAAA records,
(without a cluster IP) are also assigned DNS A and/or AAAA records,
with a name of the form `my-svc.my-namespace.svc.cluster-domain.example`. Unlike normal
Services, this resolves to the set of IPs of all of the Pods selected by the Service.
Clients are expected to consume the set or else use standard round-robin
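To make this concrete, here is a minimal sketch of a headless Service (the names and label are illustrative); its DNS name resolves to the IPs of the Pods matched by the selector rather than to a single cluster IP:
```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-svc
  namespace: my-namespace
spec:
  clusterIP: None      # headless: no cluster IP is allocated
  selector:
    app: my-app        # hypothetical Pod label
  ports:
  - port: 80
```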

View File

@ -632,7 +632,7 @@ The access modes are:
`ReadWriteOnce`
: the volume can be mounted as read-write by a single node. ReadWriteOnce access
mode still can allow multiple pods to access the volume when the pods are
mode still can allow multiple pods to access (read from or write to) that volume when the pods are
running on the same node. For single pod access, please see ReadWriteOncePod.
`ReadOnlyMany`
@ -818,6 +818,14 @@ storage with specific access modes.
Claims use [the same convention as volumes](#volume-mode) to indicate the
consumption of the volume as either a filesystem or block device.
### Volume Name
Claims can use the `volumeName` field to explicitly bind to a specific PersistentVolume. You can also leave
`volumeName` unset, indicating that you'd like Kubernetes to set up a new PersistentVolume
that matches the claim.
If the specified PV is already bound to another PVC, the binding will be stuck
in a pending state.
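A minimal sketch of such a claim (the PV name and size are illustrative):
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-claim
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  volumeName: my-existing-pv   # hypothetical PV; it must still satisfy the claim's requirements
```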
### Resources
Claims, like Pods, can request specific quantities of a resource. In this case,

View File

@ -67,6 +67,22 @@ deletionPolicy: Delete
parameters:
```
If multiple CSI drivers exist, a default VolumeSnapshotClass can be specified
for each of them.
### VolumeSnapshotClass dependencies
When you create a VolumeSnapshot without specifying a VolumeSnapshotClass, Kubernetes
automatically selects a default VolumeSnapshotClass that has a CSI driver matching
the CSI driver of the PVC's StorageClass.
This behavior allows multiple default VolumeSnapshotClass objects to coexist in a cluster, as long as
each one is associated with a unique CSI driver.
Always ensure that there is only one default VolumeSnapshotClass for each CSI driver. If
multiple default VolumeSnapshotClass objects are created using the same CSI driver,
a VolumeSnapshot creation will fail because Kubernetes cannot determine which one to use.
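As a hedged sketch, marking a VolumeSnapshotClass as the default for its CSI driver looks like the following (the driver name is illustrative; use the name of your CSI driver):
```yaml
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-snapclass-default
  annotations:
    snapshot.storage.kubernetes.io/is-default-class: "true"   # default for this driver
driver: hostpath.csi.k8s.io   # hypothetical CSI driver name
deletionPolicy: Delete
```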
### Driver
Volume snapshot classes have a driver that determines what CSI volume plugin is

View File

@ -14,21 +14,47 @@ weight: 10
<!-- overview -->
On-disk files in a container are ephemeral, which presents some problems for
Kubernetes _volumes_ provide a way for containers in a {{< glossary_tooltip text="pod" term_id="pod" >}}
to access and share data via the filesystem. There are different kinds of volume that you can use for different purposes,
such as:
- populating a configuration file based on a {{< glossary_tooltip text="ConfigMap" term_id="configmap" >}}
or a {{< glossary_tooltip text="Secret" term_id="secret" >}}
- providing some temporary scratch space for a pod
- sharing a filesystem between two different containers in the same pod
- sharing a filesystem between two different pods (even if those Pods run on different nodes)
- durably storing data so that it stays available even if the Pod restarts or is replaced
- passing configuration information to an app running in a container, based on details of the Pod
the container is in
(for example: telling a {{< glossary_tooltip text="sidecar container" term_id="sidecar-container" >}}
what namespace the Pod is running in)
- providing read-only access to data in a different container image
Data sharing can be between different local processes within a container, or between different containers,
or between Pods.
## Why volumes are important
- **Data persistence:** On-disk files in a container are ephemeral, which presents some problems for
non-trivial applications when running in containers. One problem occurs when
a container crashes or is stopped. Container state is not saved so all of the
a container crashes or is stopped, the container state is not saved so all of the
files that were created or modified during the lifetime of the container are lost.
During a crash, kubelet restarts the container with a clean state.
Another problem occurs when multiple containers are running in a `Pod` and
During a crash, kubelet restarts the container with a clean state.
- **Shared storage:** Another problem occurs when multiple containers are running in a `Pod` and
need to share files. It can be challenging to set up
and access a shared filesystem across all of the containers.
The Kubernetes {{< glossary_tooltip text="volume" term_id="volume" >}} abstraction
solves both of these problems.
Familiarity with [Pods](/docs/concepts/workloads/pods/) is suggested.
The Kubernetes {{< glossary_tooltip text="volume" term_id="volume" >}} abstraction
can help you to solve both of these problems.
Before you learn about volumes, PersistentVolumes and PersistentVolumeClaims, you should read up
about {{< glossary_tooltip term_id="Pod" text="Pods" >}} and make sure that you understand how
Kubernetes uses Pods to run containers.
<!-- body -->
## Background
## How volumes work
Kubernetes supports many types of volumes. A {{< glossary_tooltip term_id="pod" text="Pod" >}}
can use any number of volume types simultaneously.
@ -45,14 +71,15 @@ volume type used.
To use a volume, specify the volumes to provide for the Pod in `.spec.volumes`
and declare where to mount those volumes into containers in `.spec.containers[*].volumeMounts`.
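For example, the following sketch (the image and paths are illustrative) declares one `emptyDir` volume in `.spec.volumes` and mounts it into a container through `.spec.containers[*].volumeMounts`:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: volume-demo
spec:
  containers:
  - name: app
    image: registry.k8s.io/pause:3.9
    volumeMounts:
    - name: scratch
      mountPath: /scratch    # where the volume appears inside this container
  volumes:
  - name: scratch
    emptyDir: {}             # scratch space tied to the Pod's lifetime
```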
A process in a container sees a filesystem view composed from the initial contents of
When a pod is launched, a process in the container sees a filesystem view composed from the initial contents of
the {{< glossary_tooltip text="container image" term_id="image" >}}, plus volumes
(if defined) mounted inside the container.
The process sees a root filesystem that initially matches the contents of the container
image.
Any writes to within that filesystem hierarchy, if allowed, affect what that process views
when it performs a subsequent filesystem access.
Volumes mount at the [specified paths](#using-subpath) within
Volumes are mounted at [specified paths](#using-subpath) within
the image.
For each container defined within a Pod, you must independently specify where
to mount each volume that the container uses.
@ -179,11 +206,11 @@ spec:
name: log-config
items:
- key: log_level
path: log_level
path: log_level.conf
```
The `log-config` ConfigMap is mounted as a volume, and all contents stored in
its `log_level` entry are mounted into the Pod at path `/etc/config/log_level`.
its `log_level` entry are mounted into the Pod at path `/etc/config/log_level.conf`.
Note that this path is derived from the volume's `mountPath` and the `path`
keyed with `log_level`.
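Putting that snippet into context, a hedged sketch of the whole Pod could look like this (the image and command are illustrative); the `mountPath` `/etc/config` combined with the item `path` `log_level.conf` yields the file `/etc/config/log_level.conf`:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: configmap-pod
spec:
  containers:
  - name: test
    image: busybox:1.36      # illustrative image
    command: ["sh", "-c", "cat /etc/config/log_level.conf; sleep 3600"]
    volumeMounts:
    - name: config-vol
      mountPath: /etc/config   # directory where the ConfigMap items appear
  volumes:
  - name: config-vol
    configMap:
      name: log-config
      items:
      - key: log_level
        path: log_level.conf   # file name under mountPath
```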
@ -806,18 +833,11 @@ before using it in the Pod.
For more details, see the [Portworx volume](https://github.com/kubernetes/examples/tree/master/staging/volumes/portworx/README.md) examples.
#### Portworx CSI migration
{{< feature-state for_k8s_version="v1.25" state="beta" >}}
{{< feature-state feature_gate_name="CSIMigrationPortworx" >}}
By default, Kubernetes {{% skew currentVersion %}} attempts to migrate legacy
Portworx volumes to use CSI. (CSI migration for Portworx has been available since
Kubernetes v1.23, but was only turned on by default since the v1.31 release).
If you want to disable automatic migration, you can set the `CSIMigrationPortworx`
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
to `false`; you need to make that change for the kube-controller-manager **and** on
every relevant kubelet.
It redirects all plugin operations from the existing in-tree plugin to the
`pxd.portworx.com` Container Storage Interface (CSI) Driver.
In Kubernetes {{% skew currentVersion %}}, all operations for the in-tree
Portworx volumes are redirected to the `pxd.portworx.com`
Container Storage Interface (CSI) Driver by default.
[Portworx CSI Driver](https://docs.portworx.com/portworx-enterprise/operations/operate-kubernetes/storage-operations/csi)
must be installed on the cluster.

View File

@ -66,12 +66,12 @@ In this example:
All of the requirements, from both `matchLabels` and `matchExpressions`, must be satisfied in order to match.
{{< /note >}}
* The `template` field contains the following sub-fields:
* The `.spec.template` field contains the following sub-fields:
* The Pods are labeled `app: nginx` using the `.metadata.labels` field.
* The Pod template's specification, or `.template.spec` field, indicates that
the Pods run one container, `nginx`, which runs the `nginx`
[Docker Hub](https://hub.docker.com/) image at version 1.14.2.
* Create one container and name it `nginx` using the `.spec.template.spec.containers[0].name` field.
* The Pod template's specification, or `.spec` field, indicates that
the Pods run one container, `nginx`, which runs the `nginx`
[Docker Hub](https://hub.docker.com/) image at version 1.14.2.
* Create one container and name it `nginx` using the `.spec.containers[0].name` field.
Before you begin, make sure your Kubernetes cluster is up and running.
Follow the steps given below to create the above Deployment:

View File

@ -407,7 +407,7 @@ in the Pod Lifecycle documentation.
To understand the context for why Kubernetes wraps a common Pod API in other resources (such as {{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}} or {{< glossary_tooltip text="Deployments" term_id="deployment" >}}), you can read about the prior art, including:
* [Aurora](https://aurora.apache.org/documentation/latest/reference/configuration/#job-schema)
* [Borg](https://research.google.com/pubs/pub43438.html)
* [Borg](https://research.google/pubs/large-scale-cluster-management-at-google-with-borg/)
* [Marathon](https://github.com/d2iq-archive/marathon)
* [Omega](https://research.google/pubs/pub41684/)
* [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/).

View File

@ -97,9 +97,9 @@ maintain sidecar containers without affecting the primary application.
Sidecar containers share the same network and storage namespaces with the primary
container. This co-location allows them to interact closely and share resources.
From Kubernetes perspective, sidecars graceful termination is less important.
When other containers took all alloted graceful termination time, sidecar containers
will receive the `SIGTERM` following with `SIGKILL` faster than may be expected.
From a Kubernetes perspective, the sidecar container's graceful termination is less important.
When other containers take all allotted graceful termination time, the sidecar containers
will receive the `SIGTERM` signal, followed by the `SIGKILL` signal, before they have time to terminate gracefully.
So exit codes different from `0` (`0` indicates successful exit) are normal for sidecar containers
on Pod termination, and should generally be ignored by external tooling.
@ -121,6 +121,9 @@ Init containers stop before the main containers start up, so init containers can
exchange messages with the app container in a Pod. Any data passing is one-way
(for example, an init container can put information inside an `emptyDir` volume).
Changing the image of a sidecar container will not cause the Pod to restart, but will
trigger a container restart.
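A minimal sketch of the pattern (image names are hypothetical): a sidecar is declared as an init container with `restartPolicy: Always`, so it starts before the main container and keeps running alongside it while sharing the Pod's network and volumes:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app-with-sidecar
spec:
  initContainers:
  - name: log-shipper                        # the sidecar container
    image: example.com/log-shipper:latest    # hypothetical image
    restartPolicy: Always                    # marks this init container as a sidecar
  containers:
  - name: app
    image: example.com/app:latest            # hypothetical image
```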
## Resource sharing within containers
{{< comment >}}

View File

@ -177,6 +177,8 @@ to the `kubelet` user:
configuration.
* The subordinate ID count must be a multiple of 65536
(for Kubernetes {{< skew currentVersion >}} the subordinate ID count for each Pod is hard-coded
to 65536).
* The subordinate ID count must be at least `65536 x <maxPods>` where `<maxPods>`
is the maximum number of pods that can run on the node.
@ -198,7 +200,9 @@ these entries for the `kubelet` user:
# name:firstID:count of IDs
# where
# - firstID is 65536 (the minimum value possible)
# - count of IDs is 110 (default limit for number of) * 65536
# - count of IDs is 110 * 65536
# (110 is the default limit for number of pods on the node)
kubelet:65536:7208960
```

View File

@ -26,7 +26,8 @@ conventions and process used during a release by both groups.
## For documentation contributors
In general, documentation contributors don't write content from scratch for a release.
Instead, they work with the SIG creating a new feature to refine the draft documentation and make it release ready.
Instead, they work with the SIG creating a new feature to refine the draft documentation
and make it release ready.
After you've chosen a feature to document or assist, ask about it in the `#sig-docs`
Slack channel, in a weekly SIG Docs meeting, or directly on the PR filed by the
@ -106,11 +107,9 @@ deadlines.
issue with a link to the PR to notify the docs person managing this release that
the feature docs are coming and should be tracked for the release.
If your feature does not need
any documentation changes, make sure the sig-release team knows this, by
mentioning it in the `#sig-release` Slack channel. If the feature does need
documentation but the PR is not created, the feature may be removed from the
milestone.
If your feature does not need any documentation changes, make sure the sig-release team knows this,
by mentioning it in the `#sig-release` Slack channel. If the feature does need
documentation but the PR is not created, the feature may be removed from the milestone.
### PR ready for review
@ -133,8 +132,7 @@ content is not received, the feature may be removed from the milestone.
If your feature is an Alpha or Beta feature and is behind a feature gate,
you need a feature gate file for it inside
`content/en/docs/reference/command-line-tools-reference/feature-gates/`.
The name of the file should be the feature gate, converted from `UpperCamelCase`
to `kebab-case`, with `.md` as the suffix.
The name of the file should be the name of the feature gate with `.md` as the suffix.
You can look at other files already in the same directory for a hint about what yours
should look like. Usually a single paragraph is enough; for longer explanations,
add documentation elsewhere and link to that.
@ -153,9 +151,8 @@ stages:
toVersion: <Version> # (Optional) The version until which the feature gate is available
```
With net new feature gates, a separate
description of the feature gate is also required; create a new Markdown file
inside `content/en/docs/reference/command-line-tools-reference/feature-gates/`
With net new feature gates, a separate description of the feature gate is also required;
create a new Markdown file inside `content/en/docs/reference/command-line-tools-reference/feature-gates/`
(use other files as a template).
When you change a feature gate from disabled-by-default to enabled-by-default,

View File

@ -325,7 +325,7 @@ the `.svg` image file and add a caption.
For more details on diagram captions, see [How to use captions](#how-to-use-captions).
{{< note >}}
The `{{</* figure */>}}` shortcode is the preferred method for adding `.svg` image files
The figure shortcode is the preferred method for adding `.svg` image files
to your documentation. You can also use the standard markdown image syntax like so:
`![my boxnet diagram](static/images/boxnet.svg)`.
And you will need to add a caption below the diagram.
@ -473,7 +473,7 @@ Figure 6. Pod Topology Spread Constraints.
Code block:
```
```text
graph TB
subgraph "zoneB"
n3(Node3)
@ -528,7 +528,7 @@ Figure 7. Ingress
Code block:
```mermaid
```text
graph LR;
client([client])-. Ingress-managed <br> load balancer .->ingress[Ingress];
ingress-->|routing rule|service[Service];
@ -556,7 +556,7 @@ K8s components to start a container.
Code block:
```
```text
%%{init:{"theme":"neutral"}}%%
sequenceDiagram
actor me
@ -592,7 +592,7 @@ In the code for
[figure 7](https://mermaid-js.github.io/mermaid-live-editor/edit/#eyJjb2RlIjoiZ3JhcGggIExSXG4gIGNsaWVudChbY2xpZW50XSktLiBJbmdyZXNzLW1hbmFnZWQgPGJyPiBsb2FkIGJhbGFuY2VyIC4tPmluZ3Jlc3NbSW5ncmVzc107XG4gIGluZ3Jlc3MtLT58cm91dGluZyBydWxlfHNlcnZpY2VbU2VydmljZV07XG4gIHN1YmdyYXBoIGNsdXN0ZXJcbiAgaW5ncmVzcztcbiAgc2VydmljZS0tPnBvZDFbUG9kXTtcbiAgc2VydmljZS0tPnBvZDJbUG9kXTtcbiAgZW5kXG4gIGNsYXNzRGVmIHBsYWluIGZpbGw6I2RkZCxzdHJva2U6I2ZmZixzdHJva2Utd2lkdGg6NHB4LGNvbG9yOiMwMDA7XG4gIGNsYXNzRGVmIGs4cyBmaWxsOiMzMjZjZTUsc3Ryb2tlOiNmZmYsc3Ryb2tlLXdpZHRoOjRweCxjb2xvcjojZmZmO1xuICBjbGFzc0RlZiBjbHVzdGVyIGZpbGw6I2ZmZixzdHJva2U6I2JiYixzdHJva2Utd2lkdGg6MnB4LGNvbG9yOiMzMjZjZTU7XG4gIGNsYXNzIGluZ3Jlc3Msc2VydmljZSxwb2QxLHBvZDIgazhzO1xuICBjbGFzcyBjbGllbnQgcGxhaW47XG4gIGNsYXNzIGNsdXN0ZXIgY2x1c3RlcjtcbiIsIm1lcm1haWQiOiJ7XG4gIFwidGhlbWVcIjogXCJkZWZhdWx0XCJcbn0iLCJ1cGRhdGVFZGl0b3IiOmZhbHNlLCJhdXRvU3luYyI6dHJ1ZSwidXBkYXRlRGlhZ3JhbSI6dHJ1ZX0),
you can see examples of both.
```
```text
classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff; // defines style for the k8s class
class ingress,service,pod1,pod2 k8s; // k8s class is applied to elements ingress, service, pod1 and pod2.
```
@ -658,7 +658,7 @@ You should pass the `src`, `alt`, `class` and `caption` values into the
`diagram-large`, `diagram-medium` and `diagram-small` classes.
{{< note >}}
Diagrams created using the `Inline` method don't use the `{{</* figure */>}}`
Diagrams created using the `Inline` method don't use the figure
shortcode. The Mermaid code defines how the diagram will render on your page.
{{< /note >}}

View File

@ -96,69 +96,70 @@ and uid when inspecting a JWT.
### Verifying and inspecting private claims
The `TokenReview` API can be used to verify and extract private claims from a token:
The TokenReview API can be used to verify and extract private claims from a token:
1. First, assume you have a pod named `test-pod` and a service account named `my-sa`.
2. Create a token that is bound to this Pod:
1. Create a token that is bound to this Pod:
```shell
kubectl create token my-sa --bound-object-kind="Pod" --bound-object-name="test-pod"
```
```shell
kubectl create token my-sa --bound-object-kind="Pod" --bound-object-name="test-pod"
```
3. Copy this token into a new file named `tokenreview.yaml`:
1. Copy this token into a new file named `tokenreview.yaml`:
```yaml
apiVersion: authentication.k8s.io/v1
kind: TokenReview
spec:
token: <token from step 2>
```
```yaml
apiVersion: authentication.k8s.io/v1
kind: TokenReview
spec:
token: <token from step 2>
```
4. Submit this resource to the apiserver for review:
1. Submit this resource to the apiserver for review:
```shell
kubectl create -o yaml -f tokenreview.yaml # we use '-o yaml' so we can inspect the output
```
```shell
# use '-o yaml' to inspect the output
kubectl create -o yaml -f tokenreview.yaml
```
You should see an output like below:
You should see an output like below:
```yaml
apiVersion: authentication.k8s.io/v1
kind: TokenReview
metadata:
creationTimestamp: null
spec:
token: <token>
status:
audiences:
- https://kubernetes.default.svc.cluster.local
authenticated: true
user:
extra:
authentication.kubernetes.io/credential-id:
- JTI=7ee52be0-9045-4653-aa5e-0da57b8dccdc
authentication.kubernetes.io/node-name:
- kind-control-plane
authentication.kubernetes.io/node-uid:
- 497e9d9a-47aa-4930-b0f6-9f2fb574c8c6
authentication.kubernetes.io/pod-name:
- test-pod
authentication.kubernetes.io/pod-uid:
- e87dbbd6-3d7e-45db-aafb-72b24627dff5
groups:
- system:serviceaccounts
- system:serviceaccounts:default
- system:authenticated
uid: f8b4161b-2e2b-11e9-86b7-2afc33b31a7e
username: system:serviceaccount:default:my-sa
```
```yaml
apiVersion: authentication.k8s.io/v1
kind: TokenReview
metadata:
creationTimestamp: null
spec:
token: <token>
status:
audiences:
- https://kubernetes.default.svc.cluster.local
authenticated: true
user:
extra:
authentication.kubernetes.io/credential-id:
- JTI=7ee52be0-9045-4653-aa5e-0da57b8dccdc
authentication.kubernetes.io/node-name:
- kind-control-plane
authentication.kubernetes.io/node-uid:
- 497e9d9a-47aa-4930-b0f6-9f2fb574c8c6
authentication.kubernetes.io/pod-name:
- test-pod
authentication.kubernetes.io/pod-uid:
- e87dbbd6-3d7e-45db-aafb-72b24627dff5
groups:
- system:serviceaccounts
- system:serviceaccounts:default
- system:authenticated
uid: f8b4161b-2e2b-11e9-86b7-2afc33b31a7e
username: system:serviceaccount:default:my-sa
```
{{< note >}}
Despite using `kubectl create -f` to create this resource, and defining it similar to
other resource types in Kubernetes, TokenReview is a special type and the kube-apiserver
does not actually persist the TokenReview object into etcd.
Hence `kubectl get tokenreview` is not a valid command.
{{< /note >}}
{{< note >}}
Despite using `kubectl create -f` to create this resource, and defining it similar to
other resource types in Kubernetes, TokenReview is a special type and the kube-apiserver
does not actually persist the TokenReview object into etcd.
Hence `kubectl get tokenreview` is not a valid command.
{{< /note >}}
#### Schema for service account private claims
@ -229,7 +230,7 @@ For more information on JWTs and their structure, see the [JSON Web Token RFC](h
{{< feature-state feature_gate_name="BoundServiceAccountTokenVolume" >}}
By default, the Kubernetes control plane (specifically, the
[ServiceAccount admission controller](#serviceaccount-admission-controller))
[ServiceAccount admission controller](#serviceaccount-admission-controller))
adds a [projected volume](/docs/concepts/storage/projected-volumes/) to Pods,
and this volume includes a token for Kubernetes API access.
@ -292,9 +293,11 @@ and are mounted into Pods using a projected volume.
The tokens obtained using this method have bounded lifetimes, and are automatically
invalidated when the Pod they are mounted into is deleted.
You can still [manually create](/docs/tasks/configure-pod-container/configure-service-account/#manually-create-an-api-token-for-a-serviceaccount) a Secret to hold a service account token; for example, if you need a token that never expires.
You can still [manually create](/docs/tasks/configure-pod-container/configure-service-account/#manually-create-an-api-token-for-a-serviceaccount)
a Secret to hold a service account token; for example, if you need a token that never expires.
Once you manually create a Secret and link it to a ServiceAccount, the Kubernetes control plane automatically populates the token into that Secret.
Once you manually create a Secret and link it to a ServiceAccount,
the Kubernetes control plane automatically populates the token into that Secret.
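As a sketch of that manual mechanism (the ServiceAccount name is an example), you create an annotated Secret of type `kubernetes.io/service-account-token`, and the control plane then populates the token data:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: build-robot-secret
  namespace: default
  annotations:
    kubernetes.io/service-account.name: build-robot   # the ServiceAccount this token is bound to
type: kubernetes.io/service-account-token
```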
{{< note >}}
Although the manual mechanism for creating a long-lived ServiceAccount token exists,
@ -318,7 +321,7 @@ metadata:
name: build-robot
namespace: default
secrets:
- name: build-robot-secret # usually NOT present for a manually generated token
- name: build-robot-secret # usually NOT present for a manually generated token
```
Beginning from version 1.29, legacy ServiceAccount tokens that were generated
@ -387,7 +390,7 @@ verify the tokens during authentication.
{{< feature-state feature_gate_name="ExternalServiceAccountTokenSigner" >}}
An alternate setup to setting `--service-account-private-key-file` and `--service-account-key-file` flags is
to configure an external JWT signer for [external ServiceAccount token signing and key management](#external-serviceaccount-token-signing-and-key-management).
to configure an external JWT signer for [external ServiceAccount token signing and key management](#external-serviceaccount-token-signing-and-key-management).
Note that these setups are mutually exclusive and cannot be configured together.
### ServiceAccount admission controller
@ -512,7 +515,7 @@ That manifest snippet defines a projected volume that combines information from
either when the pod is deleted or after a defined lifespan (by default, that is 1 hour).
The token is bound to the specific Pod and has the kube-apiserver as its audience.
1. A `configMap` source. The ConfigMap contains a bundle of certificate authority data. Pods can use these
certificates to make sure that they are connecting to your cluster's kube-apiserver (and not to middlebox
certificates to make sure that they are connecting to your cluster's kube-apiserver (and not to a middlebox
or an accidentally misconfigured peer).
1. A `downwardAPI` source. This `downwardAPI` volume makes the name of the namespace containing the Pod available
to application code running inside the Pod.
@ -620,15 +623,21 @@ kubectl -n examplens delete secret/example-automated-thing-token-zyxwv
{{< feature-state feature_gate_name="ExternalServiceAccountTokenSigner" >}}
The kube-apiserver can be configured to use external signer for token signing and token verifying key management.
This feature enables kubernetes distributions to integrate with key management solutions of their choice (eg: HSMs, cloud KMSes) for service account credential signing and verification.
To configure kube-apiserver to use external-jwt-signer set the `--service-account-signing-endpoint` flag to the location of a Unix domain socket (UDS) on a filesystem, or be prefixed with an @ symbol and name a UDS in the abstract socket namespace.
At the configured UDS, shall be an RPC server which implements [ExternalJWTSigner](https://github.com/kubernetes/kubernetes/blob/release-1.32/staging/src/k8s.io/externaljwt/apis/v1alpha1/api.proto).
This feature enables Kubernetes distributions to integrate with key management solutions of their choice
(for example, HSMs, cloud KMSes) for service account credential signing and verification.
To configure the kube-apiserver to use an external JWT signer, set the `--service-account-signing-endpoint` flag
either to the location of a Unix domain socket (UDS) on a filesystem, or to a value prefixed with an @ symbol,
which names a UDS in the abstract socket namespace. At the configured UDS there must be an RPC server that implements
[ExternalJWTSigner](https://github.com/kubernetes/kubernetes/blob/release-1.32/staging/src/k8s.io/externaljwt/apis/v1alpha1/api.proto).
The external-jwt-signer must be healthy and be ready to serve supported service account keys for the kube-apiserver to start.
Check out [KEP-740](https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/740-service-account-external-signing) for more details on ExternalJWTSigner.
Check out [KEP-740](https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/740-service-account-external-signing)
for more details on ExternalJWTSigner.
{{< note >}}
The kube-apiserver flags `--service-account-key-file` and `--service-account-signing-key-file` will continue to be used for reading from files unless `--service-account-signing-endpoint` is set; they are mutually exclusive ways of supporting JWT signing and authentication.
The kube-apiserver flags `--service-account-key-file` and `--service-account-signing-key-file` will continue
to be used for reading from files unless `--service-account-signing-endpoint` is set; they are mutually
exclusive ways of supporting JWT signing and authentication.
{{< /note >}}
## Clean up

View File

@ -32,15 +32,15 @@ A policy is generally made up of three resources:
- The `ValidatingAdmissionPolicy` describes the abstract logic of a policy
(think: "this policy makes sure a particular label is set to a particular value").
- A `ValidatingAdmissionPolicyBinding` links the above resources together and provides scoping.
If you only want to require an `owner` label to be set for `Pods`, the binding is where you would
specify this restriction.
- A parameter resource provides information to a ValidatingAdmissionPolicy to make it a concrete
statement (think "the `owner` label must be set to something that ends in `.company.com`").
A native type such as ConfigMap or a CRD defines the schema of a parameter resource.
`ValidatingAdmissionPolicy` objects specify what Kind they are expecting for their parameter resource.
- A `ValidatingAdmissionPolicyBinding` links the above resources together and provides scoping.
If you only want to require an `owner` label to be set for `Pods`, the binding is where you would
specify this restriction.
At least a `ValidatingAdmissionPolicy` and a corresponding `ValidatingAdmissionPolicyBinding`
must be defined for a policy to have an effect.
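As a hedged sketch of how these pieces fit together (the expression and names are illustrative, not taken from this page), a policy that requires an `owner` label and the binding that puts it into effect might look like:
```yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicy
metadata:
  name: require-owner-label
spec:
  matchConstraints:
    resourceRules:
    - apiGroups:   ["apps"]
      apiVersions: ["v1"]
      operations:  ["CREATE", "UPDATE"]
      resources:   ["deployments"]
  validations:
  - expression: "has(object.metadata.labels) && 'owner' in object.metadata.labels"
    message: "all Deployments must carry an owner label"
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicyBinding
metadata:
  name: require-owner-label-binding
spec:
  policyName: require-owner-label
  validationActions: ["Deny"]
```
A parameter resource is only needed when the policy's expressions refer to `params`; this sketch omits it.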
@ -373,7 +373,7 @@ When an API request is validated with this admission policy, the resulting audit
In this example the annotation will only be included if the `spec.replicas` of the Deployment is more than
50, otherwise the CEL expression evaluates to null and the annotation will not be included.
Note that audit annotation keys are prefixed by the name of the `ValidatingAdmissionWebhook` and a `/`. If
Note that audit annotation keys are prefixed by the name of the `ValidatingAdmissionPolicy` and a `/`. If
another admission controller, such as an admission webhook, uses the exact same audit annotation key, the
value of the first admission controller to include the audit annotation will be included in the audit
event and all other values will be ignored.

View File

@ -0,0 +1,16 @@
---
title: APIServingWithRoutine
content_type: feature_gate
_build:
list: never
render: false
stages:
- stage: alpha
defaultValue: false
fromVersion: "1.30"
---
This feature gate enables an API server performance improvement:
the API server can use separate goroutines (lightweight threads managed by the Go runtime)
to serve [**watch**](/docs/reference/using-api/api-concepts/#efficient-detection-of-changes)
requests.

View File

@ -1,5 +1,4 @@
---
# Removed from Kubernetes
title: Accelerators
content_type: feature_gate
@ -22,3 +21,4 @@ Provided an early form of plugin to enable Nvidia GPU support when using
Docker Engine; no longer available. See
[Device Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) for
an alternative.

View File

@ -0,0 +1,16 @@
---
title: BtreeWatchCache
content_type: feature_gate
_build:
list: never
render: false
stages:
- stage: beta
defaultValue: true
fromVersion: "1.32"
---
When enabled, the API server will replace the legacy HashMap-based _watch cache_
with a BTree-based implementation. This replacement may bring performance improvements.

Some files were not shown because too many files have changed in this diff.