Merge remote-tracking branch 'upstream/main' into dev-1.32

Merged main into dev-1.32 to keep in sync
pull/48344/head
michellengnx 2024-10-14 14:52:37 -04:00
commit 6d8076a746
68 changed files with 3168 additions and 1214 deletions

View File

@ -1,5 +1,6 @@
@import "reset";
// header
$full-width-paddingX: 20px;
@ -35,6 +36,7 @@ $vendor-strip-font-size: 16px;
$video-section-height: 200px;
@import "size";
@import "documentation";
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
@ -898,34 +900,3 @@ footer.row {
margin-right: initial;
}
/* DOCUMENTATION */
// nav-tabs and tab-content
.nav-tabs {
border-bottom: none !important;
.nav-item {
margin-bottom: 0;
}
}
.td-content .tab-content .highlight {
margin: 0;
}
//Table Content
.tab-content table{
border-collapse: separate;
border-spacing: 6px;
}
.tab-pane {
border-radius: 0.25rem;
padding: 0 16px 16px;
overflow: auto;
border: 1px solid #dee2e6;
&:first-of-type.active {
border-top-left-radius: 0;
}
}

View File

@ -1,5 +1,10 @@
$announcement-size-adjustment: 8px;
// Although most of the site is documentation, that's not the only thing
// on the site. This file has customizations not specific to documentation.
// For documentation styles, see _documentation.scss
/* GLOBAL */
.td-main {
.row {
@ -393,433 +398,8 @@ footer {
}
}
/* DOCS */
table tr.cve-status-open, table tr.cve-status-unknown {
> td.cve-item-summary {
font-weight: bold;
}
}
.launch-cards {
padding: 0;
display: grid;
grid-template-columns: repeat(3, 1fr);
row-gap: 1em;
.launch-card {
display: flex;
padding: 0 30px 0 0;
.card-content{
width: fit-content;
display: flex;
flex-direction: column;
margin: 0;
row-gap: 1em;
h2 {
font-size: 1.75em;
padding: 0.5em 0;
margin: 0;
a {
display: none;
}
}
p {
margin: 0;
}
ul {
list-style: none;
height: fit-content;
line-height: 1.6;
padding: 0;
margin-block-end: auto;
}
br {
display: none;
}
button {
height: min-content;
width: auto;
padding: .5em 1em;
cursor: pointer;
box-sizing: border-box;
}
}
}
@media only screen and (max-width: 1000px) {
grid-template-columns: 1fr;
.launch-card {
width: 100%;
}
}
}
// table of contents
.td-toc {
padding-top: 1.5rem !important;
top: 5rem !important;
@supports (position: sticky) {
position: sticky !important;
height: calc(100vh - 10rem);
overflow-y: auto;
}
#TableOfContents {
padding-top: 1rem;
}
}
main {
/* SCSS Related to the Metrics list */
div.metric:nth-of-type(odd) { // Look & Feel , Aesthetics
background-color: $light-grey;
}
div.metrics {
.metric {
div:empty{
display: none;
}
display: flex;
flex-direction: column;
flex-wrap: wrap;
gap: .75em;
padding:.75em .75em .75em .75em;
.metric_name{
font-size: large;
font-weight: bold;
word-break: break-word;
}
label{
font-weight: bold;
margin-right: .5em;
}
ul {
li:empty{
display: none;
}
display: flex;
flex-direction: column;
gap: .75em;
flex-wrap: wrap;
li.metric_labels_varying{
span{
display: inline-block;
background-color: rgb(240, 239, 239);
padding: 0 0.5em;
margin-right: .35em;
font-family: monospace;
border: 1px solid rgb(230 , 230 , 230);
border-radius: 5%;
margin-bottom: .35em;
}
}
}
}
}
}
// blockquotes and callouts
body {
.alert {
// Override Docsy styles
padding: 0.4rem 0.4rem 0.4rem 1rem;
border-top: 1px solid #eee;
border-bottom: 1px solid #eee;
border-right: 1px solid #eee;
border-radius: 0.25em;
border-left-width: 0.5em; // fallback in case calc() is missing
background: #fff;
color: #000;
margin-top: 0.5em;
margin-bottom: 0.5em;
}
// Set minimum width and radius for alert color
.alert {
border-left-width: calc(max(0.5em, 4px));
border-top-left-radius: calc(max(0.5em, 4px));
border-bottom-left-radius: calc(max(0.5em, 4px));
padding-top: 0.75rem;
}
.alert.alert-caution {
border-left-color: #f0ad4e;
}
.alert.alert-info {
border-left-color: #428bca;
h4, h4.alert-heading {
color: #000;
display: block;
float: initial;
font-size: 1rem;
padding: 0;
padding-right: 0.5rem;
margin: 0;
line-height: 1.5;
font-weight: bolder;
}
}
.alert.alert-caution {
border-left-color: #f0ad4e;
h4, h4.alert-heading {
font-size: 1em;
font-weight: bold;
}
}
.alert.alert-warning {
border-left-color: #d9534f;
}
.alert.third-party-content {
border-left-color: #444;
}
h1:first-of-type + .alert.callout {
margin-top: 1.5em;
}
div.feature-state-notice {
background-color: #daeaf9;
border-radius: 0.75rem;
padding: 1rem;
margin-bottom: 1em;
font-size: 1.2em;
> .feature-state-name::before {
content: '';
color: #326ce5; // Kubernetes blue
}
> .feature-state-name {
display: inline-block;
font-size: 0.95em;
font-weight: bold;
color: #000;
background-color: #daeaf9;
}
code {
color: #000;
font-size: 1em;
background-color: #daeaf9;
}
margin-right: 2em;
max-width: 80%;
}
}
// Special color for third party content disclaimers
.alert.third-party-content { border-left-color: #222 };
// Highlight disclaimer when targeted as a fragment
#third-party-content-disclaimer {
color: #000;
background: #f8f9fa;
transition: all 0.5s ease;
}
@keyframes disclaimer-highlight {
from { background: #f8f922; color: #000; }
50% { background: #f8f944; color: #000; }
to { background: #f8f9cb; color: #000; }
}
#third-party-content-disclaimer:target {
color: #000;
animation: disclaimer-highlight 1.25s ease;
background: #f8f9cb;
}
.deprecation-warning, .pageinfo.deprecation-warning {
padding: clamp(10px, 2vmin, 20px);
margin: clamp(10px, 1vh, 20px) 0;
background-color: #faf5b6;
color: #000;
}
.deprecation-warning.outdated-blog, .pageinfo.deprecation-warning.outdated-blog {
background-color: $blue;
color: $white;
}
body.td-home .deprecation-warning, body.td-blog .deprecation-warning, body.td-documentation .deprecation-warning {
border-radius: 3px;
}
.deprecation-warning p:only-child {
margin-bottom: 0;
}
.td-documentation .td-content > .highlight {
max-width: initial;
width: 100%;
}
body.td-home #deprecation-warning {
max-width: 1000px;
margin-top: 2.5rem;
margin-left: auto;
margin-right: auto;
}
body.glossary {
main {
ul.glossary-terms > li {
list-style-type: none;
padding: 0.5em;
padding-bottom: calc(min(0.5em, 0.25em + 0.15vh ));
margin: 0;
margin-top: calc(min(1.0em, 0.25em + 0.15vh ));
}
ul.glossary-terms > li.hide {
display: none;
}
ul.glossary-terms > li:has(.term-anchor:target) {
border-left: 0.3em solid $blue;
background: rgba(#999999, 0.2);
}
#tag-container {
float: left;
max-width: calc(max(80%, 100em));
border-top: 1px solid #999999;
border-bottom: 1px solid #999999;
padding: 0.5em 0;
margin: 2em 0;
> p {
display: inline-block;
padding-top: 0.2em;
}
.hide {
display: none;
}
.tag-option {
border-radius: 0.33em;
padding: 0.5em;
padding-left: 0.6em;
padding-right: 0.75em;
margin: 0.75em;
margin-top: 0.1em;
float: left;
font-weight: bold;
font-size: 0.925em;
}
.tag-option:not(.canonical-tag):hover {
outline: 1.5px solid $blue;
}
.tag-description {
margin-left: auto;
margin-right: auto;
padding: 0.2em;
padding-bottom: 0.8em;
text-align: center;
}
.canonical-tag {
color: white;
background-color: #999999;
}
.canonical-tag a {
color: inherit;
background: transparent;
text-decoration: none !important;
}
.active-tag {
color: $white;
background-color: $blue;
}
// darken on hover
.canonical-tag:hover {
background: darken(#999999, 15%)
}
.canonical-tag.active-tag:hover {
background: darken($blue, 15%)
}
}
.term-anchor:target + .term-name > span {
color: $blue;
}
.term-anchor:target {
visibility: initial;
}
.glossary-term-name {
font-weight: bold;
display: inline-block;
padding-left: 0.25em;
padding-right: 0.25em;
}
.glossary-aka {
display: inline-block;
padding-left: 0.25em;
padding-right: 0.25em;
padding-bottom: 0.25em;
}
#glossary-details-before {
margin-top: 3em;
font-style: italic;
clear: both;
}
.preview-text {
display: inline-block;
margin-bottom: 0.2em;
}
.preview-text + * {
margin-top: 0.2em;
}
.term-definition {
margin-left: calc(min(2em, 0.5em + 0.75vw));
.hide {
display: none;
}
}
.glossary-aka {
font-style: italic;
}
.preview-text p {
display: inline;
}
.permalink {
display: inline-block;
background-image: url(../images/link.png);
background-repeat: no-repeat;
background-size: contain;
width: 1em;
height: 1em;
padding-left: 0.1em;
}
.term-name:hover {
color: $blue;
}
.term-name:not(:hover) > .permalink {
visibility: hidden;
}
.term-anchor {
display: block;
position: relative;
top: -4rem; // adjust scrolling to target
visibility: hidden;
}
.invisible {
visibility: hidden;
}
}
}
/* COMMUNITY */
body.cid-community {
section.linkbox {
@ -878,13 +458,6 @@ body.cid-partners {
margin-bottom: 2em;
}
/* COLUMN SETUP */
.col {
display: block;
float:left;
margin: 1% 0 1% 1.6%;
background-color: #f9f9f9;
}
.col:first-child { margin-left: 0; }

View File

@ -0,0 +1,439 @@
/* DOCUMENTATION */
// nav-tabs and tab-content
.nav-tabs {
border-bottom: none !important;
.nav-item {
margin-bottom: 0;
}
}
.td-content .tab-content .highlight {
margin: 0;
}
//Table Content
.tab-content table{
border-collapse: separate;
border-spacing: 6px;
}
.tab-pane {
border-radius: 0.25rem;
padding: 0 16px 16px;
overflow: auto;
border: 1px solid #dee2e6;
&:first-of-type.active {
border-top-left-radius: 0;
}
}
table tr.cve-status-open, table tr.cve-status-unknown {
> td.cve-item-summary {
font-weight: bold;
}
}
.launch-cards {
padding: 0;
display: grid;
grid-template-columns: repeat(3, 1fr);
row-gap: 1em;
.launch-card {
display: flex;
padding: 0 30px 0 0;
.card-content{
width: fit-content;
display: flex;
flex-direction: column;
margin: 0;
row-gap: 1em;
h2 {
font-size: 1.75em;
padding: 0.5em 0;
margin: 0;
a {
display: none;
}
}
p {
margin: 0;
}
ul {
list-style: none;
height: fit-content;
line-height: 1.6;
padding: 0;
margin-block-end: auto;
}
br {
display: none;
}
button {
height: min-content;
width: auto;
padding: .5em 1em;
cursor: pointer;
box-sizing: border-box;
}
}
}
@media only screen and (max-width: 1000px) {
grid-template-columns: 1fr;
.launch-card {
width: 100%;
}
}
}
// blockquotes and callouts
body {
.alert {
// Override Docsy styles
padding: 0.4rem 0.4rem 0.4rem 1rem;
border-top: 1px solid #eee;
border-bottom: 1px solid #eee;
border-right: 1px solid #eee;
border-radius: 0.25em;
border-left-width: 0.5em; // fallback in case calc() is missing
background: #fff;
color: #000;
margin-top: 0.5em;
margin-bottom: 0.5em;
}
// Set minimum width and radius for alert color
.alert {
border-left-width: calc(max(0.5em, 4px));
border-top-left-radius: calc(max(0.5em, 4px));
border-bottom-left-radius: calc(max(0.5em, 4px));
padding-top: 0.75rem;
}
.alert.alert-caution {
border-left-color: #f0ad4e;
}
.alert.alert-info {
border-left-color: #428bca;
h4, h4.alert-heading {
color: #000;
display: block;
float: initial;
font-size: 1rem;
padding: 0;
padding-right: 0.5rem;
margin: 0;
line-height: 1.5;
font-weight: bolder;
}
}
.alert.alert-caution {
border-left-color: #f0ad4e;
h4, h4.alert-heading {
font-size: 1em;
font-weight: bold;
}
}
.alert.alert-warning {
border-left-color: #d9534f;
}
.alert.third-party-content {
border-left-color: #444;
}
h1:first-of-type + .alert.callout {
margin-top: 1.5em;
}
div.feature-state-notice {
background-color: #daeaf9;
border-radius: 0.75rem;
padding: 1rem;
margin-bottom: 1em;
font-size: 1.2em;
> .feature-state-name::before {
content: '';
color: #326ce5; // Kubernetes blue
}
> .feature-state-name {
display: inline-block;
font-size: 0.95em;
font-weight: bold;
color: #000;
background-color: #daeaf9;
}
code {
color: #000;
font-size: 1em;
background-color: #daeaf9;
}
margin-right: 2em;
max-width: 80%;
}
}
// Special color for third party content disclaimers
.alert.third-party-content { border-left-color: #222 };
// Highlight disclaimer when targeted as a fragment
#third-party-content-disclaimer {
color: #000;
background: #f8f9fa;
transition: all 0.5s ease;
}
@keyframes disclaimer-highlight {
from { background: #f8f922; color: #000; }
50% { background: #f8f944; color: #000; }
to { background: #f8f9cb; color: #000; }
}
#third-party-content-disclaimer:target {
color: #000;
animation: disclaimer-highlight 1.25s ease;
background: #f8f9cb;
}
.deprecation-warning, .pageinfo.deprecation-warning {
padding: clamp(10px, 2vmin, 20px);
margin: clamp(10px, 1vh, 20px) 0;
background-color: #faf5b6;
color: #000;
}
.deprecation-warning.outdated-blog, .pageinfo.deprecation-warning.outdated-blog {
background-color: $blue;
color: $white;
}
body.td-home .deprecation-warning, body.td-blog .deprecation-warning, body.td-documentation .deprecation-warning {
border-radius: 3px;
}
.deprecation-warning p:only-child {
margin-bottom: 0;
}
.td-documentation .td-content > .highlight {
max-width: initial;
width: 100%;
}
body.td-home #deprecation-warning {
max-width: 1000px;
margin-top: 2.5rem;
margin-left: auto;
margin-right: auto;
}
body.glossary {
main {
ul.glossary-terms > li {
list-style-type: none;
padding: 0.5em;
padding-bottom: calc(min(0.5em, 0.25em + 0.15vh ));
margin: 0;
margin-top: calc(min(1.0em, 0.25em + 0.15vh ));
}
ul.glossary-terms > li.hide {
display: none;
}
ul.glossary-terms > li:has(.term-anchor:target) {
border-left: 0.3em solid $blue;
background: rgba(#999999, 0.2);
}
#tag-container {
float: left;
max-width: calc(max(80%, 100em));
border-top: 1px solid #999999;
border-bottom: 1px solid #999999;
padding: 0.5em 0;
margin: 2em 0;
> p {
display: inline-block;
padding-top: 0.2em;
}
.hide {
display: none;
}
.tag-option {
border-radius: 0.33em;
padding: 0.5em;
padding-left: 0.6em;
padding-right: 0.75em;
margin: 0.75em;
margin-top: 0.1em;
float: left;
font-weight: bold;
font-size: 0.925em;
}
.tag-option:not(.canonical-tag):hover {
outline: 1.5px solid $blue;
}
.tag-description {
margin-left: auto;
margin-right: auto;
padding: 0.2em;
padding-bottom: 0.8em;
text-align: center;
}
.canonical-tag {
color: white;
background-color: #999999;
}
.canonical-tag a {
color: inherit;
background: transparent;
text-decoration: none !important;
}
.active-tag {
color: $white;
background-color: $blue;
}
// darken on hover
.canonical-tag:hover {
background: darken(#999999, 15%)
}
.canonical-tag.active-tag:hover {
background: darken($blue, 15%)
}
}
.term-anchor:target + .term-name > span {
color: $blue;
}
.term-anchor:target {
visibility: initial;
}
.glossary-term-name {
font-weight: bold;
display: inline-block;
padding-left: 0.25em;
padding-right: 0.25em;
}
.glossary-aka {
display: inline-block;
padding-left: 0.25em;
padding-right: 0.25em;
padding-bottom: 0.25em;
}
#glossary-details-before {
margin-top: 3em;
font-style: italic;
clear: both;
}
.preview-text {
display: inline-block;
margin-bottom: 0.2em;
}
.preview-text + * {
margin-top: 0.2em;
}
.term-definition {
margin-left: calc(min(2em, 0.5em + 0.75vw));
.hide {
display: none;
}
}
.glossary-aka {
font-style: italic;
}
.preview-text p {
display: inline;
}
.permalink {
display: inline-block;
background-image: url(../images/link.png);
background-repeat: no-repeat;
background-size: contain;
width: 1em;
height: 1em;
padding-left: 0.1em;
}
.term-name:hover {
color: $blue;
}
.term-name:not(:hover) > .permalink {
visibility: hidden;
}
.term-anchor {
display: block;
position: relative;
top: -4rem; // adjust scrolling to target
visibility: hidden;
}
.invisible {
visibility: hidden;
}
}
}
/* SCSS related to the list of metrics in Kubernetes */
main {
div.metric:nth-of-type(odd) { // Look & Feel , Aesthetics
background-color: $light-grey;
}
div.metrics {
.metric {
div:empty{
display: none;
}
display: flex;
flex-direction: column;
flex-wrap: wrap;
gap: .75em;
padding:.75em .75em .75em .75em;
.metric_name{
font-size: large;
font-weight: bold;
word-break: break-word;
}
label{
font-weight: bold;
margin-right: .5em;
}
ul {
li:empty{
display: none;
}
display: flex;
flex-direction: column;
gap: .75em;
flex-wrap: wrap;
li.metric_labels_varying{
span{
display: inline-block;
background-color: rgb(240, 239, 239);
padding: 0 0.5em;
margin-right: .35em;
font-family: monospace;
border: 1px solid rgb(230 , 230 , 230);
border-radius: 5%;
margin-bottom: .35em;
}
}
}
}
}
}

View File

@ -1,5 +0,0 @@
$blue: #326ce5;
$light-grey: #f7f7f7;
$dark-grey: #303030;
$medium-grey: #4c4c4c;
$white: #ffffff;

View File

@ -0,0 +1,12 @@
/* This file is provided by Docsy as an extension point for styling.
Add styles or import other files. */
@import "reset";
//K8S-Docsy integration
@import "custom";
//Media queries
@import "base";
@import "tablet";
@import "desktop";

View File

@ -1,19 +1,14 @@
/* This file is provided by Docsy as an entry point to styling.
Add styles or override variables from the theme here. */
@import "reset";
@import "skin";
//K8S-Docsy integration
@import "custom";
//Media queries
@import "base";
@import "tablet";
@import "desktop";
/* This file is provided by Docsy as an extension point for styling.
Override variables from the theme here. */
$primary: #326ce5;
$blue: #326ce5;
$light-grey: #f7f7f7;
$dark-grey: #303030;
$medium-grey: #4c4c4c;
$white: #ffffff;
// tooltip
$tooltip-bg: #555;
$tooltip-arrow-color: $tooltip-bg !default;

View File

@ -13,6 +13,10 @@ card:
<!-- overview -->
This page will discuss containers and container images, as well as their use in operations and solution development.
The word _container_ is an overloaded term. Whenever you use the word, check whether your audience uses the same definition.
Each container that you run is repeatable; the standardization from having
dependencies included means that you get the same behavior wherever you
run it.

View File

@ -114,7 +114,7 @@ pay special attention to restricting misuse there.
things: isolation between different applications, and a mechanism to combine
those isolated applications to run on the same host computer. Those two
aspects, isolation and aggregation, mean that runtime security involves
trade-offs and finding an appropriate balance.
identifying trade-offs and finding an appropriate balance.
Kubernetes relies on a {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}
to actually set up and run containers. The Kubernetes project does

View File

@ -90,7 +90,8 @@ profile to a more permissive profile.
{{</note>}}
To learn how to implement seccomp in Kubernetes, refer to
[Restrict a Container's Syscalls with seccomp](/docs/tutorials/security/seccomp/).
[Restrict a Container's Syscalls with seccomp](/docs/tutorials/security/seccomp/)
or the [Seccomp node reference](/docs/reference/node/seccomp/).
To learn more about seccomp, see
[Seccomp BPF](https://www.kernel.org/doc/html/latest/userspace-api/seccomp_filter.html)
@ -288,3 +289,4 @@ of support that you need. For instructions, refer to
* [Learn how to use AppArmor](/docs/tutorials/security/apparmor/)
* [Learn how to use seccomp](/docs/tutorials/security/seccomp/)
* [Learn how to use SELinux](/docs/tasks/configure-pod-container/security-context/#assign-selinux-labels-to-a-container)
* [Seccomp Node Reference](/docs/reference/node/seccomp/)

View File

@ -286,7 +286,7 @@ workloads running in a shared cluster. Running workloads in a sandbox environmen
insulate the host from container escapes, where an attacker exploits a vulnerability to gain
access to the host system and all the processes/files running on that host.
Virtual machines and userspace kernels are 2 popular approaches to sandboxing. The following
Virtual machines and userspace kernels are two popular approaches to sandboxing. The following
sandboxing implementations are available:
* [gVisor](https://gvisor.dev/) intercepts syscalls from containers and runs them through a
@ -463,7 +463,6 @@ listed below.
#### Multi-team tenancy
* [Capsule](https://github.com/clastix/capsule)
* [Kiosk](https://github.com/loft-sh/kiosk)
#### Multi-customer tenancy
@ -514,4 +513,4 @@ project provides an implementation of virtual control planes.
#### Other implementations
* [Kamaji](https://github.com/clastix/kamaji)
* [vcluster](https://github.com/loft-sh/vcluster)
* [vcluster](https://github.com/loft-sh/vcluster)

View File

@ -68,6 +68,12 @@ next init container from the ordered `.spec.initContainers` list.
That status either becomes true because there is a process running in the
container and no startup probe defined, or as a result of its `startupProbe` succeeding.
Upon Pod [termination](/docs/concepts/workloads/pods/pod-lifecycle/#termination-with-sidecars),
the kubelet postpones terminating sidecar containers until the main application container has fully stopped.
The sidecar containers are then shut down in the opposite order of their appearance in the Pod specification.
This approach ensures that the sidecars remain operational, supporting other containers within the Pod,
until their service is no longer required.
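As a sketch, a Pod whose sidecar is declared as a restartable init container (names and images here are hypothetical); on termination, the kubelet stops `app` first and `log-shipper` afterwards:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app-with-sidecar
spec:
  initContainers:
    - name: log-shipper               # sidecar: an init container with restartPolicy: Always
      image: registry.example/log-shipper:1.0
      restartPolicy: Always
  containers:
    - name: app                       # main application container; stops before the sidecar
      image: registry.example/app:1.0
```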
### Jobs with sidecar containers
If you define a Job that uses a sidecar using Kubernetes-style init containers,

View File

@ -9,7 +9,9 @@ aka:
tags:
- fundamental
---
Specification of a Kubernetes API object in JSON or YAML format.
Specification of a Kubernetes API object in [JSON](https://www.json.org/json-en.html)
or [YAML](https://yaml.org/) format.
<!--more-->
A manifest specifies the desired state of an object that Kubernetes will maintain when you apply the manifest. Each configuration file can contain multiple manifests.
A manifest specifies the desired state of an object that Kubernetes will maintain when you apply the manifest.
For YAML format, each file can contain multiple manifests.
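As a minimal sketch, one YAML file carrying two manifests separated by the `---` document marker (names and images are hypothetical):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-config
data:
  greeting: hello
---
apiVersion: v1
kind: Pod
metadata:
  name: example-pod
spec:
  containers:
    - name: app
      image: registry.example/app:1.0
```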

View File

@ -593,6 +593,16 @@ receiving traffic for the Service from the moment the kubelet starts all contain
and marks it _Running_, until the kubelet stops all containers and deletes the pod from
the API server.
### autoscaling.alpha.kubernetes.io/behavior (deprecated) {#autoscaling-alpha-kubernetes-io-behavior}
Type: Annotation
Used on: HorizontalPodAutoscaler
This annotation was used to configure the scaling behavior for a HorizontalPodAutoscaler (HPA) in earlier Kubernetes versions.
It allowed you to specify how the HPA should scale pods up or down, including setting stabilization windows and scaling policies.
Setting this annotation has no effect in any supported release of Kubernetes.
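In supported releases, the equivalent configuration lives in the `behavior` field of the `autoscaling/v2` HorizontalPodAutoscaler API instead; a minimal sketch (names and values are illustrative):
```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: example-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: example-app
  minReplicas: 2
  maxReplicas: 10
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300   # wait 5 minutes before scaling down
```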
### kubernetes.io/hostname {#kubernetesiohostname}
Type: Label

View File

@ -249,6 +249,18 @@ these are:
waiting for a fast one; if all servers are busy, the algorithm falls back to the `sed`
behavior.
* `mh` (Maglev Hashing): Assigns incoming jobs based on
[Google's Maglev hashing algorithm](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/44824.pdf).
This scheduler has two flags: `mh-fallback`, which enables fallback to a different
server if the selected server is unavailable, and `mh-port`, which adds the source port number to
the hash computation. When using `mh`, kube-proxy always sets the `mh-port` flag and does not
enable the `mh-fallback` flag.
In IPVS proxy mode, `mh` works like source hashing (`sh`), but takes source ports into account.
These scheduling algorithms are configured through the
[`ipvs.scheduler`](/docs/reference/config-api/kube-proxy-config.v1alpha1/#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPVSConfiguration)
field in the kube-proxy configuration.
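For example, a minimal KubeProxyConfiguration sketch selecting the `mh` scheduler (this assumes IPVS support is available on the node):
```yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
  scheduler: "mh"   # Maglev hashing; kube-proxy sets mh-port and leaves mh-fallback disabled
```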
{{< note >}}
To run kube-proxy in IPVS mode, you must make IPVS available on
the node before starting kube-proxy.

View File

@ -13,8 +13,12 @@ This section contains the following reference topics about nodes:
* [Node Labels Populated By The Kubelet](/docs/reference/node/node-labels)
* [Local Files And Paths Used By The Kubelet](/docs/reference/node/kubelet-files)
* [Node `.status` information](/docs/reference/node/node-status/)
* [Seccomp information](/docs/reference/node/seccomp/)
You can also read node reference details from elsewhere in the
Kubernetes documentation, including:

View File

@ -0,0 +1,172 @@
---
content_type: "reference"
title: Local Files And Paths Used By The Kubelet
weight: 42
---
The {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} is mostly a stateless
process running on a Kubernetes {{< glossary_tooltip text="node" term_id="node" >}}.
This document outlines files that kubelet reads and writes.
{{< note >}}
This document is informational; it does not describe any guaranteed behaviors or APIs.
It lists resources used by the kubelet, which are implementation details and subject to change in any release.
{{< /note >}}
The kubelet typically uses the {{< glossary_tooltip text="control plane" term_id="control-plane" >}} as
the source of truth on what needs to run on the Node, and the
{{<glossary_tooltip text="container runtime" term_id="container-runtime">}} to retrieve
the current state of containers. So long as you provide a _kubeconfig_ (API client configuration)
to the kubelet, the kubelet connects to your control plane; otherwise the node operates in
_standalone mode_.
On Linux nodes, the kubelet also relies on reading cgroups and various system files to collect metrics.
On Windows nodes, the kubelet collects metrics via a different mechanism that does not rely on
paths.
The kubelet also uses a few other files, and it communicates over local Unix-domain sockets. Some are sockets that the
kubelet listens on; others the kubelet discovers and then connects to
as a client.
{{< note >}}
This page lists paths as Linux paths, which map to the Windows paths by adding a root disk
`C:\` in place of `/` (unless specified otherwise). For example, `/var/lib/kubelet/device-plugins` maps to `C:\var\lib\kubelet\device-plugins`.
{{< /note >}}
## Configuration
### Kubelet configuration files
The path to the kubelet configuration file can be configured
using the command line argument `--config`. The kubelet also supports
[drop-in configuration files](/docs/tasks/administer-cluster/kubelet-config-file/#kubelet-conf-d)
to enhance configuration.
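As a minimal sketch, a kubelet configuration file passed via `--config` (the field value is illustrative):
```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
staticPodPath: /etc/kubernetes/manifests   # where the kubelet looks for static Pod manifests
```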
### Certificates
Certificates and private keys are typically located at `/var/lib/kubelet/pki`,
but can be configured using the `--cert-dir` kubelet command line argument.
Names of certificate files are also configurable.
### Manifests
Manifests for static pods are typically located in `/etc/kubernetes/manifests`.
Location can be configured using the `staticPodPath` kubelet configuration option.
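A static Pod manifest placed in that directory is an ordinary Pod definition; a hypothetical sketch:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: static-web
spec:
  containers:
    - name: web
      image: registry.example/nginx:1.0   # hypothetical image reference
      ports:
        - containerPort: 80
```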
### Systemd unit settings
When the kubelet runs as a systemd unit, some kubelet configuration may be declared
in the systemd unit settings file. Typically this includes:
- command line arguments to [run kubelet](/docs/reference/command-line-tools-reference/kubelet/)
- environment variables, used by the kubelet or to [configure the Go runtime](https://pkg.go.dev/runtime#hdr-Environment_Variables)
## State
### Checkpoint files for resource managers {#resource-managers-state}
All resource managers keep the mapping of Pods to allocated resources in state files.
State files are located in the kubelet's base directory, also termed the _root directory_
(but not the same as `/`, the node root directory). You can configure the base directory
for the kubelet
using the kubelet command line argument `--root-dir`.
Names of files:
- `memory_manager_state` for the [Memory Manager](/docs/tasks/administer-cluster/memory-manager/)
- `cpu_manager_state` for the [CPU Manager](/docs/tasks/administer-cluster/cpu-management-policies/)
- `dra_manager_state` for [DRA](/docs/concepts/scheduling-eviction/dynamic-resource-allocation/)
### Checkpoint file for device manager {#device-manager-state}
The device manager creates checkpoints in the same directory as its socket files: `/var/lib/kubelet/device-plugins/`.
The checkpoint file for the [Device Manager](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-integration-with-the-topology-manager) is named `kubelet_internal_checkpoint`.
### Pod status checkpoint storage {#pod-status-manager-state}
{{< feature-state feature_gate_name="InPlacePodVerticalScaling" >}}
If your cluster has
[in-place Pod vertical scaling](/docs/concepts/workloads/autoscaling/#in-place-resizing)
enabled ([feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
name `InPlacePodVerticalScaling`), then the kubelet stores a local record of Pod status.
The file name is `pod_status_manager_state` within the kubelet base directory
(`/var/lib/kubelet` by default on Linux; configurable using `--root-dir`).
### Container runtime
The kubelet communicates with the container runtime using a socket configured via these
configuration parameters:
- `containerRuntimeEndpoint` for runtime operations
- `imageServiceEndpoint` for image management operations
The actual values of those endpoints depend on the container runtime being used.
### Device plugins
The kubelet exposes a socket at the path `/var/lib/kubelet/device-plugins/kubelet.sock` for
various [Device Plugins to register](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-implementation).
When a device plugin registers itself, it provides its socket path for the kubelet to connect.
The device plugin socket should be in the directory `device-plugins` within the kubelet base
directory. On a typical Linux node, this means `/var/lib/kubelet/device-plugins`.
### Pod resources API
The [Pod Resources API](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources)
is exposed at the path `/var/lib/kubelet/pod-resources`.
### DRA, CSI, and Device plugins
The kubelet looks for socket files created by device plugins managed via [DRA](/docs/concepts/scheduling-eviction/dynamic-resource-allocation/),
device manager, or storage plugins, and then attempts to connect
to these sockets. The directory that the kubelet looks in is `plugins_registry` within the kubelet base
directory, so on a typical Linux node this means `/var/lib/kubelet/plugins_registry`.
Note that there are two alternative registration mechanisms for device plugins; only one should be used for a given plugin.
The types of plugins that can place socket files into that directory (typically `/var/lib/kubelet/plugins_registry`) are:
- CSI plugins
- DRA plugins
- Device Manager plugins
## Security profiles & configuration
### Seccomp
Seccomp profile files referenced from Pods should be placed in `/var/lib/kubelet/seccomp`.
See the [seccomp reference](/docs/reference/node/seccomp/) for details.
### AppArmor
The kubelet does not load or refer to AppArmor profiles by a Kubernetes-specific path.
AppArmor profiles are loaded via the node operating system rather than referenced by their path.
## Locking
{{< feature-state state="alpha" for_k8s_version="v1.2" >}}
A lock file for the kubelet; typically `/var/run/kubelet.lock`. The kubelet uses this to ensure
that two different kubelets don't try to run in conflict with each other.
You can configure the path to the lock file using the `--lock-file` kubelet command line argument.
If two kubelets on the same node use a different value for the lock file path, they will not be able to
detect a conflict when both are running.
## {{% heading "whatsnext" %}}
- Learn about the kubelet [command line arguments](/docs/reference/command-line-tools-reference/kubelet/).
- Review the [Kubelet Configuration (v1beta1) reference](/docs/reference/config-api/kubelet-config.v1beta1/)

View File

@ -0,0 +1,151 @@
---
content_type: reference
title: Seccomp and Kubernetes
weight: 80
---
<!-- overview -->
Seccomp stands for secure computing mode and has been a feature of the Linux
kernel since version 2.6.12. It can be used to sandbox the privileges of a
process, restricting the calls it is able to make from userspace into the
kernel. Kubernetes lets you automatically apply seccomp profiles loaded onto a
{{< glossary_tooltip text="node" term_id="node" >}} to your Pods and containers.
## Seccomp fields
{{< feature-state for_k8s_version="v1.19" state="stable" >}}
There are four ways to specify a seccomp profile for a
{{< glossary_tooltip text="pod" term_id="pod" >}}:
- for the whole Pod using [`spec.securityContext.seccompProfile`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context)
- for a single container using [`spec.containers[*].securityContext.seccompProfile`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1)
- for a (restartable / sidecar) init container using [`spec.initContainers[*].securityContext.seccompProfile`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1)
- for an [ephemeral container](/docs/concepts/workloads/pods/ephemeral-containers) using [`spec.ephemeralContainers[*].securityContext.seccompProfile`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2)
{{% code_sample file="pods/security/seccomp/fields.yaml" %}}
The Pod in the example above runs as `Unconfined`, while the
`ephemeral-container` and `init-container` specifically define
`RuntimeDefault`. If the ephemeral or init container had not set the
`securityContext.seccompProfile` field explicitly, the value would be
inherited from the Pod. The same applies to the container, which runs a
`Localhost` profile `my-profile.json`.
Generally speaking, fields from (ephemeral) containers have a higher priority
than the Pod-level value, while containers which do not set the seccomp field
inherit the profile from the Pod.
{{< note >}}
It is not possible to apply a seccomp profile to a Pod or container running with
`privileged: true` set in the container's `securityContext`. Privileged
containers always run as `Unconfined`.
{{< /note >}}
The following values are possible for the `seccompProfile.type`:
`Unconfined`
: The workload runs without any seccomp restrictions.
`RuntimeDefault`
: A default seccomp profile defined by the
{{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}
is applied. The default profiles aim to provide a strong set of security
defaults while preserving the functionality of the workload. It is possible that
the default profiles differ between container runtimes and their release
versions, for example when comparing those from
{{< glossary_tooltip text="CRI-O" term_id="cri-o" >}} and
{{< glossary_tooltip text="containerd" term_id="containerd" >}}.
`Localhost`
: The `localhostProfile` will be applied, which has to be available on the node
disk (on Linux it's `/var/lib/kubelet/seccomp`). The availability of the seccomp
profile is verified by the
{{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}
on container creation. If the profile does not exist, then the container
creation will fail with a `CreateContainerError`.
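Putting these together, a minimal sketch of a Pod that sets `RuntimeDefault` at the Pod level and overrides one container with a `Localhost` profile (the `localhostProfile` path is relative to the node's seccomp profile root, typically `/var/lib/kubelet/seccomp`):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: seccomp-demo
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault                 # default for every container in the Pod
  containers:
    - name: app
      image: registry.example/app:1.0
      securityContext:
        seccompProfile:
          type: Localhost                  # container-level override
          localhostProfile: profiles/my-profile.json   # relative to the seccomp root directory
```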
### `Localhost` profiles
Seccomp profiles are JSON files following the scheme defined by the
[OCI runtime specification](https://github.com/opencontainers/runtime-spec/blob/f329913/config-linux.md#seccomp).
A profile defines actions based on matched syscalls, and also allows
passing specific values as arguments to syscalls. For example:
```json
{
"defaultAction": "SCMP_ACT_ERRNO",
"defaultErrnoRet": 38,
"syscalls": [
{
"names": [
"adjtimex",
"alarm",
"bind",
"waitid",
"waitpid",
"write",
"writev"
],
"action": "SCMP_ACT_ALLOW"
}
]
}
```
The `defaultAction` in the profile above is `SCMP_ACT_ERRNO`, which applies as a
fallback to any syscall not matched by the actions defined in `syscalls`. The error is
returned as code `38` via the `defaultErrnoRet` field.
The following actions are generally possible:
`SCMP_ACT_ERRNO`
: Return the specified error code.
`SCMP_ACT_ALLOW`
: Allow the syscall to be executed.
`SCMP_ACT_KILL_PROCESS`
: Kill the process.
`SCMP_ACT_KILL_THREAD` and `SCMP_ACT_KILL`
: Kill only the thread.
`SCMP_ACT_TRAP`
: Throw a `SIGSYS` signal.
`SCMP_ACT_NOTIFY` and `SECCOMP_RET_USER_NOTIF`
: Notify the user space.
`SCMP_ACT_TRACE`
: Notify a tracing process with the specified value.
`SCMP_ACT_LOG`
: Allow the syscall to be executed after the action has been logged to syslog or
auditd.
Some actions like `SCMP_ACT_NOTIFY` or `SECCOMP_RET_USER_NOTIF` may not be
supported, depending on the container runtime, OCI runtime, or Linux kernel
version being used. There may also be further limitations, for example that
`SCMP_ACT_NOTIFY` cannot be used as `defaultAction` or for certain syscalls like
`write`. All those limitations are defined by either the OCI runtime
([runc](https://github.com/opencontainers/runc),
[crun](https://github.com/containers/crun)) or
[libseccomp](https://github.com/seccomp/libseccomp).
The `syscalls` JSON array contains a list of objects referencing syscalls by
their respective `names`. For example, the action `SCMP_ACT_ALLOW` can be used
to create an allow list of permitted syscalls, as outlined in the example above. It
would also be possible to define another list using the action `SCMP_ACT_ERRNO`
but a different return (`errnoRet`) value.
It is also possible to specify the arguments (`args`) passed to certain
syscalls. More information about those advanced use cases can be found in the
[OCI runtime spec](https://github.com/opencontainers/runtime-spec/blob/f329913/config-linux.md#seccomp)
and the [Seccomp Linux kernel documentation](https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt).
## Further reading
- [Restrict a Container's Syscalls with seccomp](/docs/tutorials/security/seccomp/)
- [Pod Security Standards](/docs/concepts/security/pod-security-standards/)

View File

@ -122,6 +122,171 @@ see the [API reference](/docs/reference/kubernetes-api/) for more information. I
is not possible to access sub-resources across multiple resources - generally a new
virtual resource type would be used if that becomes necessary.
## HTTP media types {#alternate-representations-of-resources}
Over HTTP, Kubernetes supports JSON and Protobuf wire encodings.
{{% note %}}
Although YAML is widely used to define Kubernetes manifests locally, Kubernetes does not
support the [`application/yaml`](https://www.rfc-editor.org/rfc/rfc9512.html) media type
for API operations.
All JSON documents are valid YAML, so you can also use a JSON API response anywhere that is
expecting a YAML input.
{{% /note %}}
By default, Kubernetes returns objects in [JSON serialization](#json-encoding), using the
`application/json` media type. Although JSON is the default, clients may request the more
efficient binary [Protobuf representation](#protobuf-encoding) for better performance at scale.
The Kubernetes API implements standard HTTP content type negotiation: passing an
`Accept` header with a `GET` call will request that the server tries to return
a response in your preferred media type. If you want to send an object in Protobuf to
the server for a `PUT` or `POST` request, you must set the `Content-Type` request header
appropriately.
If you request an available media type, the API server returns a response with a suitable
`Content-Type`; if none of the media types you request are supported, the API server returns
a `406 Not Acceptable` error message.
All built-in resource types support the `application/json` media type.
### JSON resource encoding {#json-encoding}
The Kubernetes API defaults to using [JSON](https://www.json.org/json-en.html) for encoding
HTTP message bodies.
For example:
1. List all of the pods on a cluster, without specifying a preferred format
```
GET /api/v1/pods
---
200 OK
Content-Type: application/json
… JSON encoded collection of Pods (PodList object)
```
1. Create a pod by sending JSON to the server, requesting a JSON response.
```
POST /api/v1/namespaces/test/pods
Content-Type: application/json
Accept: application/json
… JSON encoded Pod object
---
200 OK
Content-Type: application/json
{
"kind": "Pod",
"apiVersion": "v1",
}
```
### Kubernetes Protobuf encoding {#protobuf-encoding}
Kubernetes uses an envelope wrapper to encode [Protobuf](https://protobuf.dev/) responses.
That wrapper starts with a 4 byte magic number to help identify content on disk or in etcd as Protobuf
(as opposed to JSON). The 4 byte magic number data is followed by a Protobuf encoded wrapper message, which
describes the encoding and type of the underlying object. Within the Protobuf wrapper message,
the inner object data is recorded using the `raw` field of Unknown (see the [IDL](#protobuf-encoding-idl)
for more detail).
For example:
1. List all of the pods on a cluster in Protobuf format.
```
GET /api/v1/pods
Accept: application/vnd.kubernetes.protobuf
---
200 OK
Content-Type: application/vnd.kubernetes.protobuf
… binary encoded PodList object
```
1. Create a pod by sending Protobuf encoded data to the server, but request a response
in JSON.
```
POST /api/v1/namespaces/test/pods
Content-Type: application/vnd.kubernetes.protobuf
Accept: application/json
… binary encoded Pod object
---
200 OK
Content-Type: application/json
{
"kind": "Pod",
"apiVersion": "v1",
...
}
```
You can use both techniques together and use Kubernetes' Protobuf encoding to interact with any API that
supports it, for both reads and writes. Only some API resource types are [compatible](#protobuf-encoding-compatibility)
with Protobuf.
<a id="protobuf-encoding-idl" />
The wrapper format is:
```
A four byte magic number prefix:
Bytes 0-3: "k8s\x00" [0x6b, 0x38, 0x73, 0x00]
An encoded Protobuf message with the following IDL:
message Unknown {
// typeMeta should have the string values for "kind" and "apiVersion" as set on the JSON object
optional TypeMeta typeMeta = 1;
// raw will hold the complete serialized object in protobuf. See the protobuf definitions in the client libraries for a given kind.
optional bytes raw = 2;
// contentEncoding is encoding used for the raw data. Unspecified means no encoding.
optional string contentEncoding = 3;
// contentType is the serialization method used to serialize 'raw'. Unspecified means application/vnd.kubernetes.protobuf and is usually
// omitted.
optional string contentType = 4;
}
message TypeMeta {
// apiVersion is the group/version for this type
optional string apiVersion = 1;
// kind is the name of the object schema. A protobuf definition should exist for this object.
optional string kind = 2;
}
```
{{< note >}}
Clients that receive a response in `application/vnd.kubernetes.protobuf` that does
not match the expected prefix should reject the response, as future versions may need
to alter the serialization format in an incompatible way and will do so by changing
the prefix.
{{< /note >}}
#### Compatibility with Kubernetes Protobuf {#protobuf-encoding-compatibility}
Not all API resource types support Kubernetes' Protobuf encoding; specifically, Protobuf isn't
available for resources that are defined as
{{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinitions" >}}
or are served via the
{{< glossary_tooltip text="aggregation layer" term_id="aggregation-layer" >}}.
As a client, if you might need to work with extension types you should specify multiple
content types in the request `Accept` header to support fallback to JSON.
For example:
```
Accept: application/vnd.kubernetes.protobuf, application/json
```
## Efficient detection of changes
@ -602,116 +767,6 @@ extensions, you should make requests that specify multiple content types in the
Accept: application/json;as=Table;g=meta.k8s.io;v=v1, application/json
```
## Alternate representations of resources
By default, Kubernetes returns objects serialized to JSON with content type
`application/json`. This is the default serialization format for the API. However,
clients may request the more efficient
[Protobuf representation](#protobuf-encoding) of these objects for better performance at scale.
The Kubernetes API implements standard HTTP content type negotiation: passing an
`Accept` header with a `GET` call will request that the server tries to return
a response in your preferred media type, while sending an object in Protobuf to
the server for a `PUT` or `POST` call means that you must set the `Content-Type`
header appropriately.
The server will return a response with a `Content-Type` header if the requested
format is supported, or the `406 Not Acceptable` error if none of the media types you
requested are supported. All built-in resource types support the `application/json`
media type.
See the Kubernetes [API reference](/docs/reference/kubernetes-api/) for a list of
supported content types for each API.
For example:
1. List all of the pods on a cluster in Protobuf format.
```
GET /api/v1/pods
Accept: application/vnd.kubernetes.protobuf
---
200 OK
Content-Type: application/vnd.kubernetes.protobuf
... binary encoded PodList object
```
1. Create a pod by sending Protobuf encoded data to the server, but request a response
in JSON.
```
POST /api/v1/namespaces/test/pods
Content-Type: application/vnd.kubernetes.protobuf
Accept: application/json
... binary encoded Pod object
---
200 OK
Content-Type: application/json
{
"kind": "Pod",
"apiVersion": "v1",
...
}
```
Not all API resource types support Protobuf; specifically, Protobuf isn't available for
resources that are defined as
{{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinitions" >}}
or are served via the
{{< glossary_tooltip text="aggregation layer" term_id="aggregation-layer" >}}.
As a client, if you might need to work with extension types you should specify multiple
content types in the request `Accept` header to support fallback to JSON.
For example:
```
Accept: application/vnd.kubernetes.protobuf, application/json
```
### Kubernetes Protobuf encoding {#protobuf-encoding}
Kubernetes uses an envelope wrapper to encode Protobuf responses. That wrapper starts
with a 4 byte magic number to help identify content in disk or in etcd as Protobuf
(as opposed to JSON), and then is followed by a Protobuf encoded wrapper message, which
describes the encoding and type of the underlying object and then contains the object.
The wrapper format is:
```
A four byte magic number prefix:
Bytes 0-3: "k8s\x00" [0x6b, 0x38, 0x73, 0x00]
An encoded Protobuf message with the following IDL:
message Unknown {
// typeMeta should have the string values for "kind" and "apiVersion" as set on the JSON object
optional TypeMeta typeMeta = 1;
// raw will hold the complete serialized object in protobuf. See the protobuf definitions in the client libraries for a given kind.
optional bytes raw = 2;
// contentEncoding is encoding used for the raw data. Unspecified means no encoding.
optional string contentEncoding = 3;
// contentType is the serialization method used to serialize 'raw'. Unspecified means application/vnd.kubernetes.protobuf and is usually
// omitted.
optional string contentType = 4;
}
message TypeMeta {
// apiVersion is the group/version for this type
optional string apiVersion = 1;
// kind is the name of the object schema. A protobuf definition should exist for this object.
optional string kind = 2;
}
```
{{< note >}}
Clients that receive a response in `application/vnd.kubernetes.protobuf` that does
not match the expected prefix should reject the response, as future versions may need
to alter the serialization format in an incompatible way and will do so by changing
the prefix.
{{< /note >}}
## Resource deletion
When you **delete** a resource this takes place in two phases.

View File

@ -24,9 +24,8 @@ control plane's API server component remains available.
## Language overview
The [CEL
language](https://github.com/google/cel-spec/blob/master/doc/langdef.md) has a
straightforward syntax that is similar to the expressions in C, C++, Java,
The [CEL language](https://github.com/google/cel-spec/blob/master/doc/langdef.md)
has a straightforward syntax that is similar to the expressions in C, C++, Java,
JavaScript and Go.
CEL was designed to be embedded into applications. Each CEL "program" is a
@ -67,21 +66,21 @@ Example CEL expressions:
CEL is configured with the following options, libraries and language features, introduced at the specified Kubernetes versions:
| CEL option, library or language feature | Included | Availability |
|------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|---------------------------|
| [Standard macros](https://github.com/google/cel-spec/blob/v0.7.0/doc/langdef.md#macros) | `has`, `all`, `exists`, `exists_one`, `map`, `filter` | All Kubernetes versions |
| [Standard functions](https://github.com/google/cel-spec/blob/master/doc/langdef.md#list-of-standard-definitions) | See [official list of standard definitions](https://github.com/google/cel-spec/blob/master/doc/langdef.md#list-of-standard-definitions) | All Kubernetes versions |
| [Homogeneous Aggregate Literals](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#HomogeneousAggregateLiterals) | | All Kubernetes versions |
| [Default UTC Time Zone](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#DefaultUTCTimeZone) | | All Kubernetes versions |
| [Eagerly Validate Declarations](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#EagerlyValidateDeclarations) | | All Kubernetes versions |
| [extended strings library](https://pkg.go.dev/github.com/google/cel-go/ext#Strings), Version 1 | `charAt`, `indexOf`, `lastIndexOf`, `lowerAscii`, `upperAscii`, `replace`, `split`, `join`, `substring`, `trim` | All Kubernetes versions |
| Kubernetes list library | See [Kubernetes list library](#kubernetes-list-library) | All Kubernetes versions |
| Kubernetes regex library | See [Kubernetes regex library](#kubernetes-regex-library) | All Kubernetes versions |
| Kubernetes URL library | See [Kubernetes URL library](#kubernetes-url-library) | All Kubernetes versions |
| Kubernetes authorizer library | See [Kubernetes authorizer library](#kubernetes-authorizer-library) | All Kubernetes versions |
| Kubernetes quantity library | See [Kubernetes quantity library](#kubernetes-quantity-library) | Kubernetes versions 1.29+ |
| CEL optional types | See [CEL optional types](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#OptionalTypes) | Kubernetes versions 1.29+ |
| CEL CrossTypeNumericComparisons | See [CEL CrossTypeNumericComparisons](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#CrossTypeNumericComparisons) | Kubernetes versions 1.29+ |
| CEL option, library or language feature | Included | Availability |
|-----------------------------------------|----------|-------------|
| [Standard macros](https://github.com/google/cel-spec/blob/v0.7.0/doc/langdef.md#macros) | `has`, `all`, `exists`, `exists_one`, `map`, `filter` | All Kubernetes versions |
| [Standard functions](https://github.com/google/cel-spec/blob/master/doc/langdef.md#list-of-standard-definitions) | See [official list of standard definitions](https://github.com/google/cel-spec/blob/master/doc/langdef.md#list-of-standard-definitions) | All Kubernetes versions |
| [Homogeneous Aggregate Literals](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#HomogeneousAggregateLiterals) | | All Kubernetes versions |
| [Default UTC Time Zone](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#DefaultUTCTimeZone) | | All Kubernetes versions |
| [Eagerly Validate Declarations](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#EagerlyValidateDeclarations) | | All Kubernetes versions |
| [Extended strings library](https://pkg.go.dev/github.com/google/cel-go/ext#Strings), Version 1 | `charAt`, `indexOf`, `lastIndexOf`, `lowerAscii`, `upperAscii`, `replace`, `split`, `join`, `substring`, `trim` | All Kubernetes versions |
| Kubernetes list library | See [Kubernetes list library](#kubernetes-list-library) | All Kubernetes versions |
| Kubernetes regex library | See [Kubernetes regex library](#kubernetes-regex-library) | All Kubernetes versions |
| Kubernetes URL library | See [Kubernetes URL library](#kubernetes-url-library) | All Kubernetes versions |
| Kubernetes authorizer library | See [Kubernetes authorizer library](#kubernetes-authorizer-library) | All Kubernetes versions |
| Kubernetes quantity library | See [Kubernetes quantity library](#kubernetes-quantity-library) | Kubernetes versions 1.29+ |
| CEL optional types | See [CEL optional types](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#OptionalTypes) | Kubernetes versions 1.29+ |
| CEL CrossTypeNumericComparisons | See [CEL CrossTypeNumericComparisons](https://pkg.go.dev/github.com/google/cel-go@v0.17.4/cel#CrossTypeNumericComparisons) | Kubernetes versions 1.29+ |
CEL functions, features and language settings support Kubernetes control plane
rollbacks. For example, _CEL Optional Values_ was introduced at Kubernetes 1.29
@ -144,8 +143,8 @@ godoc for more information.
To make it easier and safer to process URLs, the following functions have been added:
- `isURL(string)` checks if a string is a valid URL according to the [Go's
net/url](https://pkg.go.dev/net/url#URL) package. The string must be an
- `isURL(string)` checks if a string is a valid URL according to the
[Go's net/url](https://pkg.go.dev/net/url#URL) package. The string must be an
absolute URL.
- `url(string) URL` converts a string to a URL or results in an error if the
string is not a valid URL.
@ -158,7 +157,7 @@ Examples:
{{< table caption="Examples of CEL expressions using URL library functions" >}}
| CEL Expression | Purpose |
|-----------------------------------------------------------------|------------------------------------------------|
| `url('https://example.com:80/').getHost()` | Get the 'example.com:80' host part of the URL. |
| `url('https://example.com:80/').getHost()` | Gets the 'example.com:80' host part of the URL |
| `url('https://example.com/path with spaces/').getEscapedPath()` | Returns '/path%20with%20spaces/' |
{{< /table >}}
@ -174,17 +173,17 @@ the authorizer may be used to perform authorization checks for the principal
API resource checks are performed as follows:
1. Specify the group and resource to check: `Authorizer.group(string).resource(string) ResourceCheck`
2. Optionally call any combination of the following builder functions to further narrow the authorization check.
1. Optionally call any combination of the following builder functions to further narrow the authorization check.
Note that these functions return the receiver type and can be chained:
- `ResourceCheck.subresource(string) ResourceCheck`
- `ResourceCheck.namespace(string) ResourceCheck`
- `ResourceCheck.name(string) ResourceCheck`
3. Call `ResourceCheck.check(verb string) Decision` to perform the authorization check.
4. Call `allowed() bool` or `reason() string` to inspect the result of the authorization check.
- `ResourceCheck.subresource(string) ResourceCheck`
- `ResourceCheck.namespace(string) ResourceCheck`
- `ResourceCheck.name(string) ResourceCheck`
1. Call `ResourceCheck.check(verb string) Decision` to perform the authorization check.
1. Call `allowed() bool` or `reason() string` to inspect the result of the authorization check.
Non-resource authorization checks are performed as follows:
1. specify only a path: `Authorizer.path(string) PathCheck`
1. Specify only a path: `Authorizer.path(string) PathCheck`
1. Call `PathCheck.check(httpVerb string) Decision` to perform the authorization check.
1. Call `allowed() bool` or `reason() string` to inspect the result of the authorization check.
@ -193,10 +192,10 @@ To perform an authorization check for a service account:
- `Authorizer.serviceAccount(namespace string, name string) Authorizer`
{{< table caption="Examples of CEL expressions using URL library functions" >}}
| CEL Expression | Purpose |
|--------------------------------------------------------------------------------------------------------------|------------------------------------------------|
| `authorizer.group('').resource('pods').namespace('default').check('create').allowed()` | Returns true if the principal (user or service account) is allowed to create pods in the 'default' namespace. |
| `authorizer.path('/healthz').check('get').allowed()` | Checks if the principal (user or service account) is authorized to make HTTP GET requests to the /healthz API path. |
| CEL Expression | Purpose |
|----------------|---------|
| `authorizer.group('').resource('pods').namespace('default').check('create').allowed()` | Returns true if the principal (user or service account) is allowed to create pods in the 'default' namespace. |
| `authorizer.path('/healthz').check('get').allowed()` | Checks if the principal (user or service account) is authorized to make HTTP GET requests to the /healthz API path. |
| `authorizer.serviceAccount('default', 'myserviceaccount').resource('deployments').check('delete').allowed()` | Checks if the service account is authorized to delete deployments. |
{{< /table >}}
@ -205,9 +204,9 @@ To perform an authorization check for a service account:
With the alpha `AuthorizeWithSelectors` feature enabled, field and label selectors can be added to authorization checks.
{{< table caption="Examples of CEL expressions using selector authorization functions" >}}
| CEL Expression | Purpose |
|--------------------------------------------------------------------------------------------------------------|------------------------------------------------|
| `authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed()` | Returns true if the principal (user or service account) is allowed to list pods with the field selector `spec.nodeName=mynode`. |
| CEL Expression | Purpose |
|----------------|---------|
| `authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed()` | Returns true if the principal (user or service account) is allowed to list pods with the field selector `spec.nodeName=mynode`. |
| `authorizer.group('').resource('pods').labelSelector('example.com/mylabel=myvalue').check('list').allowed()` | Returns true if the principal (user or service account) is allowed to list pods with the label selector `example.com/mylabel=myvalue`. |
{{< /table >}}
@ -219,28 +218,28 @@ godoc for more information.
Kubernetes 1.28 adds support for manipulating quantity strings (for example: 1.5G, 512k, 20Mi).
- `isQuantity(string)` checks if a string is a valid Quantity according to [Kubernetes'
resource.Quantity](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).
- `isQuantity(string)` checks if a string is a valid Quantity according to
[Kubernetes' resource.Quantity](https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity).
- `quantity(string) Quantity` converts a string to a Quantity or results in an error if the
string is not a valid quantity.
Once parsed via the `quantity` function, the resulting Quantity object has the
following library of member functions:
{{< table caption="Available member functions of a Quantity" >}}
| Member Function | CEL Return Value | Description |
|-------------------------------|-------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `isInteger()` | bool | returns true if and only if asInteger is safe to call without an error |
| `asInteger()` | int | returns a representation of the current value as an int64 if possible or results in an error if conversion would result in overflow or loss of precision. |
| `asApproximateFloat()` | float | returns a float64 representation of the quantity which may lose precision. If the value of the quantity is outside the range of a float64 +Inf/-Inf will be returned. |
| `sign()` | int | Returns `1` if the quantity is positive, `-1` if it is negative. `0` if it is zero |
| `add(<Quantity>)` | Quantity | Returns sum of two quantities |
| `add(<int>)` | Quantity | Returns sum of quantity and an integer |
| `sub(<Quantity>)` | Quantity | Returns difference between two quantities |
| `sub(<int>)` | Quantity | Returns difference between a quantity and an integer |
| `isLessThan(<Quantity>)` | bool | Returns true if and only if the receiver is less than the operand |
| `isGreaterThan(<Quantity>)` | bool | Returns true if and only if the receiver is greater than the operand |
| `compareTo(<Quantity>)` | int | Compares receiver to operand and returns 0 if they are equal, 1 if the receiver is greater, or -1 if the receiver is less than the operand |
| Member Function | CEL Return Value | Description |
|-----------------------------|------------------|-------------|
| `isInteger()` | bool | Returns true if and only if asInteger is safe to call without an error |
| `asInteger()` | int | Returns a representation of the current value as an int64 if possible or results in an error if conversion would result in overflow or loss of precision. |
| `asApproximateFloat()`      | float            | Returns a float64 representation of the quantity, which may lose precision. If the value of the quantity is outside the range of a float64, +Inf/-Inf will be returned. |
| `sign()`                    | int              | Returns `1` if the quantity is positive, `-1` if it is negative, or `0` if it is zero |
| `add(<Quantity>)` | Quantity | Returns sum of two quantities |
| `add(<int>)` | Quantity | Returns sum of quantity and an integer |
| `sub(<Quantity>)` | Quantity | Returns difference between two quantities |
| `sub(<int>)` | Quantity | Returns difference between a quantity and an integer |
| `isLessThan(<Quantity>)` | bool | Returns true if and only if the receiver is less than the operand |
| `isGreaterThan(<Quantity>)` | bool | Returns true if and only if the receiver is greater than the operand |
| `compareTo(<Quantity>)` | int | Compares receiver to operand and returns 0 if they are equal, 1 if the receiver is greater, or -1 if the receiver is less than the operand |
{{< /table >}}
Examples:
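For illustration, a few expressions of the kind this library accepts (a sketch, not the full example set on this page):

```
quantity("500000G").isInteger()                            // true
quantity("200M") == quantity("0.2G")                       // true: both equal 200,000,000
quantity("50k").add(20).sub(quantity("100k")).asInteger()  // -49980
quantity("50M").isGreaterThan(quantity("50Mi"))            // false: 50M < 50Mi (52,428,800)
isQuantity("20Mi")                                         // true
```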
@ -263,9 +262,8 @@ Examples:
CEL is a [gradually typed language](https://github.com/google/cel-spec/blob/master/doc/langdef.md#gradual-type-checking).
Some Kubernetes API fields contain fully type checked CEL expressions. For
example, [CustomResourceDefinitions Validation
Rules](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules)
Some Kubernetes API fields contain fully type checked CEL expressions. For example,
[CustomResourceDefinitions Validation Rules](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-rules)
are fully type checked.
Some Kubernetes API fields contain partially type checked CEL expressions. A
@ -290,26 +288,26 @@ has(object.namex) ? object.namex == 'special' : request.name == 'special'
## Type system integration
{{< table caption="Table showing the relationship between OpenAPIv3 types and CEL types" >}}
| OpenAPIv3 type | CEL type |
|----------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------|
| 'object' with Properties | object / "message type" (`type(<object>)` evaluates to `selfType<uniqueNumber>.path.to.object.from.self` |
| 'object' with AdditionalProperties | map |
| OpenAPIv3 type | CEL type |
|----------------------------------------------------|----------|
| 'object' with Properties | object / "message type" (`type(<object>)` evaluates to `selfType<uniqueNumber>.path.to.object.from.self`) |
| 'object' with AdditionalProperties | map |
| 'object' with x-kubernetes-embedded-type | object / "message type", 'apiVersion', 'kind', 'metadata.name' and 'metadata.generateName' are implicitly included in schema |
| 'object' with x-kubernetes-preserve-unknown-fields | object / "message type", unknown fields are NOT accessible in CEL expression |
| x-kubernetes-int-or-string | union of int or string, `self.intOrString < 100 \|\| self.intOrString == '50%'` evaluates to true for both `50` and `"50%"` |
| 'array | list |
| 'array' with x-kubernetes-list-type=map | list with map based Equality & unique key guarantees |
| 'array' with x-kubernetes-list-type=set | list with set based Equality & unique entry guarantees |
| 'boolean' | boolean |
| 'number' (all formats) | double |
| 'integer' (all formats) | int (64) |
| _no equivalent_ | uint (64) |
| 'null' | null_type |
| 'string' | string |
| 'string' with format=byte (base64 encoded) | bytes |
| 'string' with format=date | timestamp (google.protobuf.Timestamp) |
| 'string' with format=datetime | timestamp (google.protobuf.Timestamp) |
| 'string' with format=duration | duration (google.protobuf.Duration) |
| 'object' with x-kubernetes-preserve-unknown-fields | object / "message type", unknown fields are NOT accessible in CEL expression |
| x-kubernetes-int-or-string | union of int or string, `self.intOrString < 100 \|\| self.intOrString == '50%'` evaluates to true for both `50` and `"50%"` |
| 'array' | list |
| 'array' with x-kubernetes-list-type=map | list with map based Equality & unique key guarantees |
| 'array' with x-kubernetes-list-type=set | list with set based Equality & unique entry guarantees |
| 'boolean' | boolean |
| 'number' (all formats) | double |
| 'integer' (all formats) | int (64) |
| _no equivalent_ | uint (64) |
| 'null' | null_type |
| 'string' | string |
| 'string' with format=byte (base64 encoded) | bytes |
| 'string' with format=date | timestamp (google.protobuf.Timestamp) |
| 'string' with format=datetime | timestamp (google.protobuf.Timestamp) |
| 'string' with format=duration | duration (google.protobuf.Duration) |
{{< /table >}}
Also see: [CEL types](https://github.com/google/cel-spec/blob/v0.6.0/doc/langdef.md#values),
@ -322,10 +320,13 @@ order. For example `[1, 2] == [2, 1]` if the arrays represent Kubernetes `set` v
Concatenation on arrays with `x-kubernetes-list-type` uses the semantics of the
list type:
- `set`: `X + Y` performs a union where the array positions of all elements in
`set`
: `X + Y` performs a union where the array positions of all elements in
`X` are preserved and non-intersecting elements in `Y` are appended, retaining
their partial order.
- `map`: `X + Y` performs a merge where the array positions of all keys in `X`
`map`
: `X + Y` performs a merge where the array positions of all keys in `X`
are preserved but the values are overwritten by values in `Y` when the key
sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are
appended, retaining their partial order.
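As a small illustration of the `set` semantics (assuming both operands come from fields declared with `x-kubernetes-list-type: set`):

```
// Positions of elements from X are preserved; only the
// non-intersecting element 3 from Y is appended.
[1, 2] + [2, 3] == [1, 2, 3]   // true under set semantics
```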
@ -411,6 +412,5 @@ estimated running time of CEL expressions would be prohibitively expensive to
execute. If so, the API server prevents the CEL expression from being written to
API resources by rejecting create or update operations that contain the CEL
expression. This feature offers a stronger assurance that
CEL expressions written to the API resource will be evaluate at runtime without
CEL expressions written to the API resource will be evaluated at runtime without
exceeding the runtime cost budget.

View File

@ -275,3 +275,4 @@ page for more on how to report vulnerabilities.
## What's next
- [Security Checklist](/docs/concepts/security/security-checklist/) for additional information on Kubernetes security guidance.
- [Seccomp Node Reference](/docs/reference/node/seccomp/)

View File

@ -582,9 +582,11 @@ Here is the manifest you will use:
Now, the Pod's output includes environment variables `SPECIAL_LEVEL_KEY=very` and `LOG_LEVEL=INFO`.
Once you're happy to move on, delete that Pod:
Once you're happy to move on, delete that Pod and ConfigMap:
```shell
kubectl delete pod dapi-test-pod --now
kubectl delete configmap special-config
kubectl delete configmap env-config
```
## Configure all key-value pairs in a ConfigMap as container environment variables
@ -911,9 +913,19 @@ kubectl delete configmaps/special-config configmaps/env-config
kubectl delete configmap -l 'game-config in (config-4,config-5)'
```
Remove the `kustomization.yaml` file that you used to generate the ConfigMap:
```bash
rm kustomization.yaml
```
If you created a directory `configure-pod-container` and no longer need it, you should remove that too,
or move it into the trash can / deleted files location.
```bash
rm -r configure-pod-container
```
## {{% heading "whatsnext" %}}
* Follow a real world example of

View File

@ -40,7 +40,7 @@ Process namespace sharing is enabled using the `shareProcessNamespace` field of
1. Attach to the `shell` container and run `ps`:
```shell
kubectl attach -it nginx -c shell
kubectl exec -it nginx -c shell -- /bin/sh
```
If you don't see a command prompt, try pressing enter. In the container shell:

View File

@ -9,8 +9,10 @@ content_type: task
<!-- overview -->
This guide demonstrates how to install and write extensions for [kubectl](/docs/reference/kubectl/kubectl/). By thinking of core `kubectl` commands as essential building blocks for interacting with a Kubernetes cluster, a cluster administrator can think
of plugins as a means of utilizing these building blocks to create more complex behavior. Plugins extend `kubectl` with new sub-commands, allowing for new and custom features not included in the main distribution of `kubectl`.
This guide demonstrates how to install and write extensions for [kubectl](/docs/reference/kubectl/kubectl/).
By thinking of core `kubectl` commands as essential building blocks for interacting with a Kubernetes cluster,
a cluster administrator can think of plugins as a means of utilizing these building blocks to create more complex behavior.
Plugins extend `kubectl` with new sub-commands, allowing for new and custom features not included in the main distribution of `kubectl`.
## {{% heading "prerequisites" %}}
@ -35,7 +37,8 @@ own risk, since they are arbitrary programs running on your machine.
### Discovering plugins
`kubectl` provides a command `kubectl plugin list` that searches your `PATH` for valid plugin executables.
Executing this command causes a traversal of all files in your `PATH`. Any files that are executable, and begin with `kubectl-` will show up *in the order in which they are present in your `PATH`* in this command's output.
Executing this command causes a traversal of all files in your `PATH`. Any files that are executable, and
begin with `kubectl-` will show up *in the order in which they are present in your `PATH`* in this command's output.
A warning will be included for any files beginning with `kubectl-` that are *not* executable.
A warning will also be included for any valid plugin files that overlap each other's name.
@ -43,9 +46,17 @@ You can use [Krew](https://krew.dev/) to discover and install `kubectl`
plugins from a community-curated
[plugin index](https://krew.sigs.k8s.io/plugins/).
#### Create plugins
`kubectl` allows plugins to add custom create commands of the shape `kubectl create something` by providing a `kubectl-create-something` binary in the `PATH`.
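As a minimal sketch (the plugin name `kubectl-create-foo` and the install location are illustrative):

```shell
# Any executable named kubectl-create-foo on your PATH becomes
# available as `kubectl create foo`.
cat <<'EOF' > /usr/local/bin/kubectl-create-foo
#!/usr/bin/env bash
echo "creating foo"
EOF
chmod +x /usr/local/bin/kubectl-create-foo

kubectl create foo   # runs the plugin above
```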
#### Limitations
It is currently not possible to create plugins that overwrite existing `kubectl` commands. For example, creating a plugin `kubectl-version` will cause that plugin to never be executed, as the existing `kubectl version` command will always take precedence over it. Due to this limitation, it is also *not* possible to use plugins to add new subcommands to existing `kubectl` commands. For example, adding a subcommand `kubectl create foo` by naming your plugin `kubectl-create-foo` will cause that plugin to be ignored.
It is currently not possible to create plugins that overwrite existing `kubectl` commands or extend commands other than `create`.
For example, creating a plugin `kubectl-version` will cause that plugin to never be executed, as the existing `kubectl version`
command will always take precedence over it.
Due to this limitation, it is also *not* possible to use plugins to add new subcommands to existing `kubectl` commands.
For example, adding a subcommand `kubectl attach vm` by naming your plugin `kubectl-attach-vm` will cause that plugin to be ignored.
`kubectl plugin list` shows warnings for any valid plugins that attempt to do this.

View File

@ -37,6 +37,7 @@ on general patterns for running stateful applications in Kubernetes.
- Some familiarity with MySQL helps, but this tutorial aims to present
general patterns that should be useful for other systems.
- You are using the default namespace or another namespace that does not contain any conflicting objects.
- You need to have an AMD64-compatible CPU.
## {{% heading "objectives" %}}

View File

@ -175,7 +175,7 @@ description: |-
<p>The <code>rollout undo</code> command reverts the deployment to the previous known state (v2 of the image). Updates are versioned and you can revert to any previously known state of a Deployment.</p>
<p>Use the <code>get pods</code> subcommand to list the Pods again:</p>
<p><code><b>kubectl get pods</b></code></p>
<p>Four Pods are running. To check the image deployed on these Pods, use the <code>describe pods</code> subcommand:</p>
<p>To check the image deployed on the running Pods, use the <code>describe pods</code> subcommand:</p>
<p><code><b>kubectl describe pods</b></code></p>
<p>The Deployment is once again using a stable version of the app (v2). The rollback was successful.</p>
</div>

View File

@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
name: pod
spec:
securityContext:
seccompProfile:
type: Unconfined
ephemeralContainers:
- name: ephemeral-container
image: debian
securityContext:
seccompProfile:
type: RuntimeDefault
initContainers:
- name: init-container
image: debian
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: container
image: docker.io/library/debian:stable
securityContext:
seccompProfile:
type: Localhost
localhostProfile: my-profile.json

View File

@ -34,7 +34,7 @@ Le partage de l'espace de nommage du processus est activé en utilisant le champ
1. Attach to the `shell` container and run `ps`:
```shell
kubectl attach -it nginx -c shell
kubectl exec -it nginx -c shell -- /bin/sh
```
If you don't see a command prompt, try pressing Enter.

View File

@ -0,0 +1,16 @@
---
title: Container Environment Variables
id: container-env-variables
date: 2018-04-12
full_link: /docs/concepts/containers/container-environment/
short_description: >
  Container environment variables are name=value pairs that provide useful information to containers running in a Pod.
aka:
tags:
- fundamental
---
Container environment variables are name=value pairs that provide useful information to containers running in a {{< glossary_tooltip text="Pod" term_id="pod" >}}.
<!--more-->
Container environment variables provide information that running containerized applications need, as well as information about important resources to the {{< glossary_tooltip text="containers" term_id="container" >}}: for example, file system details, information about the container itself, and other cluster resources such as service endpoints.

View File

@ -0,0 +1,19 @@
---
title: Custom Resource Definition
id: CustomResourceDefinition
date: 2018-04-12
full_link: /docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/
short_description: >
  Custom code that defines a resource to add to your Kubernetes API server without building a complete custom server.
aka:
tags:
- fundamental
- operation
- extension
---
Custom code that defines a resource to add to your Kubernetes API server without building a complete custom server.
<!--more-->
Custom Resource Definitions let you extend the Kubernetes API for your environment if the publicly supported API resources can't meet your needs.

View File

@ -0,0 +1,126 @@
---
title: Service ClusterIP allocation
content_type: concept
weight: 120
---
<!-- overview -->
In Kubernetes, a [Service](/ja/docs/concepts/services-networking/service/) is an abstract way to expose an application running on a set of Pods. A Service can have a cluster-scoped virtual IP address (a Service of `type: ClusterIP`). Clients can connect to the Service using that virtual IP address, and Kubernetes then load-balances traffic to that Service across its different backing Pods.
<!-- body -->
## How are Service ClusterIPs allocated?
When Kubernetes needs to assign a virtual IP address to a Service, it happens in one of two ways:
_dynamic allocation_
: the cluster's control plane automatically picks a free IP address from the IP range configured for `type: ClusterIP` Services.
_static allocation_
: you pick an IP address yourself, from the IP range configured for Services.
Across your whole cluster, every Service `ClusterIP` must be unique. Trying to create a Service with a `ClusterIP` that has already been allocated returns an error.
## Why do you need to reserve Service ClusterIPs?
Sometimes you may want to run a Service on a well-known IP address, so that other components and users in the cluster can use it.
The best example is the cluster's DNS Service. As a convention, some Kubernetes installers assign the 10th IP address of the Service IP range to the DNS Service. Assuming you configured a cluster with 10.96.0.0/16 as the Service IP range and you want the DNS Service IP to be 10.96.0.10, you would have to create a Service like this:
```yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: CoreDNS
  name: kube-dns
  namespace: kube-system
spec:
  clusterIP: 10.96.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
    targetPort: 53
  - name: dns-tcp
    port: 53
    protocol: TCP
    targetPort: 53
  selector:
    k8s-app: kube-dns
  type: ClusterIP
```
But as explained above, the IP address 10.96.0.10 has not been reserved. If other Services are created before, or in parallel with, dynamic allocation, there is a chance this IP address is assigned to one of them; in that case you will not be able to create the DNS Service, because it will fail with a conflict error.
## How can you avoid Service ClusterIP conflicts? {#avoid-ClusterIP-conflict}
The allocation strategy that Kubernetes implements for assigning ClusterIPs to Services reduces the risk of collision.
The `ClusterIP` range is divided based on the formula `min(max(16, cidrSize / 16), 256)`, meaning _never less than 16 and never more than 256, with a graduated step between them_.
Dynamic IP assignment uses the upper band by default, and falls back to the lower band once the upper band is exhausted. This lets users make static allocations from the lower band with a low risk of collision.
## Examples {#allocation-examples}
### Example 1 {#allocation-example-1}
This example uses the IP address range 10.96.0.0/24 (CIDR notation) for Service IP addresses.
Range size: 2<sup>8</sup> - 2 = 254
Band offset (start position): `min(max(16, 256/16), 256)` = `min(16, 256)` = 16
Static band start: 10.96.0.1
Static band end: 10.96.0.16
Range end: 10.96.0.254
{{< mermaid >}}
pie showData
    title 10.96.0.0/24
    "Static allocation" : 16
    "Dynamic allocation" : 238
{{< /mermaid >}}
### Example 2 {#allocation-example-2}
This example uses the IP address range 10.96.0.0/20 (CIDR notation) for Service IP addresses.
Range size: 2<sup>12</sup> - 2 = 4094
Band offset (start position): `min(max(16, 4096/16), 256)` = `min(256, 256)` = 256
Static band start: 10.96.0.1
Static band end: 10.96.1.0
Range end: 10.96.15.254
{{< mermaid >}}
pie showData
    title 10.96.0.0/20
    "Static allocation" : 256
    "Dynamic allocation" : 3838
{{< /mermaid >}}
### Example 3 {#allocation-example-3}
This example uses the IP address range 10.96.0.0/16 (CIDR notation) for Service IP addresses.
Range size: 2<sup>16</sup> - 2 = 65534
Band offset (start position): `min(max(16, 65536/16), 256)` = `min(4096, 256)` = 256
Static band start: 10.96.0.1
Static band end: 10.96.1.0
Range end: 10.96.255.254
{{< mermaid >}}
pie showData
    title 10.96.0.0/16
    "Static allocation" : 256
    "Dynamic allocation" : 65278
{{< /mermaid >}}
## {{% heading "whatsnext" %}}
* Read about [Service external traffic policies](/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip)
* Read [Connecting Applications with Services](/ja/docs/tutorials/services/connect-applications-service/)
* Read about [Services](/ja/docs/concepts/services-networking/service/)

View File

@ -6,14 +6,6 @@ weight: 70
<!-- overview -->
{{< note >}}
While kubeadm is being used as the management tool for external etcd nodes
in this guide, please note that kubeadm does not plan to support certificate rotation
or upgrades for such nodes. The long term plan is to empower the tool
[etcdadm](https://github.com/kubernetes-sigs/etcdadm) to manage these
aspects.
{{< /note >}}
Kubeadm defaults to running a single member etcd cluster in a static pod managed
by the kubelet on the control plane node. This is not a high availability setup
as the etcd cluster contains only one member and cannot sustain any members

View File

@ -41,7 +41,7 @@ weight: 200
1. Attach to the `shell` container and run `ps`:
```shell
kubectl attach -it nginx -c shell
kubectl exec -it nginx -c shell -- /bin/sh
```
If you don't see a command prompt, try pressing Enter.

View File

@ -54,7 +54,7 @@ spec:
### Deploying the AMD GPU device plugin {#deploying-amd-gpu-device-plugin}
The [official AMD GPU device plugin](https://github.com/RadeonOpenCompute/k8s-device-plugin) has the following requirements:
The [official AMD GPU device plugin](https://github.com/ROCm/k8s-device-plugin) has the following requirements:
- The Kubernetes nodes must have the AMD GPU Linux driver pre-installed.
@ -64,7 +64,7 @@ spec:
kubectl create -f https://raw.githubusercontent.com/RadeonOpenCompute/k8s-device-plugin/v1.10/k8s-ds-amdgpu-dp.yaml
```
You can report issues with this third-party device plugin at [RadeonOpenCompute/k8s-device-plugin](https://github.com/RadeonOpenCompute/k8s-device-plugin).
You can report issues with this third-party device plugin at [RadeonOpenCompute/k8s-device-plugin](https://github.com/ROCm/k8s-device-plugin).
### Deploying the NVIDIA GPU device plugin {#deploying-nvidia-gpu-device-plugin}
@ -123,7 +123,7 @@ kubectl label nodes <node-with-p100> accelerator=nvidia-tesla-p100
## Automatic node labelling {#node-labeller}
If you're using AMD GPU devices, you can deploy [Node Labeller](https://github.com/RadeonOpenCompute/k8s-device-plugin/tree/master/cmd/k8s-node-labeller). Node Labeller is a {{< glossary_tooltip text="controller" term_id="controller" >}} that automatically labels your nodes with GPU device properties.
If you're using AMD GPU devices, you can deploy [Node Labeller](https://github.com/ROCm/k8s-device-plugin/tree/master/cmd/k8s-node-labeller). Node Labeller is a {{< glossary_tooltip text="controller" term_id="controller" >}} that automatically labels your nodes with GPU device properties.
At the moment, that controller can add labels for the following properties:

View File

@ -5,6 +5,7 @@ metadata:
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /$1
spec:
ingressClassName: nginx
rules:
- host: hello-world.info
http:

View File

@ -40,7 +40,6 @@ Esta página lista alguns dos complementos disponíveis e links com suas respect
## Visualization &amp; Control
* [Dashboard](https://github.com/kubernetes/dashboard#kubernetes-dashboard) is a web interface for managing Kubernetes.
* [Weave Scope](https://www.weave.works/documentation/scope-latest-installing/#k8s) is a graphical tool for visualizing your containers, Pods, Services, and other cluster objects. It can be used with a [Weave Cloud account](https://cloud.weave.works/). Alternatively, you can host the UI yourself.
## Infrastructure

View File

@ -45,17 +45,17 @@ Kubernetes — это проект с открытым исходным кодо
<button id="desktopShowVideoButton" onclick="kub.showVideo()">Смотреть видео</button>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-open-source-summit-ai-dev-china/" button id="desktopKCButton">Посетите KubeCon + CloudNativeCon в Китае 21-23 августа</a>
<br>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america-2024/" button id="desktopKCButton">Посетите KubeCon + CloudNativeCon в США 12-15 ноября</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/" button id="desktopKCButton">Посетите KubeCon + CloudNativeCon в США 12-15 ноября</a>
<br>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-india/" button id="desktopKCButton">Посетите KubeCon + CloudNativeCon в Индии 11-12 декабря</a>
<br>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/" button id="desktopKCButton">Посетите KubeCon + CloudNativeCon в Лондоне 1-4 апреля, 2025</a>
</div>
<div id="videoPlayer">
<iframe data-url="https://www.youtube.com/embed/H06qrNmGqyE?autoplay=1" frameborder="0" allowfullscreen></iframe>

View File

@ -64,16 +64,16 @@ Kubernetes - проект з відкритим вихідним кодом. В
<button id="desktopShowVideoButton" onclick="kub.showVideo()">Переглянути відео</button>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-open-source-summit-ai-dev-china/" button id="desktopKCButton">Відвідайте KubeCon + CloudNativeCon у Китаї 21-23 серпня</a>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america-2024" button id="desktopKCButton">Відвідайте KubeCon + CloudNativeCon у Північній Америці, 12-15 листопада 2024 року</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america-2024" button id="desktopKCButton">Відвідайте KubeCon + CloudNativeCon у Північній Америці, 12-15 листопада 2024 року</a>
<br>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-india/" button id="desktopKCButton">Відвідайте KubeCon + CloudNativeCon в Індії 11-12 грудня</a>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/" button id="desktopKCButton">Відвідайте KubeCon + CloudNativeCon в Європі 1-4 квітня</a>
</div>
<div id="videoPlayer">

View File

@ -0,0 +1,355 @@
---
layout: blog
title: "Kubernetes 1.31: Pod Failure Policy for Jobs Goes GA"
date: 2024-08-19
slug: kubernetes-1-31-pod-failure-policy-for-jobs-goes-ga
author: >
  [Michał Woźniak](https://github.com/mimowo) (Google),
  [Shannon Kularathna](https://github.com/shannonxtreme) (Google)
translator: >
  [Michael Yao](https://github.com/windsonsea) (DaoCloud)
---
This post describes _Pod failure policy_, which graduates to stable in Kubernetes
1.31, and how to use it in your Jobs.
## About Pod failure policy
When you run workloads on Kubernetes, Pods might fail for a variety of reasons.
Ideally, workloads like Jobs should be able to ignore transient, retriable
failures and continue running to completion.
To allow for these transient failures, Kubernetes Jobs include the `backoffLimit`
field, which lets you specify a number of Pod failures that you're willing to tolerate
during Job execution. However, if you set a large value for the `backoffLimit` field
and rely solely on this field, you might notice unnecessary increases in operating
costs as Pods restart excessively until the backoffLimit is met.
This becomes particularly problematic when running large-scale Jobs with
thousands of long-running Pods across thousands of nodes.
The Pod failure policy extends the backoff limit mechanism to help you reduce
costs in the following ways:
- Gives you control to fail the Job as soon as a non-retriable Pod failure occurs.
- Allows you to ignore retriable errors without increasing the `backoffLimit` field.
For example, you can use a Pod failure policy to run your workload on more affordable spot machines
by ignoring Pod failures caused by
[graceful node shutdown](/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown).
The policy allows you to distinguish between retriable and non-retriable Pod
failures based on container exit codes or Pod conditions in a failed Pod.
## How it works
You specify a Pod failure policy in the Job specification, represented as a list
of rules.
For each rule you define _match requirements_ based on one of the following properties:
- Container exit codes: the `onExitCodes` property.
- Pod conditions: the `onPodConditions` property.
Additionally, for each rule, you specify one of the following actions to take
when a Pod matches the rule:
- `Ignore`: Do not count the failure towards the `backoffLimit` or `backoffLimitPerIndex`.
- `FailJob`: Fail the entire Job and terminate all running Pods.
- `FailIndex`: Fail the index corresponding to the failed Pod.
  This action works with the [Backoff limit per index](/docs/concepts/workloads/controllers/job/#backoff-limit-per-index) feature.
- `Count`: Count the failure towards the `backoffLimit` or `backoffLimitPerIndex`.
  This is the default behavior.
When Pod failures occur in a running Job, Kubernetes matches the
failed Pod status against the list of Pod failure policy rules, in the specified
order, and takes the corresponding actions for the first matched rule.
Note that when specifying the Pod failure policy, you must also set the Job's
Pod template with `restartPolicy: Never`. This prevents race conditions between
the kubelet and the Job controller when counting Pod failures.
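As a sketch of where these fields sit in a Job manifest (the Job name and container are illustrative):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: example-job        # illustrative name
spec:
  backoffLimit: 6
  podFailurePolicy:
    rules:
    - action: Ignore       # don't count Kubernetes-initiated disruptions
      onPodConditions:
      - type: DisruptionTarget
  template:
    spec:
      restartPolicy: Never # required when podFailurePolicy is set
      containers:
      - name: main
        image: docker.io/library/busybox:1.36
        command: ["sh", "-c", "exit 0"]
```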
### Kubernetes-initiated Pod disruptions
To allow matching Pod failure policy rules against failures caused by
disruptions initiated by Kubernetes, this feature introduces the `DisruptionTarget`
Pod condition.
Kubernetes adds this condition to any Pod, regardless of whether it's managed by
a Job controller, that fails because of a retriable
[disruption scenario](/docs/concepts/workloads/pods/disruptions/#pod-disruption-conditions).
The `DisruptionTarget` condition contains one of the following reasons that
corresponds to these disruption scenarios:
- `PreemptionByKubeScheduler`: [Preemption](/docs/concepts/scheduling-eviction/pod-priority-preemption)
  by `kube-scheduler` to accommodate a new Pod that has a higher priority.
- `DeletionByTaintManager`: the Pod is due to be deleted by
  `kube-controller-manager` due to a `NoExecute` [taint](/docs/concepts/scheduling-eviction/taint-and-toleration/)
  that the Pod doesn't tolerate.
- `EvictionByEvictionAPI`: the Pod is due to be deleted by an
  [API-initiated eviction](/docs/concepts/scheduling-eviction/api-eviction/).
- `DeletionByPodGC`: the Pod is bound to a node that no longer exists, and is due to
  be deleted by [Pod garbage collection](/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection).
- `TerminationByKubelet`: the Pod was terminated by
  [graceful node shutdown](/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown),
  [node pressure eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/)
  or preemption for [system critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/).
In all other disruption scenarios, like eviction due to exceeding
[Pod container limits](/docs/concepts/configuration/manage-resources-containers/),
Pods don't receive the `DisruptionTarget` condition because the disruptions were
likely caused by the Pod and would reoccur on retry.
### Example
The Pod failure policy snippet below demonstrates an example use:
```yaml
podFailurePolicy:
  rules:
  - action: Ignore
    onPodConditions:
    - type: DisruptionTarget
  - action: FailJob
    onPodConditions:
    - type: ConfigIssue
  - action: FailJob
    onExitCodes:
      operator: In
      values: [ 42 ]
```
In this example, the Pod failure policy does the following:
- Ignores any failed Pods that have the built-in `DisruptionTarget`
  condition. These Pods don't count towards Job backoff limits.
- Fails the Job if any failed Pods have the custom user-supplied
  `ConfigIssue` condition, which was added either by a custom controller or webhook.
- Fails the Job if any containers exited with the exit code 42.
- Counts all other Pod failures towards the default `backoffLimit` (or
  `backoffLimitPerIndex` if used).
## Learn more
- For a hands-on guide to using Pod failure policy, see
  [Handling retriable and non-retriable pod failures with Pod failure policy](/docs/tasks/job/pod-failure-policy/)
- Read the documentation for
  [Pod failure policy](/docs/concepts/workloads/controllers/job/#pod-failure-policy) and
  [Backoff limit per index](/docs/concepts/workloads/controllers/job/#backoff-limit-per-index)
- Read the documentation for
  [Pod disruption conditions](/docs/concepts/workloads/pods/disruptions/#pod-disruption-conditions)
- Read the KEP for [Pod failure policy](https://github.com/kubernetes/enhancements/tree/master/keps/sig-apps/3329-retriable-and-non-retriable-failures)
## Related work
Based on the concepts introduced by Pod failure policy, the following additional work is in progress:
- JobSet integration: [Configurable Failure Policy API](https://github.com/kubernetes-sigs/jobset/issues/262)
- [Pod failure policy extension to add more granular failure reasons](https://github.com/kubernetes/enhancements/issues/4443)
- Support for Pod failure policy via JobSet in [Kubeflow Training v2](https://github.com/kubeflow/training-operator/pull/2171)
- Proposal: [Disrupted Pods should be removed from endpoints](https://docs.google.com/document/d/1t25jgO_-LRHhjRXf4KJ5xY_t8BZYdapv7MDAxVGY6R8)
## Get involved
This work was sponsored by the
[batch working group](https://github.com/kubernetes/community/tree/master/wg-batch)
in close collaboration with the
[SIG Apps](https://github.com/kubernetes/community/tree/master/sig-apps),
[SIG Node](https://github.com/kubernetes/community/tree/master/sig-node),
and [SIG Scheduling](https://github.com/kubernetes/community/tree/master/sig-scheduling)
communities.
If you are interested in working on new features in the space we recommend
subscribing to our [Slack](https://kubernetes.slack.com/messages/wg-batch)
channel and attending the regular community meetings.
## Acknowledgments
I would love to thank everyone who was involved in this project over the years -
it's been a journey and a joint community effort! The list below is
my best-effort attempt to remember and recognize people who made an impact.
Thank you!
- [Aldo Culquicondor](https://github.com/alculquicondor/) for guidance and reviews throughout the process
- [Jordan Liggitt](https://github.com/liggitt) for KEP and API reviews
- [David Eads](https://github.com/deads2k) for API reviews
- [Maciej Szulik](https://github.com/soltysh) for KEP reviews from SIG Apps PoV
- [Clayton Coleman](https://github.com/smarterclayton) for guidance and SIG Node reviews
- [Sergey Kanzhelev](https://github.com/SergeyKanzhelev) for KEP reviews from SIG Node PoV
- [Dawn Chen](https://github.com/dchen1107) for KEP reviews from SIG Node PoV
- [Daniel Smith](https://github.com/lavalamp) for reviews from SIG API machinery PoV
- [Antoine Pelisse](https://github.com/apelisse) for reviews from SIG API machinery PoV
- [John Belamaric](https://github.com/johnbelamaric) for PRR reviews
- [Filip Křepinský](https://github.com/atiratree) for thorough reviews from SIG Apps PoV and bug-fixing
- [David Porter](https://github.com/bobbypage) for thorough reviews from SIG Node PoV
- [Jensen Lo](https://github.com/jensentanlo) for early requirements discussions, testing and reporting issues
- [Daniel Vega-Myhre](https://github.com/danielvegamyhre) for advancing JobSet integration and reporting issues
- [Abdullah Gharaibeh](https://github.com/ahg-g) for early design discussions and guidance
- [Antonio Ojea](https://github.com/aojea) for test reviews
- [Yuki Iwai](https://github.com/tenzen-y) for reviews and aligning implementation of the closely related Job features
- [Kevin Hannon](https://github.com/kannon92) for reviews and aligning implementation of the closely related Job features
- [Tim Bannister](https://github.com/sftim) for docs reviews
- [Shannon Kularathna](https://github.com/shannonxtreme) for docs reviews
- [Paola Cortés](https://github.com/cortespao) for docs reviews

View File

@ -0,0 +1,76 @@
---
layout: blog
title: "Kubernetes 1.31: Autoconfiguration For Node Cgroup Driver (beta)"
date: 2024-08-21
slug: cri-cgroup-driver-lookup-now-beta
author: >
  Peter Hunt (Red Hat)
translator: >
  XiaoYang Zhang (Huawei)
---
Historically, configuring the correct cgroup driver has been a pain point for users running new
Kubernetes clusters. On Linux systems, there are two different cgroup drivers:
`cgroupfs` and `systemd`. In the past, both the [kubelet](/docs/reference/command-line-tools-reference/kubelet/)
and CRI implementation (like CRI-O or containerd) needed to be configured to use
the same cgroup driver, or else the kubelet would exit with an error. This was a
source of headaches for many cluster admins. However, there is light at the end of the tunnel!
## Automated cgroup driver detection
In v1.28.0, the SIG Node community introduced the feature gate
`KubeletCgroupDriverFromCRI`, which instructs the kubelet to ask the CRI
implementation which cgroup driver to use. A few minor releases of Kubernetes
happened whilst we waited for support to land in the major two CRI implementations
(containerd and CRI-O), but as of v1.31.0, this feature is now beta!
In addition to setting the feature gate, a cluster admin needs to ensure their
CRI implementation is new enough:
- containerd: Support was added in v2.0.0
- CRI-O: Support was added in v1.28.0
Then, they should ensure their CRI implementation is configured to use the
cgroup driver they would like to use.
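For example, with CRI-O this is a one-line setting in `/etc/crio/crio.conf` (a sketch; check the defaults your distribution ships):

```toml
[crio.runtime]
# Use the systemd cgroup driver; with the feature gate enabled,
# the kubelet picks this choice up automatically from CRI-O.
cgroup_manager = "systemd"
```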
## Future work
Eventually, support for the kubelet's `cgroupDriver` configuration field will be
dropped, and the kubelet will fail to start if the CRI implementation isn't new
enough to have support for this feature.

View File

@ -240,12 +240,11 @@ Kubernetes 使用 TLS 保护 API 流量;确保在部署集群时采用了 TLS
things: isolation between different applications, and a mechanism to combine
those isolated applications to run on the same host computer. Those two
aspects, isolation and aggregation, mean that runtime security involves
trade-offs and finding an appropriate balance.
identifying trade-offs and finding an appropriate balance.
-->
{{< glossary_tooltip text="容器" term_id="container" >}} 提供了两种功能:
不同应用程序间的隔离和将这些隔离的应用程序合并运行到同一台主机。
隔离和聚合这两个方面意味着运行时安全需要权衡利弊,
找到合适的平衡点。
不同应用程序间的隔离,以及将这些隔离的应用程序合并运行到同一台主机的机制。
隔离和聚合这两个方面意味着运行时安全需要权衡利弊,并找到合适的平衡点。
<!--
Kubernetes relies on a {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}

View File

@ -157,14 +157,16 @@ Kubernetes 还可以为 Pod 和容器设置 `allowPrivilegeEscalation`。当此
<!--
To learn how to implement seccomp in Kubernetes, refer to
[Restrict a Container's Syscalls with seccomp](/docs/tutorials/security/seccomp/).
[Restrict a Container's Syscalls with seccomp](/docs/tutorials/security/seccomp/)
or the [Seccomp node reference](/docs/reference/node/seccomp/)
To learn more about seccomp, see
[Seccomp BPF](https://www.kernel.org/doc/html/latest/userspace-api/seccomp_filter.html)
in the Linux kernel documentation.
-->
To learn how to implement seccomp in Kubernetes,
refer to [Restrict a Container's Syscalls with seccomp](/zh-cn/docs/tutorials/security/seccomp/).
refer to [Restrict a Container's Syscalls with seccomp](/zh-cn/docs/tutorials/security/seccomp/) or
the [Seccomp node reference](/zh-cn/docs/reference/node/seccomp/).
To learn more about seccomp, see
[Seccomp BPF](https://www.kernel.org/doc/html/latest/userspace-api/seccomp_filter.html) in the Linux kernel documentation.
@ -506,7 +508,9 @@ of support that you need. For instructions, refer to
* [Learn how to use AppArmor](/docs/tutorials/security/apparmor/)
* [Learn how to use seccomp](/docs/tutorials/security/seccomp/)
* [Learn how to use SELinux](/docs/tasks/configure-pod-container/security-context/#assign-selinux-labels-to-a-container)
* [Seccomp Node Reference](/docs/reference/node/seccomp/)
-->
* [Learn how to use AppArmor](/zh-cn/docs/tutorials/security/apparmor/)
* [Learn how to use seccomp](/zh-cn/docs/tutorials/security/seccomp/)
* [Learn how to use SELinux](/zh-cn/docs/tasks/configure-pod-container/security-context/#assign-selinux-labels-to-a-container)
* [Seccomp Node Reference](/zh-cn/docs/reference/node/seccomp/)

View File

@ -603,7 +603,7 @@ access to the host system and all the processes/files running on that host.
In a container escape scenario, an attacker exploits a vulnerability to gain access to the host system and all the processes/files running on that host.
<!--
Virtual machines and userspace kernels are 2 popular approaches to sandboxing. The following
Virtual machines and userspace kernels are two popular approaches to sandboxing. The following
sandboxing implementations are available:
* [gVisor](https://gvisor.dev/) intercepts syscalls from containers and runs them through a

View File

@ -166,12 +166,12 @@ under the `.spec` field path.
<!--
<li><code>user</code> is unset (<code>""</code> / undefined / nil)</li>
<li><code>role</code> is unset (<code>""</code> / undefined / nil)</li>
<li><code>type</code> is unset or one of: <code>container_t, container_init_t, container_kvm_t</code></li>
<li><code>type</code> is unset or one of: <code>container_t, container_init_t, container_kvm_t, container_engine_t</code></li>
<li><code>level</code> is anything</li>
-->
<li><code>user</code> is unset (<code>""</code> / undefined / nil)</li>
<li><code>role</code> is unset (<code>""</code> / undefined / nil)</li>
<li><code>type</code> is unset or one of: <code>container_t</code>, <code>container_init_t</code>, <code>container_kvm_t</code></li>
<li><code>type</code> is unset or one of: <code>container_t</code>, <code>container_init_t</code>, <code>container_kvm_t</code>, <code>container_engine_t</code></li>
<li><code>level</code> is anything</li>
</ul>
</td>

View File

@ -0,0 +1,33 @@
---
title: RemoveSelfLink
content_type: feature_gate
_build:
list: never
render: false
stages:
- stage: alpha
defaultValue: false
fromVersion: "1.16"
toVersion: "1.19"
- stage: beta
defaultValue: true
fromVersion: "1.20"
toVersion: "1.23"
- stage: stable
defaultValue: true
fromVersion: "1.24"
toVersion: "1.29"
removed: true
---
<!--
Sets the `.metadata.selfLink` field to blank (empty string) for all
objects and collections. This field has been deprecated since the Kubernetes v1.16
release. When this feature is enabled, the `.metadata.selfLink` field remains part of
the Kubernetes API, but is always unset.
-->
Sets the `.metadata.selfLink` field to blank (empty string) for all
objects and collections. This field has been deprecated since the Kubernetes v1.16
release. When this feature is enabled, the `.metadata.selfLink` field remains part of
the Kubernetes API, but is always unset.

View File

@ -272,7 +272,7 @@ exec 插件本身至少应通过文件访问许可来实施保护。
<tr>
<td><code>expirationTimestamp</code><br/>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#time-v1-meta"><code>meta/v1.Time</code></a>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta"><code>meta/v1.Time</code></a>
</td>
<td>
<!-- ExpirationTimestamp indicates a time when the provided credentials expire. -->

View File

@ -0,0 +1,31 @@
---
title: Duration
id: duration
date: 2024-10-05
full_link:
short_description: >
  A time interval specified as a string in the format accepted by Go's
  [time.Duration](https://pkg.go.dev/time), allowing for flexible time
  specifications using various units like seconds, minutes, and hours.
aka:
tags:
- fundamental
---
<!--
title: Duration
id: duration
date: 2024-10-05
full_link:
short_description: >
A time interval specified as a string in the format accepted by Go's [time.Duration](https://pkg.go.dev/time), allowing for flexible time specifications using various units like seconds, minutes, and hours.
aka:
tags:
- fundamental
-->
<!--
In Kubernetes APIs, a duration must be non-negative and is typically expressed with a suffix.
For example, `5s` for five seconds or `1m30s` for one minute and thirty seconds.
-->
In Kubernetes APIs, a duration must be non-negative and is typically expressed with a suffix.
For example, `5s` for five seconds or `1m30s` for one minute and thirty seconds.

View File

@ -9,7 +9,8 @@ aka:
tags:
- fundamental
---
Specification of a Kubernetes API object in JSON or YAML format.
Specification of a Kubernetes API object in [JSON](https://www.json.org/json-en.html)
or [YAML](https://yaml.org/) format.
<!--
title: Manifest
@ -21,12 +22,15 @@ short_description: >
aka:
tags:
- fundamental
Specification of a Kubernetes API object in JSON or YAML format.
Specification of a Kubernetes API object in [JSON](https://www.json.org/json-en.html)
or [YAML](https://yaml.org/) format.
-->
<!--more-->
<!--
A manifest specifies the desired state of an object that Kubernetes will maintain when you apply the manifest. Each configuration file can contain multiple manifests.
A manifest specifies the desired state of an object that Kubernetes will maintain when you apply the manifest.
For YAML format, each file can contain multiple manifests.
-->
A manifest specifies the desired state of an object that Kubernetes will maintain when you apply the manifest. Each configuration file can contain multiple manifests.
A manifest specifies the desired state of an object that Kubernetes will maintain when you apply the manifest.
For YAML format, each file can contain multiple manifests.

View File

@ -6,7 +6,7 @@ api_metadata:
content_type: "api_reference"
description: "RuntimeClass 定义集群中支持的容器运行时类。"
title: "RuntimeClass"
weight: 6
weight: 9
---
<!--
api_metadata:
@ -16,7 +16,7 @@ api_metadata:
content_type: "api_reference"
description: "RuntimeClass defines a class of container runtime supported in the cluster."
title: "RuntimeClass"
weight: 6
weight: 9
auto_generated: true
-->
`apiVersion: node.k8s.io/v1`

View File

@ -50,10 +50,14 @@ A node selector requirement is a selector that contains values, a key, and an op
<!--
- **values** ([]string)
*Atomic: will be replaced during a merge*
An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
-->
- **values** ([]string)
**原子性:将在合并期间被替换**
一个由字符串值组成的数组。如果 operator 是 `In``NotIn`,则 values 数组不能为空。
如果 operator 为 `Exists``DoesNotExist`,则 values 数组只能为空。
如果 operator 为 `Gt``Lt`,则 values 数组只能包含一个元素,并且该元素会被解释为整数。
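For illustration, a requirement using the `In` operator (the label key and values are just an example):

```yaml
# Matches nodes whose 'kubernetes.io/arch' label is amd64 or arm64.
- key: kubernetes.io/arch
  operator: In
  values:
  - amd64
  - arm64
```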

View File

@ -6,7 +6,7 @@ api_metadata:
content_type: "api_reference"
description: "VolumeAttachment 抓取将指定卷挂接到指定节点或从指定节点解除挂接指定卷的意图。"
title: "VolumeAttachment"
weight: 7
weight: 11
---
<!--
api_metadata:
@ -16,7 +16,7 @@ api_metadata:
content_type: "api_reference"
description: "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node."
title: "VolumeAttachment"
weight: 7
weight: 11
-->
`apiVersion: storage.k8s.io/v1`

View File

@ -1,9 +0,0 @@
---
title: "其他资源"
weight: 10
---
<!--
title: "Other Resources"
weight: 10
auto_generated: true
-->

View File

@ -1,16 +0,0 @@
---
api_metadata:
apiVersion: "admissionregistration.k8s.io/v1beta1"
import: "k8s.io/api/admissionregistration/v1beta1"
kind: "ValidatingAdmissionPolicyBindingList"
content_type: "api_reference"
description: ""
title: "ValidatingAdmissionPolicyBindingList v1beta1"
weight: 1
---
`apiVersion: admissionregistration.k8s.io/v1beta1`
`import "k8s.io/api/admissionregistration/v1beta1"`

View File

@ -395,6 +395,12 @@ IngressStatus 描述 Ingress 的当前状态。
**IngressLoadBalancerStatus represents the status of a load balancer.**
- **loadBalancer.ingress** ([]IngressLoadBalancerIngress)
<!--
*Atomic: will be replaced during a merge*
-->
**Atomic: will be replaced during a merge**
<!--
ingress is a list containing ingress points for the load-balancer.

View File

@ -8,7 +8,6 @@ description: "Service 是软件服务(例如 mysql的命名抽象包含
title: Service
weight: 1
---
<!--
api_metadata:
apiVersion: "v1"
@ -26,10 +25,10 @@ auto_generated: true
`import "k8s.io/api/core/v1”`
## Service {#Service}
<!--
Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.
-->
Service is a named abstraction of software service (for example, mysql) consisting of a local port
(for example 3306) that the proxy listens on, and the selector that determines which Pods will answer requests sent through the proxy.
@ -45,8 +44,8 @@ Service 是软件服务(例如 mysql的命名抽象包含代理要侦听
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-->
Standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
Standard object's metadata. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
- **spec** (<a href="{{< ref "../service-resources/service-v1#ServiceSpec" >}}">ServiceSpec</a>)
@ -63,12 +62,12 @@ Service 是软件服务(例如 mysql的命名抽象包含代理要侦听
Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
-->
Most recently observed status of the service. Populated by the system. Read-only.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
Most recently observed status of the service. Populated by the system. Read-only. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
## ServiceSpec {#ServiceSpec}
<!--
ServiceSpec describes the attributes that a user creates on a service.
-->
ServiceSpec describes the attributes that a user creates on a service.
@ -77,18 +76,18 @@ ServiceSpec 描述用户在服务上创建的属性。
- **selector** (map[string]string)
<!--
Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/
-->
Route service traffic to Pods with label keys and values matching this selector.
If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify.
Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName.
More info: https://kubernetes.io/docs/concepts/services-networking/service/
Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info:
https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/
- **ports** ([]ServicePort)
<!--
<!--
*Patch strategy: merge on key `port`*
*Map: unique values on keys `port, protocol` will be kept during a merge*
@ -99,17 +98,17 @@ ServiceSpec 描述用户在服务上创建的属性。
*ServicePort contains information on service's port.*
-->
**Patch strategy:基于键 `type` 合并**
**补丁策略:基于键 `port` 合并**
**Map合并时将保留 `port, protocol` 键的唯一值**
此 Service 公开的端口列表。
更多信息: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
此 Service 公开的端口列表。更多信息:
https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
<a name="ServicePort"></a>
**ServicePort 包含有关 ServicePort 的信息。**
<!--
<!--
- **ports.port** (int32), required
The port that will be exposed by this service.
@ -135,8 +134,8 @@ ServiceSpec 描述用户在服务上创建的属性。
如果此值是一个字符串,将在目标 Pod 的容器端口中作为命名端口进行查找。
如果未指定字段,则使用 `port` 字段的值(直接映射)。
对于 clusterIP 为 None 的服务,此字段将被忽略,
应忽略不设或设置为 `port` 字段的取值。
更多信息: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
应将其省略或设置为与 `port` 字段相同的值。更多信息:
https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/#defining-a-service
<a name="IntOrString"></a>
**IntOrString 是一种可以保存 int32 或字符串的类型。
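
作为补充示意(非上游参考文档的一部分),下面的 Service 清单演示了 selector、port
与字符串形式 targetPort即命名端口的配合使用名称与标签均为演示假设

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service                 # 仅用于演示的示例名称
spec:
  selector:
    app.kubernetes.io/name: my-app # 假设 Pod 带有此标签
  ports:
  - name: http
    protocol: TCP
    port: 80                       # Service 公开的端口
    targetPort: http-web           # 字符串取值:按目标 Pod 中的命名容器端口查找
```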
@ -145,7 +144,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **ports.protocol** (string)
<!--
<!--
The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". Default is TCP.
-->
@ -153,7 +152,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **ports.name** (string)
<!--
<!--
The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.
-->
@ -164,7 +163,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **ports.nodePort** (int32)
<!--
<!--
The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-->
@ -172,8 +171,8 @@ ServiceSpec 描述用户在服务上创建的属性。
通常由系统分配。如果指定了一个在范围内且未使用的值,则将使用该值,否则操作将失败。
如果在创建的 Service 需要该端口时未指定该字段,则会分配端口。
如果在创建不需要该端口的 Service时指定了该字段则会创建失败。
当更新 Service 时,如果不再需要此字段(例如,将类型从 NodePort 更改为 ClusterIP这个字段将被擦除。
更多信息: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
当更新 Service 时,如果不再需要此字段(例如,将类型从 NodePort 更改为 ClusterIP这个字段将被擦除。更多信息:
https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/#type-nodeport
- **ports.appProtocol** (string)
@ -182,6 +181,7 @@ ServiceSpec 描述用户在服务上创建的属性。
richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax.
Valid values are either:
-->
此端口的应用协议,用作实现的提示,为其所理解的协议提供更丰富的行为。此字段遵循标准
Kubernetes 标签语法,有效值包括:
@ -208,7 +208,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **type** (string)
<!--
<!--
type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. "ClusterIP" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is "None", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. "NodePort" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. "LoadBalancer" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. "ExternalName" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
-->
@ -219,18 +219,18 @@ ServiceSpec 描述用户在服务上创建的属性。
如果 clusterIP 为 `None`,则不分配虚拟 IP并且 Endpoints 作为一组端点而不是虚拟 IP 发布。
`NodePort` 建立在 ClusterIP 之上,并在每个节点上分配一个端口,该端口路由到与 clusterIP 相同的 Endpoints。
`LoadBalancer` 基于 NodePort 构建并创建一个外部负载均衡器(如果当前云支持),该负载均衡器路由到与 clusterIP 相同的 Endpoints。
`externalName` 将此 Service 别名为指定的 externalName。其他几个字段不适用于 ExternalName Service。
更多信息: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
`externalName` 将此 Service 别名为指定的 externalName。其他几个字段不适用于 ExternalName Service。更多信息:
https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/#publishing-services-service-types
- **ipFamilies** ([]string)
<!--
<!--
*Atomic: will be replaced during a merge*
-->
**原子:将在合并期间被替换**
<!--
<!--
IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are "IPv4" and "IPv6". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to "headless" services. This field will be wiped when updating a Service to type ExternalName.
-->
@ -252,7 +252,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **ipFamilyPolicy** (string)
<!--
<!--
IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be "SingleStack" (a single IP family), "PreferDualStack" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or "RequireDualStack" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.
-->
@ -266,7 +266,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **clusterIP** (string)
<!--
<!--
clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are "None", empty string (""), or a valid IP address. Setting this to "None" makes a "headless service" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-->
@ -278,17 +278,18 @@ ServiceSpec 描述用户在服务上创建的属性。
clusterIP 为 “None” 时会生成“无头服务”(无虚拟 IP这在首选直接 Endpoint 连接且不需要代理时很有用。
仅适用于 ClusterIP、NodePort 和 LoadBalancer 类型的服务。
如果在创建 ExternalName 类型的 Service 时指定了 clusterIP则创建将失败。
更新 Service type 为 ExternalName 时clusterIP 会被移除。
更多信息: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
更新 Service type 为 ExternalName 时clusterIP 会被移除。更多信息:
https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
- **clusterIPs** ([]string)
<!--
<!--
*Atomic: will be replaced during a merge*
-->
**原子:将在合并期间被替换**
<!--
<!--
ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are "None", empty string (""), or a valid IP address. Setting this to "None" makes a "headless service" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.
-->
@ -303,18 +304,18 @@ ServiceSpec 描述用户在服务上创建的属性。
更新 Service type 为 ExternalName 时,该字段将被移除。如果未指定此字段,则将从 clusterIP 字段初始化。
如果指定 clusterIPs客户端必须确保 clusterIPs[0] 和 clusterIP 一致。
<!--
<!--
This field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-->
clusterIPs 最多可包含两个条目(双栈系列,按任意顺序)。
这些 IP 必须与 ipFamilies 的值相对应。
clusterIP 和 ipFamilies 都由 ipFamilyPolicy 管理。
更多信息: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
clusterIP 和 ipFamilies 都由 ipFamilyPolicy 管理。更多信息:
https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
- **externalIPs** ([]string)
<!--
<!--
*Atomic: will be replaced during a merge*
externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.
@ -328,19 +329,20 @@ ServiceSpec 描述用户在服务上创建的属性。
- **sessionAffinity** (string)
<!--
<!--
Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-->
支持 “ClientIP” 和 “None”。用于维护会话亲和性。
启用基于客户端 IP 的会话亲和性。必须是 ClientIP 或 None。默认为 None。
更多信息: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
启用基于客户端 IP 的会话亲和性。必须是 ClientIP 或 None。默认为 None。更多信息:
https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
- **loadBalancerIP** (string)
<!--
<!--
Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available.
-->
仅适用于服务类型LoadBalancer。此功能取决于底层云提供商是否支持在创建负载均衡器时指定 loadBalancerIP。
如果云提供商不支持该功能,该字段将被忽略。
已弃用:该字段信息不足,且其含义因实现而异。此字段是不可移植的,并且可能不支持双栈。
@ -348,7 +350,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **loadBalancerSourceRanges** ([]string)
<!--
<!--
*Atomic: will be replaced during a merge*
If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
@ -357,12 +359,12 @@ ServiceSpec 描述用户在服务上创建的属性。
**原子:将在合并期间被替换**
如果设置了此字段并且被平台支持,将限制通过云厂商的负载均衡器的流量到指定的客户端 IP。
如果云提供商不支持该功能,该字段将被忽略。
更多信息: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
如果云提供商不支持该功能,该字段将被忽略。更多信息:
https://kubernetes.io/zh-cn/docs/tasks/access-application-cluster/create-external-load-balancer/
- **loadBalancerClass** (string)
<!--
<!--
loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
-->
@ -377,7 +379,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **externalName** (string)
<!--
<!--
externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
-->
@ -387,9 +389,10 @@ ServiceSpec 描述用户在服务上创建的属性。
- **externalTrafficPolicy** (string)
<!--
<!--
externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, "Cluster", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get "Cluster" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.
-->
externalTrafficPolicy 描述了节点如何分发它们在 Service 的“外部访问”地址
NodePort、ExternalIP 和 LoadBalancer IP接收到的服务流量。
如果设置为 “Local”代理将以一种假设外部负载均衡器将负责在节点之间服务流量负载均衡
@ -401,18 +404,20 @@ ServiceSpec 描述用户在服务上创建的属性。
- **internalTrafficPolicy** (string)
<!--
<!--
InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to "Local", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, "Cluster", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).
-->
InternalTrafficPolicy 描述节点如何分发它们在 ClusterIP 上接收到的服务流量。
internalTrafficPolicy 描述节点如何分发它们在 ClusterIP 上接收到的服务流量。
如果设置为 “Local”代理将假定 Pod 只想与在同一节点上的服务端点通信,如果没有本地端点,它将丢弃流量。
“Cluster” 默认将流量路由到所有端点(可能会根据拓扑和其他特性进行修改)。
- **healthCheckNodePort** (int32)
<!--
<!--
healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set.
-->
healthCheckNodePort 指定 Service 的健康检查节点端口。
仅适用于 type 为 LoadBalancer 且 externalTrafficPolicy 设置为 Local 的情况。
如果为此字段设定了一个值,该值在合法范围内且没有被使用,则使用所指定的值。
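
下面是一个示意性的清单(非上游参考文档的一部分),演示 externalTrafficPolicy 为 Local 的
LoadBalancer Service 与 healthCheckNodePort 的关系;名称与端口取值仅为示例:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: lb-local-demo              # 仅用于演示的示例名称
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local     # 只有在此设置下 healthCheckNodePort 才适用
  healthCheckNodePort: 32123       # 通常省略、由系统自动分配;取值仅为示例
  selector:
    app.kubernetes.io/name: my-app
  ports:
  - port: 80
    targetPort: 8080
```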
@ -423,7 +428,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **publishNotReadyAddresses** (boolean)
<!--
<!--
publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered "ready" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.
-->
@ -436,12 +441,13 @@ ServiceSpec 描述用户在服务上创建的属性。
- **sessionAffinityConfig** (SessionAffinityConfig)
<!--
<!--
sessionAffinityConfig contains the configurations of session affinity.
<a name="SessionAffinityConfig"></a>
*SessionAffinityConfig represents the configurations of session affinity.*
-->
sessionAffinityConfig 包含会话亲和性的配置。
<a name="SessionAffinityConfig"></a>
@ -449,7 +455,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **sessionAffinityConfig.clientIP** (ClientIPConfig)
<!--
<!--
clientIP contains the configurations of Client IP based session affinity.
<a name="ClientIPConfig"></a>
@ -463,7 +469,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **sessionAffinityConfig.clientIP.timeoutSeconds** (int32)
<!--
<!--
timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && \<=86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800(for 3 hours).
-->
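
作为示意(非上游参考文档的一部分),下面的清单演示了基于客户端 IP 的会话亲和性及其超时配置;
名称与标签均为演示假设:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: affinity-demo              # 仅用于演示的示例名称
spec:
  selector:
    app.kubernetes.io/name: my-app
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800        # 默认值3 小时);必须大于 0 且不超过 86400
  ports:
  - port: 80
    targetPort: 8080
```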
@ -472,7 +478,7 @@ ServiceSpec 描述用户在服务上创建的属性。
- **allocateLoadBalancerNodePorts** (boolean)
<!--
<!--
allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is "true". It may be set to "false" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.
-->
@ -495,7 +501,7 @@ ServiceSpec 描述用户在服务上创建的属性。
## ServiceStatus {#ServiceStatus}
<!--
<!--
ServiceStatus represents the current status of a service.
-->
ServiceStatus 表示 Service 的当前状态。
@ -504,7 +510,7 @@ ServiceStatus 表示 Service 的当前状态。
- **conditions** ([]Condition)
<!--
<!--
*Patch strategy: merge on key `type`*
*Map: unique values on key type will be kept during a merge*
@ -543,25 +549,25 @@ ServiceStatus 表示 Service 的当前状态。
**Time 是 time.Time 的包装类,支持正确地序列化为 YAML 和 JSON。
为 time 包提供的许多工厂方法提供了包装类。**
<!--
<!--
- **conditions.message** (string), required
-->
- **conditions.message** (string),必需
<!--
<!--
message is a human readable message indicating details about the transition. This may be an empty string.
-->
message 是人类可读的消息,有关转换的详细信息,可以是空字符串。
<!--
<!--
- **conditions.reason** (string), required
-->
- **conditions.reason** (string),必需
<!--
<!--
reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
-->
@ -569,33 +575,33 @@ ServiceStatus 表示 Service 的当前状态。
特定条件类型的生产者可以定义该字段的预期值和含义,以及这些值是否被视为有保证的 API。
该值应该是 CamelCase 字符串且不能为空。
<!--
<!--
- **conditions.status** (string), required
-->
- **conditions.status** (string),必需
<!--
<!--
status of the condition, one of True, False, Unknown.
-->
condition 的状态True、False、Unknown 之一。
<!--
<!--
- **conditions.type** (string), required
-->
- **conditions.type** (string),必需
<!--
<!--
type of condition in CamelCase or in foo.example.com/CamelCase.
-->
CamelCase 或 foo.example.com/CamelCase 中的条件类型
condition 的类型,格式为 CamelCase 或 foo.example.com/CamelCase。
- **conditions.observedGeneration** (int64)
<!--
<!--
observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
-->
@ -605,7 +611,7 @@ ServiceStatus 表示 Service 的当前状态。
- **loadBalancer** (LoadBalancerStatus)
<!--
<!--
LoadBalancer contains the current status of the load-balancer, if one is present.
<a name="LoadBalancerStatus"></a>
@ -619,7 +625,7 @@ ServiceStatus 表示 Service 的当前状态。
- **loadBalancer.ingress** ([]LoadBalancerIngress)
<!--
<!--
*Atomic: will be replaced during a merge*
Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.
@ -637,7 +643,7 @@ ServiceStatus 表示 Service 的当前状态。
- **loadBalancer.ingress.hostname** (string)
<!--
<!--
Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)
-->
@ -645,7 +651,7 @@ ServiceStatus 表示 Service 的当前状态。
- **loadBalancer.ingress.ip** (string)
<!--
<!--
IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
-->
@ -659,6 +665,7 @@ ServiceStatus 表示 Service 的当前状态。
Setting this to "Proxy" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node
port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.
-->
ipMode 指定负载均衡器 IP 的行为方式,并且只能在设置了 ip 字段时指定。
将其设置为 `VIP` 表示流量将传送到节点,并将目标设置为负载均衡器的 IP 和端口。
将其设置为 `Proxy` 表示将流量传送到节点或 Pod并将目标设置为节点的 IP 和节点端口或 Pod 的 IP 和端口。
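
作为示意(非上游参考文档的一部分),带有 ipMode 的 status 片段大致如下;
该片段由负载均衡器的实现方写入,用户通常不直接设置IP 地址为文档保留的示例地址:

```yaml
# Service 的 status 片段(示意)
status:
  loadBalancer:
    ingress:
    - ip: 192.0.2.127              # 文档保留的示例 IP 地址
      ipMode: Proxy                # 或 VIP二者语义见上文
```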
@ -666,46 +673,47 @@ ServiceStatus 表示 Service 的当前状态。
- **loadBalancer.ingress.ports** ([]PortStatus)
<!--
<!--
*Atomic: will be replaced during a merge*
-->
**Atomic:将在合并期间被替换**
**原子:将在合并期间被替换**
<!--
Ports is a list of records of service ports If used, every port defined in the service should have an entry in it -->
<!--
Ports is a list of records of service ports If used, every port defined in the service should have an entry in it
-->
ports 是 Service 的端口列表。如果设置了此字段Service 中定义的每个端口都应该在此列表中。
<a name="PortStatus"></a>
<!--
<!--
- **loadBalancer.ingress.ports.port** (int32), required
-->
- **loadBalancer.ingress.ports.port** (int32),必需
<!--
<!--
Port is the port number of the service port of which status is recorded here
-->
port 是所记录的服务端口状态的端口号。
<!--
<!--
- **loadBalancer.ingress.ports.protocol** (string), required
-->
- **loadBalancer.ingress.ports.protocol** (string),必需
<!--
<!--
Protocol is the protocol of the service port of which status is recorded here The supported values are: "TCP", "UDP", "SCTP"
-->
protocol 是所记录的服务端口状态的协议。支持的值为:`TCP`、`UDP`、`SCTP`
protocol 是所记录的服务端口状态的协议。支持的值为:“TCP”、“UDP”、“SCTP”
- **loadBalancer.ingress.ports.error** (string)
<!--
<!--
Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use
CamelCase names
- cloud provider specific error values must have names that comply with the
@ -719,7 +727,7 @@ ServiceStatus 表示 Service 的当前状态。
## ServiceList {#ServiceList}
<!--
<!--
ServiceList holds a list of services.
-->
@ -733,14 +741,14 @@ ServiceList 包含一个 Service 列表。
- **metadata** (<a href="{{< ref "../common-definitions/list-meta#ListMeta" >}}">ListMeta</a>)
<!--
<!--
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-->
标准的列表元数据。更多信息:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
<!--
<!--
- **items** ([]<a href="{{< ref "../service-resources/service-v1#Service" >}}">Service</a>), required
List of services
@ -749,7 +757,7 @@ ServiceList 包含一个 Service 列表。
Service 列表。
<!--
<!--
## Operations {#Operations}
-->
@ -762,7 +770,6 @@ ServiceList 包含一个 Service 列表。
#### HTTP Request
-->
### `get` 读取指定的 Service
#### HTTP 请求
@ -807,7 +814,7 @@ GET /api/v1/namespaces/{namespace}/services/{name}
401: Unauthorized
<!--
<!--
### `get` read status of the specified Service
#### HTTP Request
@ -856,7 +863,7 @@ GET /api/v1/namespaces/{namespace}/services/{name}/status
401: Unauthorized
<!--
<!--
### `list` list or watch objects of kind Service
#### HTTP Request
@ -977,7 +984,7 @@ GET /api/v1/namespaces/{namespace}/services
401: Unauthorized
<!--
<!--
### `list` list or watch objects of kind Service
#### HTTP Request
@ -1090,7 +1097,7 @@ GET /api/v1/services
401: Unauthorized
<!--
<!--
### `create` create a Service
#### HTTP Request
@ -1163,7 +1170,7 @@ POST /api/v1/namespaces/{namespace}/services
401: Unauthorized
<!--
<!--
### `update` replace the specified Service
#### HTTP Request
@ -1242,7 +1249,7 @@ PUT /api/v1/namespaces/{namespace}/services/{name}
401: Unauthorized
<!--
<!--
### `update` replace status of the specified Service
#### HTTP Request
@ -1321,7 +1328,7 @@ PUT /api/v1/namespaces/{namespace}/services/{name}/status
401: Unauthorized
<!--
<!--
### `patch` partially update the specified Service
#### HTTP Request
@ -1408,7 +1415,7 @@ PATCH /api/v1/namespaces/{namespace}/services/{name}
401: Unauthorized
<!--
<!--
### `patch` partially update status of the specified Service
#### HTTP Request
@ -1495,7 +1502,7 @@ PATCH /api/v1/namespaces/{namespace}/services/{name}/status
401: Unauthorized
<!--
<!--
### `delete` delete a Service
#### HTTP Request
@ -1574,7 +1581,7 @@ DELETE /api/v1/namespaces/{namespace}/services/{name}
401: Unauthorized
<!--
<!--
### `deletecollection` delete collection of Service
#### HTTP Request

View File

@ -6,7 +6,7 @@ api_metadata:
content_type: "api_reference"
description: "CronJob 代表单个定时作业 (Cron Job) 的配置。"
title: "CronJob"
weight: 10
weight: 11
---
<!--
@ -17,7 +17,7 @@ kind: "CronJob"
content_type: "api_reference"
description: "CronJob represents the configuration of a single cron job."
title: "CronJob"
weight: 10
weight: 11
auto_generated: true
-->

View File

@ -6,7 +6,7 @@ api_metadata:
content_type: "api_reference"
description: "水平 Pod 自动缩放器的配置。"
title: "HorizontalPodAutoscaler"
weight: 11
weight: 12
---
<!--
@ -17,7 +17,7 @@ api_metadata:
content_type: "api_reference"
description: "configuration of a horizontal pod autoscaler."
title: "HorizontalPodAutoscaler"
weight: 11
weight: 12
auto_generated: true
-->

View File

@ -6,7 +6,7 @@ api_metadata:
content_type: "api_reference"
description: "HorizontalPodAutoscaler 是水平 Pod 自动扩缩器的配置,它根据指定的指标自动管理实现 scale 子资源的任何资源的副本数。"
title: "HorizontalPodAutoscaler"
weight: 12
weight: 13
---
<!--
api_metadata:
@ -16,7 +16,7 @@ api_metadata:
content_type: "api_reference"
description: "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified."
title: "HorizontalPodAutoscaler"
weight: 12
weight: 13
auto_generated: true
-->

View File

@ -6,7 +6,7 @@ api_metadata:
content_type: "api_reference"
description: "PodTemplate 描述一种模板,用来为预定义的 Pod 生成副本。"
title: "PodTemplate"
weight: 2
weight: 3
---
<!--
@ -17,7 +17,7 @@ api_metadata:
content_type: "api_reference"
description: "PodTemplate describes a template for creating copies of a predefined pod."
title: "PodTemplate"
weight: 2
weight: 3
auto_generated: true
-->

View File

@ -290,14 +290,13 @@ or use a different optimization. However, as of Kubernetes version {{< skew curr
it is required by kubectl. When present, the value of this annotation must be a comma separated list
of the group-kinds, in the fully-qualified name format, i.e. `<resource>.<group>`.
-->
此注解处于 alpha 阶段。
此注解处于 Alpha 阶段。
对于 Kubernetes {{< skew currentVersion >}} 版本,如果定义它们的
{{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinition" >}}
打了 `applyset.kubernetes.io/is-parent-type` 标签,
那么你可以在 Secret、ConfigMap 或自定义资源上使用此注解。
规范的部分功能用来实现
[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
规范的部分功能用来实现[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
此注解应用于父对象,这些父对象用于跟踪 ApplySet 以优化 ApplySet 成员对象列表。
它在 ApplySet 规范中是可选的,因为工具可以执行发现或使用不同的优化。
然而,对于 Kubernetes {{< skew currentVersion >}} 版本,它是 kubectl 必需的。
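
下面给出一个示意性的示例(非上游参考文档的一部分),演示在作为 ApplySet 父对象的
Secret 上设置此注解;对象名称为演示假设,注解取值遵循上文所述的 `<resource>.<group>` 格式:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-applyset-parent         # 假设的父对象名称,仅用于演示
  namespace: default
  annotations:
    # 逗号分隔、完全限定格式(<resource>.<group>)的 group-kind 列表
    applyset.kubernetes.io/contains-group-kinds: "deployments.apps,services"
```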
@ -333,14 +332,13 @@ of the group-kinds, in the fully-qualified name format, i.e. `<resource>.<group>
用于:作为 ApplySet 父对象使用的对象。
此注解处于 alpha 阶段。
此注解处于 Alpha 阶段。
对于 Kubernetes {{< skew currentVersion >}} 版本, 如果定义它们的
{{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinition" >}}
打了 `applyset.kubernetes.io/is-parent-type` 标签,
那么你可以在 Secret、ConfigMaps 或自定义资源上使用此注解。
那么你可以在 Secret、ConfigMap 或自定义资源上使用此注解。
规范的部分功能用来实现
[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
规范的部分功能用来实现[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
此注解应用于父对象,这些父对象用于跟踪 ApplySet 以优化 ApplySet 成员对象列表。
它在 ApplySet 规范中是可选的,因为工具可以执行发现或使用不同的优化。
然而,对于 Kubernetes {{< skew currentVersion >}} 版本,它是 kubectl 必需的。
@ -355,7 +353,7 @@ Example: `applyset.kubernetes.io/contains-group-resources: "certificates.cert-ma
Used on: Objects being used as ApplySet parents.
-->
### applyset.kubernetes.io/contains-group-resources (已弃用) {#applyset-kubernetes-io-contains-group-resources}
### applyset.kubernetes.io/contains-group-resources(已弃用) {#applyset-kubernetes-io-contains-group-resources}
类别:注解
@ -380,8 +378,7 @@ of the group-kinds, in the fully-qualified name format, i.e. `<resource>.<group>
CustomResourceDefinition 打了 `applyset.kubernetes.io/is-parent-type` 标签,
那么你可以在 Secret、ConfigMap 或自定义资源上使用此注解。
规范的部分功能用来实现
[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
规范的部分功能用来实现[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
此注解应用于父对象,这些父对象用于跟踪 ApplySet 以优化 ApplySet 成员对象列表。
它在 ApplySet 规范中是可选的,因为工具可以执行发现或使用不同的优化。
然而,对于 Kubernetes {{< skew currentVersion >}} 版本,它是 kubectl 必需的。
@ -427,17 +424,16 @@ There is no relation between the value of this label and object UID.
用于:作为 ApplySet 父对象使用的对象。
此注解处于 alpha 阶段。
此注解处于 Alpha 阶段。
对于 Kubernetes {{< skew currentVersion >}} 版本, 如果定义它们的
{{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinition" >}}
打了 `applyset.kubernetes.io/is-parent-type` 标签,那么你可以在 Secret、ConfigMaps 或自定义资源上使用此注解。
打了 `applyset.kubernetes.io/is-parent-type` 标签,那么你可以在 Secret、ConfigMap 或自定义资源上使用此注解。
规范的部分功能用来实现
[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
规范的部分功能用来实现[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
此标签使对象成为 ApplySet 父对象。
它的值是 ApplySet 的唯一 ID该 ID 派生自父对象本身的标识。
该 ID **必须** 是所在对象的 group-kind-name-namespace 的 hash 的 base64 编码(使用 RFC4648 的 URL 安全编码),
格式为: `<base64(sha256(<name>.<namespace>.<kind>.<group>))>`
格式为:`<base64(sha256(<name>.<namespace>.<kind>.<group>))>`。
此标签的值与对象 UID 之间没有关系。
<!--
@ -465,11 +461,10 @@ not being a valid parent for ApplySets, omit this label.
用于:自定义资源 CRD
此注解处于 alpha 阶段。
规范的部分功能用来实现
[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
此注解处于 Alpha 阶段。
规范的部分功能用来实现[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
你可以在 {{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinition" >}} (CRD) 上设置这个标签,
以将它定义的自定义资源类型(而不是 CRD 本身)标识为 ApplySet 的允许父类。
以将它定义的自定义资源类型(而不是 CRD 本身)标识为 ApplySet 的允许父类。
这个标签唯一允许的值是 `"true"`;如果你想将一个 CRD 标记为不是 ApplySet 的有效父级,请省略这个标签。
<!--
@ -496,9 +491,8 @@ label on the parent object.
用于:所有对象。
此注解处于 alpha 阶段。
规范的部分功能用来实现
[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
此注解处于 Alpha 阶段。
规范的部分功能用来实现[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
此标签使对象成为 ApplySet 的成员。
标签的值 **必须** 与父对象上的 `applyset.kubernetes.io/id` 标签的值相匹配。
@ -530,13 +524,12 @@ The value must be in the format `<toolname>/<semver>`.
用于:作为 ApplySet 父对象使用的对象。
此注解处于 alpha 阶段。
此注解处于 Alpha 阶段。
对于 Kubernetes {{< skew currentVersion >}} 版本, 如果定义它们的
{{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinition" >}}
打了 `applyset.kubernetes.io/is-parent-type` 标签,那么你可以在 Secret、ConfigMaps 或自定义资源上使用此注解。
打了 `applyset.kubernetes.io/is-parent-type` 标签,那么你可以在 Secret、ConfigMap 或自定义资源上使用此注解。
规范的部分功能用来实现
[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
规范的部分功能用来实现[在 kubectl 中基于 ApplySet 的删除](/zh-cn/docs/tasks/manage-kubernetes-objects/declarative-config/#alternative-kubectl-apply-f-directory-prune)。
此注解应用于父对象,这些父对象用于跟踪 ApplySet 以指示哪个工具管理 ApplySet。
工具应该拒绝更改属于其他工具的 ApplySet。
该值必须采用 `<toolname>/<semver>` 格式。
@ -1085,13 +1078,13 @@ Type: Label
This label has been deprecated. Please use [`kubernetes.io/os`](#kubernetes-io-os) instead.
-->
### beta.kubernetes.io/arch (已弃用) {#beta-kubernetes-io-arch}
### beta.kubernetes.io/arch(已弃用) {#beta-kubernetes-io-arch}
类别:标签
此标签已被弃用。请改用 [`kubernetes.io/arch`](#kubernetes-io-arch)。
### beta.kubernetes.io/os (已弃用) {#beta-kubernetes-io-os}
### beta.kubernetes.io/os(已弃用) {#beta-kubernetes-io-os}
类别:标签
@ -1164,6 +1157,27 @@ Service 上的这个注解表示 Endpoints 控制器是否应该继续为未准
这些 Service 的 Endpoints 保留其 DNS 记录,并从 kubelet 启动 Pod 中的所有容器并将其标记为
**Running** 的那一刻起继续接收 Service 的流量,直到 kubelet 停止所有容器并从 API 服务器删除 Pod 为止。
<!--
### autoscaling.alpha.kubernetes.io/behavior (deprecated) {#autoscaling-alpha-kubernetes-io-behavior}
Type: Annotation
Used on: HorizontalPodAutoscaler
This annotation was used to configure the scaling behavior for a HorizontalPodAutoscaler (HPA) in earlier Kubernetes versions.
It allowed you to specify how the HPA should scale pods up or down, including setting stabilization windows and scaling policies.
Setting this annotation has no effect in any supported release of Kubernetes.
-->
### autoscaling.alpha.kubernetes.io/behavior已弃用 {#autoscaling-alpha-kubernetes-io-behavior}
类别:注解
用于HorizontalPodAutoscaler
此注解曾在早期的 Kubernetes 版本中用于配置 HorizontalPodAutoscalerHPA的扩缩容行为。
它允许你指定 HPA 应如何扩容或缩容 Pod包括设置稳定窗口和扩缩容策略。
在所有受支持的 Kubernetes 版本中,设置此注解没有任何效果。
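
作为补充说明(非上游参考文档的一部分):在受支持的版本中,扩缩容行为应改用
autoscaling/v2 的 `behavior` 字段配置。下面是一个示意性片段,名称均为演示假设:

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: behavior-demo              # 仅用于演示的示例名称
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-app                   # 假设的扩缩目标
  minReplicas: 2
  maxReplicas: 10
  behavior:                        # 取代上述已弃用注解的受支持字段
    scaleDown:
      stabilizationWindowSeconds: 300
```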
<!--
### kubernetes.io/hostname {#kubernetesiohostname}
@ -1315,9 +1329,14 @@ backend set:
你可以向特定的 Worker 节点添加标签,以将这些节点从外部负载均衡器使用的后端服务器列表中去除。
以下命令可用于从后端集的后端服务器列表中排除一个 Worker 节点:
<!--
```shell
kubectl label nodes <node-name> node.kubernetes.io/exclude-from-external-load-balancers=true
```
-->
```shell
kubectl label nodes <节点名称> node.kubernetes.io/exclude-from-external-load-balancers=true
```
<!--
### controller.kubernetes.io/pod-deletion-cost {#pod-deletion-cost}
@ -1475,7 +1494,7 @@ bin dir (default `/opt/cni/bin`).
Type: Label
-->
### beta.kubernetes.io/instance-type (已弃用) {#beta-kubernetes-io-instance-type}
### beta.kubernetes.io/instance-type(已弃用) {#beta-kubernetes-io-instance-type}
类别:标签
@ -1521,7 +1540,7 @@ Kubelet 使用云驱动定义的实例类型填充它。
Type: Label
-->
### failure-domain.beta.kubernetes.io/region (已弃用) {#failure-domainbetakubernetesioregion}
### failure-domain.beta.kubernetes.io/region(已弃用) {#failure-domainbetakubernetesioregion}
类别:标签
@ -1538,7 +1557,7 @@ Starting in v1.17, this label is deprecated in favor of
Type: Label
-->
### failure-domain.beta.kubernetes.io/zone (已弃用) {#failure-domainbetakubernetesiozone}
### failure-domain.beta.kubernetes.io/zone(已弃用) {#failure-domainbetakubernetesiozone}
类别:标签
@ -1853,7 +1872,7 @@ Used on: PersistentVolumeClaim
This annotation has been deprecated since v1.23.
See [volume.kubernetes.io/storage-provisioner](#volume-kubernetes-io-storage-provisioner).
-->
### volume.beta.kubernetes.io/storage-provisioner (已弃用) {#volume-beta-kubernetes-io-storage-provisioner}
### volume.beta.kubernetes.io/storage-provisioner(已弃用) {#volume-beta-kubernetes-io-storage-provisioner}
类别:注解
@ -2031,7 +2050,7 @@ For example, if the in-tree cloud provider storage type is `CSIMigrationvSphere`
-->
### storage.alpha.kubernetes.io/migrated-plugins {#storagealphakubernetesiomigrated-plugins}
类别:注解
类别:注解
例子:`storage.alpha.kubernetes.io/migrated-plugins: "kubernetes.io/cinder"`
@ -2350,7 +2369,7 @@ Used on: Pod
The annotation is used to run Windows containers with Hyper-V isolation.
-->
### experimental.windows.kubernetes.io/isolation-type (已弃用) {#experimental-windows-kubernetes-io-isolation-type}
### experimental.windows.kubernetes.io/isolation-type(已弃用) {#experimental-windows-kubernetes-io-isolation-type}
类别:注解
@ -2429,7 +2448,7 @@ Type: Annotation
Used on: Ingress
-->
### kubernetes.io/ingress.class (已弃用) {#kubernetes-io-ingress-class}
### kubernetes.io/ingress.class(已弃用) {#kubernetes-io-ingress-class}
类别:注解
@ -2777,7 +2796,7 @@ Used on: Jobs
The presence of this annotation on a Job used to indicate that the control plane is
[tracking the Job status using finalizers](/docs/concepts/workloads/controllers/job/#job-tracking-with-finalizers).
-->
### batch.kubernetes.io/job-tracking (已弃用) {#batch-kubernetes-io-job-tracking}
### batch.kubernetes.io/job-tracking(已弃用) {#batch-kubernetes-io-job-tracking}
类别:注解
@ -3171,7 +3190,8 @@ This allows the Pods on the out-of-service node to recover quickly on a differen
Refer to [Non-graceful node shutdown](/docs/concepts/architecture/nodes/#non-graceful-node-shutdown)
for further details about when and how to use this taint.
-->
有关何时以及如何使用此污点的更多详细信息,请参阅[非正常节点关闭](/zh-cn/docs/concepts/architecture/nodes/#non-graceful-node-shutdown)。
有关何时以及如何使用此污点的更多详细信息,
请参阅[非正常节点关闭](/zh-cn/docs/concepts/architecture/nodes/#non-graceful-node-shutdown)。
{{< /caution >}}
<!--
@ -3297,8 +3317,8 @@ if there is one running on a node. It's used for informative use only.
用于:节点
这个注解记录 NFD-[worker](https://kubernetes-sigs.github.io/node-feature-discovery/stable/usage/nfd-worker.html)
的版本(如果在节点上运行了一个 NFD-worker 的话)
只用于提供信息。
的版本(如果在节点上运行了一个 NFD-worker 的话)
此注解只用于提供信息。
<!--
### nfd.node.kubernetes.io/feature-labels
@ -3380,8 +3400,8 @@ the nodes where NFD is running. To learn more about NFD and
its components go to its official [documentation](https://kubernetes-sigs.github.io/node-feature-discovery/stable/get-started/).
-->
这些节点特性发现Node Feature Discovery, NFD的标签或注解仅适用于运行 NFD 的节点。
要了解关于 NFD 及其组件的信息,请访问官方
[文档](https://kubernetes-sigs.github.io/node-feature-discovery/stable/get-started/)。
要了解关于 NFD 及其组件的信息,
请访问官方[文档](https://kubernetes-sigs.github.io/node-feature-discovery/stable/get-started/)。
{{< /note >}}
### service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval (beta) {#service-beta-kubernetes-io-aws-load-balancer-access-log-emit-interval}
@ -3624,9 +3644,8 @@ evenly across the registered targets in its availability zone only.
用于Service
与 AWS 弹性负载均衡集成的云控制器管理器会根据此注解配置负载均衡器。
如果你将此注解设置为 "true",每个负载均衡器节点将在所有启用的
[可用区](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones)中的注册目标上均匀地分发请求。
与 AWS 弹性负载均衡集成的云控制器管理器会根据此注解配置负载均衡器。如果你将此注解设置为 "true"
每个负载均衡器节点将在所有启用的[可用区](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones)中的注册目标上均匀地分发请求。
如果你禁用跨区负载均衡,则每个负载均衡器节点仅在其可用区中跨注册目标均匀地分发请求。
### service.beta.kubernetes.io/aws-load-balancer-eip-allocations (beta) {#service-beta-kubernetes-io-aws-load-balancer-eip-allocations}
@ -4471,7 +4490,7 @@ learn the supported way to specify seccomp restrictions for a Pod.
用于Pod
v1.25 之前的 Kubernetes 允许你使用此注解配置 seccomp 行为。
请参考 [使用 seccomp 限制容器的系统调用](/zh-cn/docs/tutorials/security/seccomp/)
请参考[使用 seccomp 限制容器的系统调用](/zh-cn/docs/tutorials/security/seccomp/)
了解为 Pod 指定 seccomp 限制的受支持方法。
<!--
@ -4492,7 +4511,7 @@ learn the supported way to specify seccomp restrictions for a Pod.
用于Pod
v1.25 之前的 Kubernetes 允许你使用此注解配置 seccomp 行为。
请参考 [使用 seccomp 限制容器的系统调用](/zh-cn/docs/tutorials/security/seccomp/)
请参考[使用 seccomp 限制容器的系统调用](/zh-cn/docs/tutorials/security/seccomp/)
了解为 Pod 指定 seccomp 限制的受支持方法。
### snapshot.storage.kubernetes.io/allow-volume-mode-change {#allow-volume-mode-change}
@ -4705,9 +4724,9 @@ ignores that node while calculating Topology Aware Hints.
用来指示该节点用于运行控制平面组件的标记标签。Kubeadm 工具将此标签应用于其管理的控制平面节点。
其他集群管理工具通常也会设置此污点。
你可以使用此标签来标记控制平面节点,以便更容易地将 Pod 仅安排到这些节点上,或者避免在控制平面上运行 Pod。
如果设置了此标签,[EndpointSlice 控制器](/zh-cn/docs/concepts/services-networking/topology-aware-routing/#implementation-control-plane)
在计算拓扑感知提示时将忽略该节点。
你可以使用此标签来标记控制平面节点,以便更容易地将 Pod 仅安排到这些节点上,
或者避免在控制平面上运行 Pod。如果设置了此标签,
[EndpointSlice 控制器](/zh-cn/docs/concepts/services-networking/topology-aware-routing/#implementation-control-plane)在计算拓扑感知提示时将忽略该节点。
<!--
### node-role.kubernetes.io/control-plane {#node-role-kubernetes-io-control-plane-taint}
@ -4737,9 +4756,14 @@ Kubeadm 应用在控制平面节点上的污点, 用来限制启动 Pod并且
如果应用此污点,则控制平面节点只允许对其进行关键工作负载调度。可以在特定节点上使用以下命令手动删除此污点。
<!--
```shell
kubectl taint nodes <node-name> node-role.kubernetes.io/control-plane:NoSchedule-
```
-->
```shell
kubectl taint nodes <节点名称> node-role.kubernetes.io/control-plane:NoSchedule-
```
<!--
### node-role.kubernetes.io/master (deprecated) {#node-role-kubernetes-io-master-taint}

View File

@ -31,6 +31,28 @@ of `type` other than
[`ExternalName`](/zh-cn/docs/concepts/services-networking/service/#externalname)
以外的{{< glossary_tooltip term_id="service" text="服务">}},实现**虚拟 IP** 机制。
<!--
Each instance of kube-proxy watches the Kubernetes {{< glossary_tooltip
term_id="control-plane" text="control plane" >}} for the addition and
removal of Service and EndpointSlice {{< glossary_tooltip
term_id="object" text="objects" >}}. For each Service, kube-proxy
calls appropriate APIs (depending on the kube-proxy mode) to configure
the node to capture traffic to the Service's `clusterIP` and `port`,
and redirect that traffic to one of the Service's endpoints
(usually a Pod, but possibly an arbitrary user-provided IP address). A control
loop ensures that the rules on each node are reliably synchronized with
the Service and EndpointSlice state as indicated by the API server.
{{< figure src="/images/docs/services-iptables-overview.svg" title="Virtual IP mechanism for Services, using iptables mode" class="diagram-medium" >}}
-->
kube-proxy 的每个实例都会监视 Kubernetes {{< glossary_tooltip text="控制平面" term_id="control-plane" >}} 中
Service 和 EndpointSlice {{< glossary_tooltip text="对象" term_id="object" >}} 的添加和删除。对于每个
Servicekube-proxy 调用适当的 API取决于 kube-proxy 模式)来配置节点,以捕获流向 Service 的 `clusterIP` 和 `port`
的流量,并将这些流量重定向到 Service 的某个端点(通常是 Pod但也可能是用户提供的任意 IP 地址)。一个控制回路确保每个节点上的规则与
API 服务器指示的 Service 和 EndpointSlice 状态可靠同步。
{{< figure src="/zh-cn/docs/images/services-iptables-overview.svg" title="iptables 模式下 Service 的虚拟 IP 机制" class="diagram-medium" >}}
<!--
A question that pops up every now and then is why Kubernetes relies on
proxying to forward inbound traffic to backends. What about other
@ -107,21 +129,27 @@ The kube-proxy starts up in different modes, which are determined by its configu
On Linux nodes, the available modes for kube-proxy are:
[`iptables`](#proxy-mode-iptables)
: A mode where the kube-proxy configures packet forwarding rules using iptables, on Linux.
: A mode where the kube-proxy configures packet forwarding rules using iptables.
[`ipvs`](#proxy-mode-ipvs)
: a mode where the kube-proxy configures packet forwarding rules using ipvs.
[`nftables`](#proxy-mode-nftables)
: a mode where the kube-proxy configures packet forwarding rules using nftables.
-->
kube-proxy 会根据不同配置以不同的模式启动。
在 Linux 节点上kube-proxy 的可用模式是:
[`iptables`](#proxy-mode-iptables)
: kube-proxy 在 Linux 上使用 iptables 配置数据包转发规则的一种模式。
: kube-proxy 使用 iptables 配置数据包转发规则的一种模式。
[`ipvs`](#proxy-mode-ipvs)
: kube-proxy 使用 ipvs 配置数据包转发规则的一种模式。
[`nftables`](#proxy-mode-nftables)
: kube-proxy 使用 nftables 配置数据包转发规则的一种模式。
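
作为示意(非上游文档的一部分),模式可以在 kube-proxy 的配置文件中选择;
下面的片段假设以配置文件方式启动 kube-proxy取值仅为示例

```yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# 通过 kube-proxy --config <path> 加载
mode: "nftables"                   # Linux 上可选 "iptables"、"ipvs" 或 "nftables"
```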
<!--
There is only one mode available for kube-proxy on Windows:
@ -137,64 +165,19 @@ Windows 上的 kube-proxy 只有一种模式可用:
### `iptables` proxy mode {#proxy-mode-iptables}
_This proxy mode is only available on Linux nodes._
In this mode, kube-proxy configures packet forwarding rules using the
iptables API of the kernel netfilter subsystem. For each endpoint, it
installs iptables rules which, by default, select a backend Pod at
random.
-->
### `iptables` 代理模式 {#proxy-mode-iptables}
**此代理模式仅适用于 Linux 节点。**
<!--
In this mode, kube-proxy watches the Kubernetes
{{< glossary_tooltip term_id="control-plane" text="control plane" >}} for the addition and
removal of Service and EndpointSlice {{< glossary_tooltip term_id="object" text="objects." >}}
For each Service, it installs
iptables rules, which capture traffic to the Service's `clusterIP` and `port`,
and redirect that traffic to one of the Service's
backend sets. For each endpoint, it installs iptables rules which
select a backend Pod.
-->
在这种模式下kube-proxy 监视 Kubernetes
{{< glossary_tooltip text="控制平面" term_id="control-plane" >}},获知对 Service 和 EndpointSlice
{{< glossary_tooltip text="对象" term_id="object" >}}的添加和删除操作。
对于每个 Servicekube-proxy 会添加 iptables 规则,这些规则捕获流向 Service 的 `clusterIP``port` 的流量,
并将这些流量重定向到 Service 后端集合中的其中之一。
对于每个端点,它会添加指向一个特定后端 Pod 的 iptables 规则。
<!--
By default, kube-proxy in iptables mode chooses a backend at random.
Using iptables to handle traffic has a lower system overhead, because traffic
is handled by Linux netfilter without the need to switch between userspace and the
kernel space. This approach is also likely to be more reliable.
-->
默认情况下iptables 模式下的 kube-proxy 会随机选择一个后端。
使用 iptables 处理流量的系统开销较低,因为流量由 Linux netfilter 处理,
无需在用户空间和内核空间之间切换。这种方案也更为可靠。
<!--
If kube-proxy is running in iptables mode and the first Pod that's selected
does not respond, the connection fails. This is different from the old `userspace`
mode: in that scenario, kube-proxy would detect that the connection to the first
Pod had failed and would automatically retry with a different backend Pod.
-->
如果 kube-proxy 以 iptables 模式运行,并且它选择的第一个 Pod 没有响应,
那么连接会失败。这与用户空间模式不同:
在后者这种情况下kube-proxy 会检测到与第一个 Pod 的连接失败,
并会自动用不同的后端 Pod 重试。
<!--
You can use Pod [readiness probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
to verify that backend Pods are working OK, so that kube-proxy in iptables mode
only sees backends that test out as healthy. Doing this means you avoid
having traffic sent via kube-proxy to a Pod that's known to have failed.
-->
你可以使用 Pod [就绪探针](/zh-cn/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)来验证后端 Pod 是否健康。
这样可以避免 kube-proxy 将流量发送到已知失败的 Pod 上。
<!--
{{< figure src="/images/docs/services-iptables-overview.svg" title="Virtual IP mechanism for Services, using iptables mode" class="diagram-medium" >}}
-->
{{< figure src="/zh-cn/docs/images/services-iptables-overview.svg" title="iptables 模式下 Service 的虚拟 IP 机制" class="diagram-medium" >}}
在这种模式下kube-proxy 使用内核 netfilter 子系统的 iptables API
配置数据包转发规则。对于每个端点kube-proxy 会添加 iptables
规则,这些规则默认情况下会随机选择一个后端 Pod。
<!--
#### Example {#packet-processing-iptables}
@ -244,8 +227,10 @@ through a load-balancer, though in those cases the client IP address does get al
<!--
#### Optimizing iptables mode performance
In large clusters (with tens of thousands of Pods and Services), the
iptables mode of kube-proxy may take a long time to update the rules
In iptables mode, kube-proxy creates a few iptables rules for every
Service, and a few iptables rules for each endpoint IP address. In
clusters with tens of thousands of Pods and Services, this means tens
of thousands of iptables rules, and kube-proxy may take a long time to update the rules
in the kernel when Services (or their EndpointSlices) change. You can adjust the syncing
behavior of kube-proxy via options in the [`iptables` section](/docs/reference/config-api/kube-proxy-config.v1alpha1/#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPTablesConfiguration)
of the
@ -253,13 +238,12 @@ kube-proxy [configuration file](/docs/reference/config-api/kube-proxy-config.v1a
(which you specify via `kube-proxy --config <path>`):
-->
#### 优化 iptables 模式性能 {#optimizing-iptables-mode-performance}
在大型集群(有数万个 Pod 和 Service当 Service或其 EndpointSlice发生变化时
iptables 模式的 kube-proxy 在更新内核中的规则时可能要用较长时间。
你可以通过(`kube-proxy --config <path>` 指定的kube-proxy
[配置文件](/zh-cn/docs/reference/config-api/kube-proxy-config.v1alpha1/)的
[`iptables` 节](/zh-cn/docs/reference/config-api/kube-proxy-config.v1alpha1/#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPTablesConfiguration)中的选项来调整
kube-proxy 的同步行为:
在 iptables 模式下kube-proxy 为每个 Service 创建一些 iptables 规则,并为每个端点
IP 地址创建一些 iptables 规则。在拥有数万个 Pod 和 Service 的集群中,这意味着数万个
iptables 规则,当 Service或其 EndpointSlice发生变化时kube-proxy
在更新内核中的规则时可能要用很长时间。你可以通过(`kube-proxy --config <path>` 指定的)
kube-proxy [配置文件](/zh-cn/docs/reference/config-api/kube-proxy-config.v1alpha1/)的
[`iptables` 章节](/zh-cn/docs/reference/config-api/kube-proxy-config.v1alpha1/#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPTablesConfiguration)中的选项来调整 kube-proxy 的同步行为:
```yaml
...
@ -389,17 +373,11 @@ _This proxy mode is only available on Linux nodes._
**此代理模式仅适用于 Linux 节点。**
<!--
In `ipvs` mode, kube-proxy watches Kubernetes Services and EndpointSlices,
calls `netlink` interface to create IPVS rules accordingly and synchronizes
IPVS rules with Kubernetes Services and EndpointSlices periodically.
This control loop ensures that IPVS status matches the desired state.
When accessing a Service, IPVS directs traffic to one of the backend Pods.
In `ipvs` mode, kube-proxy uses the kernel IPVS and iptables APIs to
create rules to redirect traffic from Service IPs to endpoint IPs.
-->
`ipvs` 模式下kube-proxy 监视 Kubernetes Service 和 EndpointSlice
然后调用 `netlink` 接口创建 IPVS 规则,
并定期与 Kubernetes Service 和 EndpointSlice 同步 IPVS 规则。
该控制回路确保 IPVS 状态与期望的状态保持一致。
访问 Service 时IPVS 会将流量导向到某一个后端 Pod。
`ipvs` 模式下kube-proxy 使用内核 IPVS 和 iptables API
创建规则,将流量从 Service IP 重定向到端点 IP。
<!--
The IPVS proxy mode is based on netfilter hook function that is similar to
@ -407,14 +385,14 @@ iptables mode, but uses a hash table as the underlying data structure and works
in the kernel space.
That means kube-proxy in IPVS mode redirects traffic with lower latency than
kube-proxy in iptables mode, with much better performance when synchronizing
proxy rules. Compared to the other proxy modes, IPVS mode also supports a
proxy rules. Compared to the iptables proxy mode, IPVS mode also supports a
higher throughput of network traffic.
-->
IPVS 代理模式基于 netfilter 回调函数,类似于 iptables 模式,
但它使用哈希表作为底层数据结构,在内核空间中生效。
这意味着 IPVS 模式下的 kube-proxy 比 iptables 模式下的 kube-proxy
重定向流量的延迟更低,同步代理规则时性能也更好。
其他代理模式相比IPVS 模式还支持更高的网络流量吞吐量。
iptables 代理模式相比IPVS 模式还支持更高的网络流量吞吐量。
<!--
IPVS provides more options for balancing traffic to backend Pods;
@ -497,12 +475,12 @@ the node before starting kube-proxy.
When kube-proxy starts in IPVS proxy mode, it verifies whether IPVS
kernel modules are available. If the IPVS kernel modules are not detected, then kube-proxy
falls back to running in iptables proxy mode.
exits with an error.
-->
要在 IPVS 模式下运行 kube-proxy必须在启动 kube-proxy 之前确保节点上的 IPVS 可用。
当 kube-proxy 以 IPVS 代理模式启动时,它会验证 IPVS 内核模块是否可用。
如果未检测到 IPVS 内核模块,则 kube-proxy 会退回到 iptables 代理模式运行
如果未检测到 IPVS 内核模块,则 kube-proxy 会退出并报错
{{< /note >}}
<!--
@ -510,6 +488,136 @@ falls back to running in iptables proxy mode.
-->
{{< figure src="/zh-cn/docs/images/services-ipvs-overview.svg" title="IPVS 模式下 Service 的虚拟 IP 地址机制" class="diagram-medium" >}}
<!--
### `nftables` proxy mode {#proxy-mode-nftables}
{{< feature-state feature_gate_name="NFTablesProxyMode" >}}
_This proxy mode is only available on Linux nodes, and requires kernel
5.13 or later._
-->
### `nftables` 代理模式 {#proxy-mode-nftables}
{{< feature-state feature_gate_name="NFTablesProxyMode" >}}
**此代理模式仅适用于 Linux 节点,并且需要 5.13 或更高的内核版本。**
<!--
In this mode, kube-proxy configures packet forwarding rules using the
nftables API of the kernel netfilter subsystem. For each endpoint, it
installs nftables rules which, by default, select a backend Pod at
random.
-->
在这种模式下kube-proxy 使用内核 netfilter 子系统的 nftables API
配置数据包转发规则。对于每个端点,它会添加 nftables
规则,这些规则默认情况下会随机选择一个后端 Pod。
<!--
The nftables API is the successor to the iptables API and is designed
to provide better performance and scalability than iptables. The
`nftables` proxy mode is able to process changes to service endpoints
faster and more efficiently than the `iptables` mode, and is also able
to more efficiently process packets in the kernel (though this only
becomes noticeable in clusters with tens of thousands of services).
-->
nftables API 是 iptables API 的后继,旨在提供比 iptables 更好的性能和可扩展性。
`nftables` 代理模式能够比 `iptables` 模式更快、更高效地处理服务端点的变化,
并且在内核中处理数据包的效率也更高(尽管这只有在拥有数万个服务的集群中才会比较明显)。
<!--
As of Kubernetes {{< skew currentVersion >}}, the `nftables` mode is
still relatively new, and may not be compatible with all network
plugins; consult the documentation for your network plugin.
-->
在 Kubernetes {{< skew currentVersion >}} 中,`nftables`
模式仍然相对较新,可能还不兼容所有的网络插件;请查阅你的网络插件文档。
<!--
#### Migrating from `iptables` mode to `nftables`
Users who want to switch from the default `iptables` mode to the
`nftables` mode should be aware that some features work slightly
differently the `nftables` mode:
-->
#### 从 `iptables` 模式到 `nftables` 模式的迁移 {#migrating-from-iptables-mode-to-nftables}
想要从默认的 `iptables` 模式切换到 `nftables` 模式的用户应注意,在
`nftables` 模式下,一些特性的工作方式略有不同:
<!--
- **NodePort interfaces**: In `iptables` mode, by default,
[NodePort services](/docs/concepts/services-networking/service/#type-nodeport)
are reachable on all local IP addresses. This is usually not what
users want, so the `nftables` mode defaults to
`--nodeport-addresses primary`, meaning NodePort services are only
reachable on the node's primary IPv4 and/or IPv6 addresses. You can
override this by specifying an explicit value for that option:
e.g., `--nodeport-addresses 0.0.0.0/0` to listen on all (local)
IPv4 IPs.
-->
- **NodePort 接口**:在 `iptables` 模式下,默认情况下,
[NodePort 服务](/zh-cn/docs/concepts/services-networking/service/#type-nodeport) 可以在所有本地
IP 地址上访问。这通常不是用户想要的,因此 `nftables` 模式默认使用 `--nodeport-addresses primary`,这意味着
NodePort 服务只能在节点的主 IPv4 和/或 IPv6 地址上访问。你可以通过为该选项指定一个明确的值来覆盖此设置:例如,使用
`--nodeport-addresses 0.0.0.0/0` 以监听所有(本地IPv4 IP。
<!--
- **NodePort services on `127.0.0.1`**: In `iptables` mode, if the
`--nodeport-addresses` range includes `127.0.0.1` (and the option
`--iptables-localhost-nodeports false` option is not passed), then
NodePort services are reachable even on "localhost" (`127.0.0.1`).
In `nftables` mode (and `ipvs` mode), this will not work. If you
are not sure if you are depending on this functionality, you can
check kube-proxy's
`iptables_localhost_nodeports_accepted_packets_total` metric; if it
is non-0, that means that some client has connected to a NodePort
service via `127.0.0.1`.
-->
- **`127.0.0.1` 上的 NodePort 服务**:在 `iptables` 模式下,如果
`--nodeport-addresses` 范围包括 `127.0.0.1`(且未传递 `--iptables-localhost-nodeports false` 选项),
则 NodePort 服务甚至可以在 "localhost" (`127.0.0.1`) 上访问。
`nftables` 模式(和 `ipvs` 模式)下,这将不起作用。如果你不确定是否依赖此功能,
可以检查 kube-proxy 的 `iptables_localhost_nodeports_accepted_packets_total` 指标;
如果该值非 0则表示某些客户端已通过 `127.0.0.1` 连接到 NodePort 服务。
<!--
- **NodePort interaction with firewalls**: The `iptables` mode of
kube-proxy tries to be compatible with overly-agressive firewalls;
for each NodePort service, it will add rules to accept inbound
traffic on that port, in case that traffic would otherwise be
blocked by a firewall. This approach will not work with firewalls
based on nftables, so kube-proxy's `nftables` mode does not do
anything here; if you have a local firewall, you must ensure that
it is properly configured to allow Kubernetes traffic through
(e.g., by allowing inbound traffic on the entire NodePort range).
-->
- **NodePort 与防火墙的交互**kube-proxy 的 `iptables` 模式尝试与过于激进的防火墙兼容;
对于每个 NodePort 服务,它会添加规则以接受该端口的入站流量,以防该流量被防火墙阻止。
这种方法不适用于基于 nftables 的防火墙,因此 kube-proxy 的 `nftables` 模式在这里不会做任何事情;
如果你有本地防火墙,必须确保其配置正确以允许 Kubernetes 流量通过(例如,允许整个 NodePort 范围的入站流量)。
<!--
- **Conntrack bug workarounds**: Linux kernels prior to 6.1 have a
bug that can result in long-lived TCP connections to service IPs
being closed with the error "Connection reset by peer". The
`iptables` mode of kube-proxy installs a workaround for this bug,
but this workaround was later found to cause other problems in some
clusters. The `nftables` mode does not install any workaround by
default, but you can check kube-proxy's
`iptables_ct_state_invalid_dropped_packets_total` metric to see if
your cluster is depending on the workaround, and if so, you can run
kube-proxy with the option `--conntrack-tcp-be-liberal` to work
around the problem in `nftables` mode.
-->
- **Conntrack 缺陷规避**6.1 之前的 Linux 内核存在一个缺陷,可能导致与服务 IP 的长期
  TCP 连接被关闭,并出现“Connection reset by peer对方重置连接”的错误。kube-proxy 的 `iptables`
  模式为此缺陷安装了一个规避措施,但后来发现该规避措施在某些集群中会导致其他问题。
  `nftables` 模式默认不安装任何规避措施,但你可以检查 kube-proxy 的
  `iptables_ct_state_invalid_dropped_packets_total`
  指标,看看你的集群是否依赖于该规避措施;如果是,你可以使用 `--conntrack-tcp-be-liberal`
  选项运行 kube-proxy以在 `nftables` 模式下规避该问题。
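<!--
To check the two metrics mentioned in the list above, you can scrape the
kube-proxy metrics endpoint directly. A sketch, assuming you run this on the
node itself and kube-proxy uses the default metrics address `127.0.0.1:10249`:
-->
要检查上面列表中提到的这两个指标,你可以直接抓取 kube-proxy 的指标端点。
下面是一个示意(假设你在节点本地执行,且 kube-proxy 使用默认的指标地址 `127.0.0.1:10249`

```shell
# 查看与迁移相关的两个指标的当前值
curl -s http://127.0.0.1:10249/metrics | \
  grep -E 'iptables_(localhost_nodeports_accepted|ct_state_invalid_dropped)_packets_total'
```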
<!--
### `kernelspace` proxy mode {#proxy-mode-kernelspace}
@ -665,17 +773,17 @@ Kubernetes 通过从为 {{< glossary_tooltip text="API 服务器" term_id="kube-
`service-cluster-ip-range` CIDR 范围内为每个 Service 分配自己的 IP 地址来实现这一点。
<!--
#### IP address allocation tracking
### IP address allocation tracking
To ensure each Service receives a unique IP, an internal allocator atomically
To ensure each Service receives a unique IP address, an internal allocator atomically
updates a global allocation map in {{< glossary_tooltip term_id="etcd" >}}
prior to creating each Service. The map object must exist in the registry for
Services to get IP address assignments, otherwise creations will
fail with a message indicating an IP address could not be allocated.
-->
#### IP 地址分配追踪
### IP 地址分配追踪 {#ip-address-allocation-tracking}
为了确保每个 Service 都获得唯一的 IP内部分配器在创建每个 Service
为了确保每个 Service 都获得唯一的 IP 地址,内部分配器在创建每个 Service
之前更新 {{< glossary_tooltip term_id="etcd" >}} 中的全局分配映射,这种更新操作具有原子性。
映射对象必须存在于数据库中,这样 Service 才能获得 IP 地址分配,
否则创建将失败,并显示无法分配 IP 地址。
@ -684,50 +792,67 @@ fail with a message indicating an IP address could not be allocated.
In the control plane, a background controller is responsible for creating that
map (needed to support migrating from older versions of Kubernetes that used
in-memory locking). Kubernetes also uses controllers to check for invalid
assignments (e.g. due to administrator intervention) and for cleaning up allocated
assignments (for example: due to administrator intervention) and for cleaning up allocated
IP addresses that are no longer used by any Services.
-->
在控制平面中,后台控制器负责创建该映射(从使用内存锁定的旧版本的 Kubernetes 迁移时需要这一映射)。
Kubernetes 还使用控制器来检查无效的分配(例如,因管理员干预而导致无效分配)
以及清理已分配但没有 Service 使用的 IP 地址。
{{< feature-state for_k8s_version="v1.27" state="alpha" >}}
<!--
#### IP address allocation tracking using the Kubernetes API {#ip-address-objects}
-->
#### 使用 Kubernetes API 跟踪 IP 地址分配 {#ip-address-objects}
{{< feature-state feature_gate_name="MultiCIDRServiceAllocator" >}}
<!--
If you enable the `MultiCIDRServiceAllocator`
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and the
[`networking.k8s.io/v1alpha1` API group](/docs/tasks/administer-cluster/enable-disable-api/),
the control plane replaces the existing etcd allocator with a new one, using IPAddress
objects instead of an internal global allocation map. The ClusterIP address
associated to each Service will have a referenced IPAddress object.
the control plane replaces the existing etcd allocator with a revised implementation
that uses IPAddress and ServiceCIDR objects instead of an internal global allocation map.
Each cluster IP address associated to a Service then references an IPAddress object.
-->
如果你启用 `MultiCIDRServiceAllocator` [特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/)和
[`networking.k8s.io/v1alpha1` API 组](/zh-cn/docs/tasks/administer-cluster/enable-disable-api/)
控制平面将用一个新的分配器替换现有的 etcd 分配器,使用 IPAddress 对象而不是内部的全局分配映射。
与每个 Service 关联的 ClusterIP 地址将有一个对应的 IPAddress 对象。
控制平面用一个改进后的实现替换现有的 etcd 分配器,使用 IPAddress 和 ServiceCIDR
对象而不是内部的全局分配映射。与某 Service 关联的每个 ClusterIP 地址将有一个对应的
IPAddress 对象。
<!--
The background controller is also replaced by a new one to handle the new IPAddress
objects and the migration from the old allocator model.
Enabling the feature gate also replaces a background controller with an alternative
that handles the IPAddress objects and supports migration from the old allocator model.
Kubernetes {{< skew currentVersion >}} does not support migrating from IPAddress
objects to the internal allocation map.
-->
后台控制器也被一个新的控制器取代,来处理新的 IPAddress 对象和从旧的分配器模型的迁移。
启用该特性门控还会用一个替代实现替换原有的后台控制器,由它来处理 IPAddress 对象并支持从旧的分配器模型迁移。
Kubernetes {{< skew currentVersion >}} 不支持从 IPAddress 对象迁移到内部分配映射。
<!--
One of the main benefits of the new allocator is that it removes the size limitations
for the `service-cluster-ip-range`, there is no limitations for IPv4 and for IPv6
users can use masks equal or larger than /64 (previously it was /108).
One of the main benefits of the revised allocator is that it removes the size limitations
for the IP address range that can be used for the cluster IP address of Services.
With `MultiCIDRServiceAllocator` enabled, there are no limitations for IPv4, and for IPv6
you can use IP address netmasks that are a /64 or smaller (as opposed to /108 with the
legacy implementation).
-->
新分配器的主要好处之一是它取消了对 `service-cluster-ip-range` 的大小限制,对 IPv4 没有大小限制,
对于 IPv6 用户可以使用等于或大于 /64 的掩码(以前是 /108
改进后的分配器的主要优点之一是它取消了对可用于 Service 的集群 IP 地址的范围大小限制。
启用 `MultiCIDRServiceAllocator` 后,对 IPv4 没有大小限制,而对于
IPv6你可以使用等于或小于 /64 的 IP 地址子网掩码(与旧实现中的 /108 相比)。
<!--
Users now will be able to inspect the IP addresses assigned to their Services, and
Kubernetes extensions such as the [Gateway](https://gateway-api.sigs.k8s.io/) API, can use this new
IPAddress object kind to enhance the Kubernetes networking capabilities, going beyond the limitations of
the built-in Service API.
Making IP address allocations available via the API means that you as a cluster administrator
can allow users to inspect the IP addresses assigned to their Services.
Kubernetes extensions, such as the [Gateway API](/docs/concepts/services-networking/gateway/),
can use the IPAddress API to extend Kubernetes' inherent networking capabilities.
-->
用户现在能够检查分配给他们的 Service 的 IP 地址Kubernetes 扩展,
如 [Gateway](https://gateway-api.sigs.k8s.io/) API
可以使用这个新的 IPAddress 对象类别来增强 Kubernetes 的网络能力,解除内置 Service API 的限制。
通过 API 提供 IP 地址分配,意味着作为集群管理员,你可以允许用户检查分配给他们的 Service 的 IP 地址。
Kubernetes 扩展(例如 [Gateway API](/zh-cn/docs/concepts/services-networking/gateway/)
可以使用 IPAddress API 来扩展 Kubernetes 的固有网络功能。
<!--
Here is a brief example of a user querying for IP addresses:
-->
以下是用户查询 IP 地址的简短示例:
```shell
kubectl get services
@ -747,11 +872,56 @@ NAME PARENTREF
2001:db8:1:2::1 services/default/kubernetes
2001:db8:1:2::a services/kube-system/kube-dns
```
<!--
Kubernetes also allow users to dynamically define the available IP ranges for Services using
ServiceCIDR objects. During bootstrap, a default ServiceCIDR object named `kubernetes` is created
from the value of the `--service-cluster-ip-range` command line argument to kube-apiserver:
-->
Kubernetes 还允许用户使用 ServiceCIDR 对象动态定义 Service 的可用 IP 范围。在引导过程中,集群会根据
kube-apiserver 的 `--service-cluster-ip-range` 命令行参数的值创建一个名为
`kubernetes` 的默认 ServiceCIDR 对象:
```shell
kubectl get servicecidrs
```
```
NAME CIDRS AGE
kubernetes 10.96.0.0/28 17m
```
<!--
#### IP address ranges for Service virtual IP addresses {#service-ip-static-sub-range}
Users can create or delete new ServiceCIDR objects to manage the available IP ranges for Services:
-->
#### Service 虚拟 IP 地址的地址段 {#service-ip-static-sub-range}
用户可以创建或删除新的 ServiceCIDR 对象来管理 Service 的可用 IP 范围:
```shell
cat <<'EOF' | kubectl apply -f -
apiVersion: networking.k8s.io/v1beta1
kind: ServiceCIDR
metadata:
name: newservicecidr
spec:
cidrs:
- 10.96.0.0/24
EOF
```
```
servicecidr.networking.k8s.io/newservicecidr created
```
```shell
kubectl get servicecidrs
```
```
NAME CIDRS AGE
kubernetes 10.96.0.0/28 17m
newservicecidr 10.96.0.0/24 7m
```
<!--
### IP address ranges for Service virtual IP addresses {#service-ip-static-sub-range}
-->
### Service 虚拟 IP 地址的地址段 {#service-ip-static-sub-range}
{{< feature-state for_k8s_version="v1.26" state="stable" >}}
@ -775,16 +945,6 @@ Kubernetes 优先通过从高段中选择来为 Service 分配动态 IP 地址
这意味着如果要将特定 IP 地址分配给 `type: ClusterIP` Service
则应手动从**低**段中分配 IP 地址。该方法降低了分配导致冲突的风险。
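<!--
For example, here is a sketch of a Service that manually picks an address from
the lower band (assuming a `service-cluster-ip-range` of `10.96.0.0/24`; the
Service name and selector are made-up examples):
-->
例如,下面是一个手动从低段中选取地址的 Service 示意
(假设 `service-cluster-ip-range` 为 `10.96.0.0/24`Service 名称和选择算符均为虚构的示例):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service        # 虚构的示例名称
spec:
  selector:
    app: my-app           # 虚构的示例选择算符
  ports:
  - port: 80
  # 手动指定一个位于低段中的 ClusterIP以降低与动态分配发生冲突的风险
  clusterIP: 10.96.0.10
```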
<!--
If you disable the `ServiceIPStaticSubrange`
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) then Kubernetes
uses a single shared pool for both manually and dynamically assigned IP addresses,
that are used for `type: ClusterIP` Services.
-->
如果你禁用 `ServiceIPStaticSubrange`
[特性门控](/zh-cn/docs/reference/command-line-tools-reference/feature-gates/)
则 Kubernetes 用于手动分配和动态分配的 IP 共享单个地址池,这适用于 `type: ClusterIP` 的 Service。
<!--
## Traffic policies
-->
@ -837,6 +997,122 @@ relevant Service.
如果流量策略为 `Local` 并且没有本地节点端点,
那么 kube-proxy 不会转发与相关 Service 相关的任何流量。
<!--
If `Cluster` is specified all nodes are eligible load balancing targets _as long as_
the node is not being deleted and kube-proxy is healthy. In this mode: load balancer
health checks are configured to target the service proxy's readiness port and path.
In the case of kube-proxy this evaluates to: `${NODE_IP}:10256/healthz`. kube-proxy
will return either an HTTP code 200 or 503. kube-proxy's load balancer health check
endpoint returns 200 if:
-->
如果指定了 `Cluster`,则所有节点都可以作为负载均衡目标,**只要**节点没有被删除且
kube-proxy 是健康的。在这种模式下:负载均衡器健康检查被配置为针对服务代理的就绪端口和路径。对于
kube-proxy这个健康检查端点为 `${NODE_IP}:10256/healthz`。kube-proxy 将返回 HTTP
状态码 200 或 503。如果满足以下条件kube-proxy 的负载均衡器健康检查端点将返回 200
<!--
1. kube-proxy is healthy, meaning:
- it's able to progress programming the network and isn't timing out while doing
so (the timeout is defined to be: **2 × `iptables.syncPeriod`**); and
2. the node is not being deleted (there is no deletion timestamp set for the Node).
-->
1. kube-proxy 是健康的,意味着:
- 它能够继续进行网络编程,并且在此过程中不会超时(超时时间定义为:**2 × `iptables.syncPeriod`**);并且
2. 节点没有被删除Node 对象上没有设置删除时间戳)。
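<!--
As a sketch, you can probe this health check endpoint directly (assuming you
have network access to the node, and `NODE_IP` is the IP address of that node):
-->
作为示意,你可以直接探测这个健康检查端点
(假设你拥有访问节点的网络权限,`NODE_IP` 为该节点的 IP 地址):

```shell
# 返回 200 表示该节点可以作为负载均衡目标,返回 503 则表示不行
curl -i http://${NODE_IP}:10256/healthz
```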
<!--
The reason why kube-proxy returns 503 and marks the node as not
eligible when it's being deleted, is because kube-proxy supports connection
draining for terminating nodes. A couple of important things occur from the point
of view of a Kubernetes-managed load balancer when a node _is being_ / _is_ deleted.
-->
kube-proxy 在节点被删除时返回 503 并将节点标记为不符合条件的原因在于
kube-proxy 对处于终止过程中的节点支持连接腾空。从 Kubernetes 管理的负载均衡器的角度来看,
当节点**正在**/ **已**被删除时,会发生一些重要的事情。
<!--
While deleting:
* kube-proxy will start failing its readiness probe and essentially mark the
node as not eligible for load balancer traffic. The load balancer health
check failing causes load balancers which support connection draining to
allow existing connections to terminate, and block new connections from
establishing.
-->
当节点正在被删除时:
* kube-proxy 的就绪探针将开始失败,实际上将该节点标记为不再符合接收负载均衡器流量的条件。
负载均衡器健康检查失败会使支持连接腾空的负载均衡器允许现有连接终止,并阻止新连接建立。
<!--
When deleted:
* The service controller in the Kubernetes cloud controller manager removes the
node from the referenced set of eligible targets. Removing any instance from
the load balancer's set of backend targets immediately terminates all
connections. This is also the reason kube-proxy first fails the health check
while the node is deleting.
-->
当节点被删除后:
* Kubernetes 云控制器管理器中的服务控制器会将节点从所引用的候选目标集中移除。
从负载均衡器的后端目标集中移除任何实例会立即终止所有连接。
这也是 kube-proxy 在节点删除过程中首先使健康检查失败的原因。
<!--
It's important to note for Kubernetes vendors that if any vendor configures the
kube-proxy readiness probe as a liveness probe: that kube-proxy will start
restarting continuously when a node is deleting until it has been fully deleted.
kube-proxy exposes a `/livez` path which, as opposed to the `/healthz` one, does
**not** consider the Node's deleting state and only its progress programming the
network. `/livez` is therefore the recommended path for anyone looking to define
a livenessProbe for kube-proxy.
-->
需要注意的是,对于 Kubernetes 供应商,如果任何供应商将
kube-proxy 的就绪探针配置为存活探针当节点正在删除直到完全删除时kube-proxy
将开始不断重启。kube-proxy 公开了一个 `/livez` 路径,与 `/healthz` 路径不同,
`/livez` 路径**不**考虑节点的删除状态,仅考虑其网络编程进度。因此,对于任何希望为
kube-proxy 定义存活探针的人来说,推荐使用 `/livez` 路径。
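<!--
Here is a sketch of a liveness probe that uses the `/livez` path (assuming the
kube-proxy health check server listens on its default port, 10256):
-->
下面是一个使用 `/livez` 路径的存活探针片段示意
(假设 kube-proxy 的健康检查服务监听其默认端口 10256

```yaml
# 放在运行 kube-proxy 的容器规约中,仅作示意
livenessProbe:
  httpGet:
    path: /livez
    port: 10256
```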
<!--
Users deploying kube-proxy can inspect both the readiness / liveness state by
evaluating the metrics: `proxy_livez_total` / `proxy_healthz_total`. Both
metrics publish two series, one with the 200 label and one with the 503 one.
-->
部署 kube-proxy 的用户可以通过评估指标 `proxy_livez_total` / `proxy_healthz_total`
来检查就绪/存活状态。这两个指标都发布了两个序列,一个带有 200 标签,另一个带有 503 标签。
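<!--
For example (assuming the default metrics address `127.0.0.1:10249`):
-->
例如(假设使用默认的指标地址 `127.0.0.1:10249`

```shell
curl -s http://127.0.0.1:10249/metrics | grep -E 'proxy_(livez|healthz)_total'
```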
<!--
For `Local` Services: kube-proxy will return 200 if
1. kube-proxy is healthy/ready, and
2. has a local endpoint on the node in question.
Node deletion does **not** have an impact on kube-proxy's return
code for what concerns load balancer health checks. The reason for this is:
deleting nodes could end up causing an ingress outage should all endpoints
simultaneously be running on said nodes.
-->
对于 `Local` Service如果满足以下条件kube-proxy 将返回 200
1. kube-proxy 是健康/就绪的,并且
2. 在相关节点上有一个本地端点。
对于负载均衡器健康检查而言,节点删除**不会**对 kube-proxy
的返回代码产生影响。原因是:如果所有端点同时在上述节点上运行,则删除节点最终可能会导致入站流量中断。
<!--
The Kubernetes project recommends that cloud provider integration code
configures load balancer health checks that target the service proxy's healthz
port. If you are using or implementing your own virtual IP implementation,
that people can use instead of kube-proxy, you should set up a similar health
checking port with logic that matches the kube-proxy implementation.
-->
Kubernetes 项目建议云提供商集成代码配置负载均衡器健康检查,以针对服务代理的 healthz 端口。
如果你正在使用或实现自己的虚拟 IP 实现(可以用它替代 kube-proxy你应该设置一个类似的健康检查端口
其逻辑应与 kube-proxy 实现相匹配。
<!--
### Traffic to terminating endpoints
-->
@ -884,6 +1160,172 @@ pool.
并逐渐降低指向那些处于终止过程中的 Pod 的流量。
到 Pod 完成终止时,外部负载均衡器应该已经发现节点的健康检查失败并从后端池中完全移除该节点。
<!--
## Traffic Distribution
{{< feature-state feature_gate_name="ServiceTrafficDistribution" >}}
The `spec.trafficDistribution` field within a Kubernetes Service allows you to
express preferences for how traffic should be routed to Service endpoints.
Implementations like kube-proxy use the `spec.trafficDistribution` field as a
guideline. The behavior associated with a given preference may subtly differ
between implementations.
-->
## 流量分发 {#traffic-distribution}
{{< feature-state feature_gate_name="ServiceTrafficDistribution" >}}
Kubernetes Service 中的 `spec.trafficDistribution` 字段允许你定义流量应如何路由到
Service 端点的偏好。像 kube-proxy 这样的实现会将 `spec.trafficDistribution`
字段作为指导。不同实现之间,与给定偏好相关的行为可能会略有不同。
<!--
`PreferClose` with kube-proxy
: For kube-proxy, this means prioritizing sending traffic to endpoints within
the same zone as the client. The EndpointSlice controller updates
EndpointSlices with `hints` to communicate this preference, which kube-proxy
then uses for routing decisions. If a client's zone does not have any
available endpoints, traffic will be routed cluster-wide for that client.
In the absence of any value for `trafficDistribution`, the default routing
strategy for kube-proxy is to distribute traffic to any endpoint in the cluster.
-->
`PreferClose` 与 kube-proxy 结合
: 对于 kube-proxy这意味着优先将流量发送到与客户端位于同一区域的端点。
EndpointSlice 控制器使用 `hints` 来更新 EndpointSlices 以传达此偏好,
之后kube-proxy 会使用这些提示进行路由决策。如果客户端的区域没有可用的端点,
则流量将在整个集群范围内路由。
如果 `trafficDistribution` 没有任何值kube-proxy 的默认路由策略是将流量分配到集群中的任一端点。
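<!--
Here is a sketch of a Service that sets this field (the name and selector are
made-up examples):
-->
下面是一个设置了此字段的 Service 示意(名称和选择算符均为虚构的示例):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service        # 虚构的示例名称
spec:
  selector:
    app: my-app           # 虚构的示例选择算符
  ports:
  - port: 80
  # 优先将流量路由到与客户端位于同一区域的端点
  trafficDistribution: PreferClose
```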
<!--
### Comparison with `service.kubernetes.io/topology-mode: Auto`
The `trafficDistribution` field with `PreferClose` and the
`service.kubernetes.io/topology-mode: Auto` annotation both aim to prioritize
same-zone traffic. However, there are key differences in their approaches:
-->
### 与 `service.kubernetes.io/topology-mode: Auto` 的比较 {#comparison-with-service-kubernetes-io-topology-mode-auto}
`trafficDistribution` 字段中的 `PreferClose`
`service.kubernetes.io/topology-mode: Auto` 注解都旨在优先处理同一区域的流量。
然而,它们的方法存在一些关键差异:
<!--
* `service.kubernetes.io/topology-mode: Auto`: Attempts to distribute traffic
proportionally across zones based on allocatable CPU resources. This heuristic
includes safeguards (such as the [fallback
behavior](/docs/concepts/services-networking/topology-aware-routing/#three-or-more-endpoints-per-zone)
for small numbers of endpoints) and could lead to the feature being disabled
in certain scenarios for load-balancing reasons. This approach sacrifices some
predictability in favor of potential load balancing.
-->
* `service.kubernetes.io/topology-mode: Auto`:尝试根据可分配的 CPU
资源在各区域之间按比例分配流量。此启发式方法包括一些保障措施
(例如针对少量端点的[回退行为](/zh-cn/docs/concepts/services-networking/topology-aware-routing/#three-or-more-endpoints-per-zone)
并在某些场景下可能因负载均衡原因导致该特性被禁用。这种方法在一定程度上牺牲了可预测性,
以换取潜在的负载均衡。
<!--
* `trafficDistribution: PreferClose`: This approach aims to be slightly simpler
and more predictable: "If there are endpoints in the zone, they will receive
all traffic for that zone, if there are no endpoints in a zone, the traffic
will be distributed to other zones". While the approach may offer more
predictability, it does mean that you are in control of managing a [potential
overload](#considerations-for-using-traffic-distribution-control).
-->
* `trafficDistribution: PreferClose`:这种方法偏重更简单和更可预测:
“如果区域内有端点,它们将接收该区域的所有流量;如果区域内没有端点,流量将分配到其他区域”。
虽然这种方法可能提供更多的可预测性,但这意味着你需要管理[潜在的过载](#considerations-for-using-traffic-distribution-control)。
<!--
If the `service.kubernetes.io/topology-mode` annotation is set to `Auto`, it
will take precedence over `trafficDistribution`. (The annotation may be deprecated
in the future in favour of the `trafficDistribution` field).
-->
如果 `service.kubernetes.io/topology-mode` 注解设置为 `Auto`,它将优先于
`trafficDistribution`。(该注解将来可能会被弃用,取而代之的是 `trafficDistribution` 字段)。
<!--
### Interaction with Traffic Policies
When compared to the `trafficDistribution` field, the traffic policy fields
(`externalTrafficPolicy` and `internalTrafficPolicy`) are meant to offer a
stricter traffic locality requirements. Here's how `trafficDistribution`
interacts with them:
-->
### 与流量策略的交互 {#interaction-with-traffic-policies}
`trafficDistribution` 字段相比,流量策略字段
`externalTrafficPolicy` 和 `internalTrafficPolicy`)旨在提供更严格的流量局域化要求。
以下是 `trafficDistribution` 与它们的交互方式:
<!--
* Precedence of Traffic Policies: For a given Service, if a traffic policy
(`externalTrafficPolicy` or `internalTrafficPolicy`) is set to `Local`, it
takes precedence over `trafficDistribution: PreferClose` for the corresponding
traffic type (external or internal, respectively).
-->
* 流量策略的优先序:对于给定的 Service如果流量策略
`externalTrafficPolicy` 或 `internalTrafficPolicy`)设置为 `Local`
则它优先于相应流量类型(分别为外部或内部)的 `trafficDistribution: PreferClose`
<!--
* `trafficDistribution` Influence: For a given Service, if a traffic policy
(`externalTrafficPolicy` or `internalTrafficPolicy`) is set to `Cluster` (the
default), or if the fields are not set, then `trafficDistribution:
PreferClose` guides the routing behavior for the corresponding traffic type
(external or internal, respectively). This means that an attempt will be made
to route traffic to an endpoint that is in the same zone as the client.
-->
* `trafficDistribution` 的影响:对于给定的 Service如果流量策略
`externalTrafficPolicy` 或 `internalTrafficPolicy`)设置为 `Cluster`(默认值),
或者这些字段未设置,那么 `trafficDistribution: PreferClose` 将指导相应流量类型
(分别为外部或内部)的路由行为。这意味着 kube-proxy 将尝试将流量路由到与客户端位于同一区域的端点。
<!--
### Considerations for using traffic distribution control
* **Increased Probability of Overloaded Endpoints:** The `PreferClose`
heuristic will attempt to route traffic to the closest healthy endpoints
instead of spreading that traffic evenly across all endpoints. If you do not
have a sufficient number of endpoints within a zone, they may become
overloaded. This is especially likely if incoming traffic is not
proportionally distributed across zones. To mitigate this, consider the
following strategies:
* [Pod Topology Spread
Constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/):
Use Pod Topology Spread Constraints to distribute your pods more evenly
across zones.
* Zone-specific Deployments: If you expect to see skewed traffic patterns,
create a separate Deployment for each zone. This approach allows the
separate workloads to scale independently. There are also workload
management addons available from the ecosystem, outside the Kubernetes
project itself, that can help here.
-->
### 使用流量分配控制的注意事项 {#considerations-for-using-traffic-distribution-control}
* **端点过载的概率增加:** `PreferClose` 启发式方法将尝试将流量路由到最近的健康端点,
而不是将流量均匀分布到所有端点。如果某个区域内的端点数量不足,它们可能会过载。
如果传入流量在各区域之间分布不均,这种情况更有可能发生。为减轻这种情况,请考虑以下策略:
  * [Pod 拓扑分布约束](/zh-cn/docs/concepts/scheduling-eviction/topology-spread-constraints/)
    使用 Pod 拓扑分布约束在各区域之间更均匀地分布你的 Pod见本列表之后的示意。
* 区域特定的 Deployment如果你预计会看到不均衡的流量模式
可以为每个区域创建一个单独的 Deployment。这种方法允许独立扩展各个工作负载。
生态系统中还有一些 Kubernetes 项目之外的工作负载管理插件,可以在这方面提供帮助。
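<!--
Here is a sketch of a topology spread constraint that spreads Pods evenly
across zones (this fragment goes into the Pod template `spec`; the label
selector is a made-up example):
-->
下面是一个按区域均匀分布 Pod 的拓扑分布约束片段示意
(此片段位于 Pod 模板的 `spec` 中,标签选择算符为虚构的示例):

```yaml
topologySpreadConstraints:
- maxSkew: 1
  topologyKey: topology.kubernetes.io/zone
  whenUnsatisfiable: ScheduleAnyway
  labelSelector:
    matchLabels:
      app: my-app         # 虚构的示例标签
```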
<!--
* **Implementation-specific behavior:** Each dataplane implementation may handle
this field slightly differently. If you're using an implementation other than
kube-proxy, refer to the documentation specific to that implementation to
understand how this field is being handled.
-->
* **特定于具体实现的行为:** 各个数据平面实现处理此字段的方式可能会稍有不同。
如果你使用的是 kube-proxy 以外的实现,请参阅该实现的特定文档以了解该实现是如何处理此字段的。
## {{% heading "whatsnext" %}}
<!--
View File
@ -20,7 +20,11 @@ This section contains the following reference topics about nodes:
* [Node Labels Populated By The Kubelet](/docs/reference/node/node-labels)
* [Local Files And Paths Used By The Kubelet](/docs/reference/node/kubelet-files)
* [Node `.status` information](/docs/reference/node/node-status/)
* [Seccomp information](/docs/reference/node/seccomp/)
-->
本部分包含以下有关节点的参考主题:
@ -31,8 +35,11 @@ This section contains the following reference topics about nodes:
* [由 kubelet 填充的节点标签](/zh-cn/docs/reference/node/node-labels)
* [由 kubelet 使用的本地文件和路径](/zh-cn/docs/reference/node/kubelet-files)
* [节点 `.status` 信息](/zh-cn/docs/reference/node/node-status/)
*
* [Seccomp 信息](/zh-cn/docs/reference/node/seccomp/)
<!--
You can also read node reference details from elsewhere in the
View File
@ -0,0 +1,271 @@
---
content_type: reference
title: Seccomp 和 Kubernetes
weight: 80
---
<!--
content_type: reference
title: Seccomp and Kubernetes
weight: 80
-->
<!-- overview -->
<!--
Seccomp stands for secure computing mode and has been a feature of the Linux
kernel since version 2.6.12. It can be used to sandbox the privileges of a
process, restricting the calls it is able to make from userspace into the
kernel. Kubernetes lets you automatically apply seccomp profiles loaded onto a
{{< glossary_tooltip text="node" term_id="node" >}} to your Pods and containers.
-->
Seccomp 表示安全计算Secure Computing模式自 2.6.12 版本以来,一直是 Linux 内核的一个特性。
它可以用来沙箱化进程的权限,限制进程从用户态到内核态的调用。
Kubernetes 能使你自动将加载到{{< glossary_tooltip text="节点" term_id="node" >}}上的
seccomp 配置文件应用到你的 Pod 和容器。
<!--
## Seccomp fields
-->
## Seccomp 字段 {#seccomp-fields}
{{< feature-state for_k8s_version="v1.19" state="stable" >}}
<!--
There are four ways to specify a seccomp profile for a
{{< glossary_tooltip text="pod" term_id="pod" >}}:
- for the whole Pod using [`spec.securityContext.seccompProfile`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context)
- for a single container using [`spec.containers[*].securityContext.seccompProfile`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1)
- for a (restartable / sidecar) init container using [`spec.initContainers[*].securityContext.seccompProfile`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1)
- for an [ephemeral container](/docs/concepts/workloads/pods/ephemeral-containers) using [`spec.ephemeralContainers[*].securityContext.seccompProfile`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2)
-->
有四种方式可以为 {{< glossary_tooltip text="Pod" term_id="pod" >}} 指定 seccomp 配置文件:
- 为整个 Pod 使用
[`spec.securityContext.seccompProfile`](/zh-cn/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context)
- 为单个容器使用
[`spec.containers[*].securityContext.seccompProfile`](/zh-cn/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1)
- 为(可重启/边车Init 容器使用
[`spec.initContainers[*].securityContext.seccompProfile`](/zh-cn/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1)
- 为[临时容器](/zh-cn/docs/concepts/workloads/pods/ephemeral-containers)使用
[`spec.ephemeralContainers[*].securityContext.seccompProfile`](/zh-cn/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2)
{{% code_sample file="pods/security/seccomp/fields.yaml" %}}
<!--
The Pod in the example above runs as `Unconfined`, while the
`ephemeral-container` and `init-container` specifically define
`RuntimeDefault`. If the ephemeral or init container had not set the
`securityContext.seccompProfile` field explicitly, then the value would be
inherited from the Pod. The same applies to the container, which runs a
`Localhost` profile `my-profile.json`.
Generally speaking, fields from (ephemeral) containers have a higher priority
than the Pod level value, while containers which do not set the seccomp field
inherit the profile from the Pod.
-->
上面示例中的 Pod 以 `Unconfined` 运行,而 `ephemeral-container`
和 `init-container` 明确设置了 `RuntimeDefault`。
如果临时容器或 Init 容器没有明确设置 `securityContext.seccompProfile` 字段,
则此值将从 Pod 继承。同样的机制也适用于运行 `Localhost` 配置文件 `my-profile.json` 的容器。
一般来说,(临时)容器的字段优先级高于 Pod 层级的值,而未设置 seccomp 字段的容器则从 Pod 继承配置。
{{< note >}}
<!--
It is not possible to apply a seccomp profile to a Pod or container running with
`privileged: true` set in the container's `securityContext`. Privileged
containers always run as `Unconfined`.
-->
你不可以将 seccomp 配置文件应用到在容器的 `securityContext` 中设置了 `privileged: true`
Pod 或容器。特权容器始终以 `Unconfined` 运行。
{{< /note >}}
<!--
The following values are possible for the `seccompProfile.type`:
`Unconfined`
: The workload runs without any seccomp restrictions.
-->
对于 `seccompProfile.type`,可以使用以下值:
`Unconfined`
: 工作负载在没有任何 seccomp 限制的情况下运行。
<!--
`RuntimeDefault`
: A default seccomp profile defined by the
{{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}
is applied. The default profiles aim to provide a strong set of security
defaults while preserving the functionality of the workload. It is possible that
the default profiles differ between container runtimes and their release
versions, for example when comparing those from
{{< glossary_tooltip text="CRI-O" term_id="cri-o" >}} and
{{< glossary_tooltip text="containerd" term_id="containerd" >}}.
-->
`RuntimeDefault`
: 由{{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}}定义的默认
seccomp 配置文件被应用。这个默认的配置文件旨在提供一套强大的安全默认值,同时保持工作负载的功能不受影响。
不同的容器运行时及其版本之间的默认配置文件可能会有所不同,
例如在比较 {{< glossary_tooltip text="CRI-O" term_id="cri-o" >}} 和
{{< glossary_tooltip text="containerd" term_id="containerd" >}} 的默认配置文件时就会发现不同。
<!--
`Localhost`
: The `localhostProfile` will be applied, which has to be available on the node
disk (on Linux it's `/var/lib/kubelet/seccomp`). The availability of the seccomp
profile is verified by the
{{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}
on container creation. If the profile does not exist, then the container
creation will fail with a `CreateContainerError`.
-->
`Localhost`
: `localhostProfile` 将被应用,这一配置必须位于节点磁盘上(在 Linux 上是 `/var/lib/kubelet/seccomp`)。
在创建容器时,{{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}}会验证 seccomp
配置文件的可用性。如果此配置文件不存在,则容器创建将失败,并报错 `CreateContainerError`
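<!--
For example, the following steps sketch how to place a custom profile into the
default seccomp directory of a node (assuming the profile is named
`my-profile.json` and that you have administrative access to the node):
-->
例如,下面的步骤示意了如何把一个自定义配置文件放到节点的默认 seccomp 目录中
(假设配置文件名为 `my-profile.json`,且你拥有节点的管理权限):

```shell
# 在目标节点上执行:把配置文件放入 kubelet 的 seccomp 根目录
sudo mkdir -p /var/lib/kubelet/seccomp
sudo cp my-profile.json /var/lib/kubelet/seccomp/my-profile.json
```

<!--
A Pod can then reference it with a path relative to that directory, that is,
`localhostProfile: my-profile.json`.
-->
之后Pod 就可以用相对于该目录的路径(即 `localhostProfile: my-profile.json`)来引用它。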
<!--
### `Localhost` profiles
Seccomp profiles are JSON files following the scheme defined by the
[OCI runtime specification](https://github.com/opencontainers/runtime-spec/blob/f329913/config-linux.md#seccomp).
A profile basically defines actions based on matched syscalls, but also allows
to pass specific values as arguments to syscalls. For example:
-->
### `Localhost` 配置文件 {#localhost-profiles}
Seccomp 配置文件是遵循
[OCI 运行时规范](https://github.com/opencontainers/runtime-spec/blob/f329913/config-linux.md#seccomp)定义的
JSON 文件。配置文件主要根据所匹配的系统调用来定义操作,但也允许将特定值作为参数传递给系统调用。例如:
```json
{
    "defaultAction": "SCMP_ACT_ERRNO",
    "defaultErrnoRet": 38,
    "syscalls": [
        {
            "names": [
                "adjtimex",
                "alarm",
                "bind",
                "waitid",
                "waitpid",
                "write",
                "writev"
            ],
            "action": "SCMP_ACT_ALLOW"
        }
    ]
}
```
<!--
The `defaultAction` in the profile above is defined as `SCMP_ACT_ERRNO` and
applies as a fallback for any syscall not matched by an entry in `syscalls`.
The error is defined as code `38` via the `defaultErrnoRet` field.
-->
上述配置文件中的 `defaultAction` 被定义为 `SCMP_ACT_ERRNO`
它作为未被 `syscalls` 中条目匹配到的系统调用的回退操作。
此错误通过 `defaultErrnoRet` 字段被定义为代码 `38`。
<!--
The following actions are generally possible:
`SCMP_ACT_ERRNO`
: Return the specified error code.
`SCMP_ACT_ALLOW`
: Allow the syscall to be executed.
`SCMP_ACT_KILL_PROCESS`
: Kill the process.
-->
通常可以使用以下操作:
`SCMP_ACT_ERRNO`
: 返回指定的错误码。
`SCMP_ACT_ALLOW`
: 允许执行系统调用。
`SCMP_ACT_KILL_PROCESS`
: 杀死进程。
<!--
`SCMP_ACT_KILL_THREAD` and `SCMP_ACT_KILL`
: Kill only the thread.
`SCMP_ACT_TRAP`
: Throw a `SIGSYS` signal.
`SCMP_ACT_NOTIFY` and `SECCOMP_RET_USER_NOTIF`
: Notify the user space.
`SCMP_ACT_TRACE`
: Notify a tracing process with the specified value.
`SCMP_ACT_LOG`
: Allow the syscall to be executed after the action has been logged to syslog or
auditd.
-->
`SCMP_ACT_KILL_THREAD` 和 `SCMP_ACT_KILL`
: 仅杀死线程。
`SCMP_ACT_TRAP`
: 发送 `SIGSYS` 信号。
`SCMP_ACT_NOTIFY` 和 `SECCOMP_RET_USER_NOTIF`
: 通知用户空间。
`SCMP_ACT_TRACE`
: 使用指定的值通知跟踪进程。
`SCMP_ACT_LOG`
: 在将操作记录到 syslog 或 auditd 之后,允许执行系统调用。
<!--
Some actions like `SCMP_ACT_NOTIFY` or `SECCOMP_RET_USER_NOTIF` may be not
supported depending on the container runtime, OCI runtime or Linux kernel
version being used. There may be also further limitations, for example that
`SCMP_ACT_NOTIFY` cannot be used as `defaultAction` or for certain syscalls like
`write`. All those limitations are defined by either the OCI runtime
([runc](https://github.com/opencontainers/runc),
[crun](https://github.com/containers/crun)) or
[libseccomp](https://github.com/seccomp/libseccomp).
-->
`SCMP_ACT_NOTIFY``SECCOMP_RET_USER_NOTIF` 这类操作可能不被支持,
具体取决于所使用的容器运行时、OCI 运行时或 Linux 内核版本。也可能存在其他限制,
例如 `SCMP_ACT_NOTIFY` 不能用作 `defaultAction` 或用于某些系统调用(如 `write`)。
所有这些限制由 OCI 运行时
[runc](https://github.com/opencontainers/runc)、[crun](https://github.com/containers/crun)
或 [libseccomp](https://github.com/seccomp/libseccomp) 所定义。
<!--
The `syscalls` JSON array contains a list of objects referencing syscalls by
their respective `names`. For example, the action `SCMP_ACT_ALLOW` can be used
to create a whitelist of allowed syscalls as outlined in the example above. It
would also be possible to define another list using the action `SCMP_ACT_ERRNO`
but a different return (`errnoRet`) value.
It is also possible to specify the arguments (`args`) passed to certain
syscalls. More information about those advanced use cases can be found in the
[OCI runtime spec](https://github.com/opencontainers/runtime-spec/blob/f329913/config-linux.md#seccomp)
and the [Seccomp Linux kernel documentation](https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt).
-->
`syscalls` JSON 数组包含对象列表,每个对象通过系统调用的 `names` 引用系统调用。
例如,`SCMP_ACT_ALLOW` 操作可用于创建包含如上例所示的系统调用的白名单。
也可以使用 `SCMP_ACT_ERRNO` 操作定义另一个列表,但会有不同的返回值(`errnoRet`)。
你还可以指定传递给某些系统调用的参数(`args`)。有关这些高级用例的细节,请参见
[OCI 运行时规范](https://github.com/opencontainers/runtime-spec/blob/f329913/config-linux.md#seccomp)
和 [Seccomp Linux 内核文档](https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt)。
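<!--
For example, here is a sketch of a `syscalls` entry with argument matching
(adapted from a common example in the OCI runtime spec; refer to that spec for
the exact fields):
-->
例如,下面是一个带参数匹配的 `syscalls` 条目示意
(改编自 OCI 运行时规范中的常见示例,具体字段以该规范为准):

```json
{
  "names": ["personality"],
  "action": "SCMP_ACT_ALLOW",
  "args": [
    {
      "index": 0,
      "value": 4294967295,
      "op": "SCMP_CMP_EQ"
    }
  ]
}
```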
<!--
## Further reading
- [Restrict a Container's Syscalls with seccomp](/docs/tutorials/security/seccomp/)
- [Pod Security Standards](/docs/concepts/security/pod-security-standards/)
-->
## 进一步阅读 {#further-reading}
- [使用 seccomp 限制容器的系统调用](/zh-cn/docs/tutorials/security/seccomp/)
- [Pod 安全标准](/zh-cn/docs/concepts/security/pod-security-standards/)
View File
@ -224,6 +224,275 @@ virtual resource type would be used if that becomes necessary.
取决于对象是什么,每个子资源所支持的动词有所不同 - 参见 [API 文档](/zh-cn/docs/reference/kubernetes-api/)以了解更多信息。
跨多个资源来访问其子资源是不可能的 - 如果需要这一能力,则通常意味着需要一种新的虚拟资源类型了。
<!--
## HTTP media types {#alternate-representations-of-resources}
Over HTTP, Kubernetes supports JSON and Protobuf wire encodings.
-->
## HTTP 媒体类型 {#alternate-representations-of-resources}
通过 HTTPKubernetes 支持 JSON 和 Protobuf 网络编码格式。
{{% note %}}
<!--
Although YAML is widely used to define Kubernetes manifests locally, Kubernetes does not
support the [`application/yaml`](https://www.rfc-editor.org/rfc/rfc9512.html) media type
for API operations.
All JSON documents are valid YAML, so you can also use a JSON API response anywhere that is
expecting a YAML input.
-->
虽然 YAML 被广泛用于本地定义 Kubernetes 清单,但 Kubernetes 不支持
[`application/yaml`](https://www.rfc-editor.org/rfc/rfc9512.html) 媒体类型用于 API 操作。
所有的 JSON 文档都是有效的 YAML因此你也可以在所有期望输入 YAML 的地方使用 JSON API 响应。
{{% /note %}}
<!--
By default, Kubernetes returns objects in [JSON serialization](#json-encoding), using the
`application/json` media type. Although JSON is the default, clients may request the more
efficient binary [Protobuf representation](#protobuf-encoding) for better performance at scale.
The Kubernetes API implements standard HTTP content type negotiation: passing an
`Accept` header with a `GET` call will request that the server tries to return
a response in your preferred media type. If you want to send an object in Protobuf to
the server for a `PUT` or `POST` request, you must set the `Content-Type` request header
appropriately.
-->
默认情况下Kubernetes 使用 `application/json` 媒体类型以 [JSON 序列化](#json-encoding)返回对象。
虽然 JSON 是默认类型,但客户端可以请求更高效的二进制
[Protobuf 表示](#protobuf-encoding),以便在大规模环境中获得更好的性能。
Kubernetes API 实现了标准的 HTTP 内容类型协商:
使用 `GET` 调用传递 `Accept` 头时将请求服务器尝试以你首选的媒体类型返回响应。
如果你想通过 `PUT``POST` 请求将对象以 Protobuf 发送到服务器,则必须相应地设置 `Content-Type` 请求头。
<!--
If you request an available media type, the API server returns a response with a suitable
`Content-Type`; if none of the media types you request are supported, the API server returns
a `406 Not acceptable` error message.
All built-in resource types support the `application/json` media type.
-->
如果你请求了可用的媒体类型API 服务器会以合适的 `Content-Type` 返回响应;
如果你请求的媒体类型都不被支持API 服务器会返回 `406 Not acceptable` 错误消息。
所有内置资源类型都支持 `application/json` 媒体类型。
<!--
### JSON resource encoding {#json-encoding}
The Kubernetes API defaults to using [JSON](https://www.json.org/json-en.html) for encoding
HTTP message bodies.
For example:
-->
### JSON 资源编码 {#json-encoding}
Kubernetes API 默认使用 [JSON](https://www.json.org/json-en.html) 来编码 HTTP 消息体。
例如:
<!--
1. List all of the pods on a cluster, without specifying a preferred format
-->
1. 在不指定首选格式的情况下,列举集群中的所有 Pod
```
GET /api/v1/pods
---
200 OK
Content-Type: application/json
… JSON encoded collection of Pods (PodList object)
```
<!--
1. Create a pod by sending JSON to the server, requesting a JSON response.
-->
2. 通过向服务器发送 JSON 并请求 JSON 响应来创建 Pod。
```
POST /api/v1/namespaces/test/pods
Content-Type: application/json
Accept: application/json
… JSON encoded Pod object
---
200 OK
Content-Type: application/json
{
"kind": "Pod",
"apiVersion": "v1",
}
```
<!--
### Kubernetes Protobuf encoding {#protobuf-encoding}
Kubernetes uses an envelope wrapper to encode Protobuf responses. That wrapper starts
with a 4 byte magic number to help identify content in disk or in etcd as Protobuf
(as opposed to JSON), and then is followed by a Protobuf encoded wrapper message, which
describes the encoding and type of the underlying object and then contains the object.
-->
### Kubernetes Protobuf 编码 {#protobuf-encoding}
Kubernetes 使用封套形式来对 Protobuf 响应进行编码。
封套外层由 4 个字节的特殊数字开头,便于从磁盘文件或 etcd 中辨识 Protobuf
格式的(而不是 JSON数据。
接下来存放的是 Protobuf 编码的封套消息,其中描述下层对象的编码和类型,最后才是对象本身。
<!--
For example:
-->
例如:
<!--
1. List all of the pods on a cluster in Protobuf format.
-->
1. 以 Protobuf 格式列举集群中的所有 Pod。
```
GET /api/v1/pods
Accept: application/vnd.kubernetes.protobuf
---
200 OK
Content-Type: application/vnd.kubernetes.protobuf
… binary encoded collection of Pods (PodList object)
```
<!--
1. Create a pod by sending Protobuf encoded data to the server, but request a response
in JSON.
-->
2. 通过向服务器发送 Protobuf 编码的数据创建 Pod但请求以 JSON 形式接收响应:
```
POST /api/v1/namespaces/test/pods
Content-Type: application/vnd.kubernetes.protobuf
Accept: application/json
… binary encoded Pod object
---
200 OK
Content-Type: application/json
{
"kind": "Pod",
"apiVersion": "v1",
...
}
```
<!--
You can use both techniques together and use Kubernetes' Protobuf encoding to interact with any API that
supports it, for both reads and writes. Only some API resource types are [compatible](#protobuf-encoding-compatibility)
with Protobuf.
-->
你可以将这两种技术结合使用,利用 Kubernetes 的 Protobuf 编码与任何支持它的 API 进行读写交互。
只有某些 API 资源类型与 Protobuf [兼容](#protobuf-encoding-compatibility)。
<a id="protobuf-encoding-idl" />
<!--
The wrapper format is:
-->
封套格式如下:
<!--
```
A four byte magic number prefix:
  Bytes 0-3: "k8s\x00" [0x6b, 0x38, 0x73, 0x00]

An encoded Protobuf message with the following IDL:
  message Unknown {
    // typeMeta should have the string values for "kind" and "apiVersion" as set on the JSON object
    optional TypeMeta typeMeta = 1;

    // raw will hold the complete serialized object in protobuf. See the protobuf definitions in the client libraries for a given kind.
    optional bytes raw = 2;

    // contentEncoding is encoding used for the raw data. Unspecified means no encoding.
    optional string contentEncoding = 3;

    // contentType is the serialization method used to serialize 'raw'. Unspecified means application/vnd.kubernetes.protobuf and is usually
    // omitted.
    optional string contentType = 4;
  }

  message TypeMeta {
    // apiVersion is the group/version for this type
    optional string apiVersion = 1;

    // kind is the name of the object schema. A protobuf definition should exist for this object.
    optional string kind = 2;
  }
```
-->
```
四个字节的特殊数字前缀:
  字节 0-3: "k8s\x00" [0x6b, 0x38, 0x73, 0x00]

使用下面 IDL 来编码的 Protobuf 消息:
  message Unknown {
    // typeMeta 应该包含 "kind" 和 "apiVersion" 的字符串值,就像
    // 对应的 JSON 对象中所设置的那样
    optional TypeMeta typeMeta = 1;

    // raw 中将保存用 protobuf 序列化的完整对象。
    // 参阅客户端库中为指定 kind 所作的 protobuf 定义
    optional bytes raw = 2;

    // contentEncoding 用于 raw 数据的编码格式。未设置此值意味着没有特殊编码。
    optional string contentEncoding = 3;

    // contentType 包含 raw 数据所采用的序列化方法。
    // 未设置此值意味着 application/vnd.kubernetes.protobuf且通常被忽略
    optional string contentType = 4;
  }

  message TypeMeta {
    // apiVersion 是 type 对应的组名/版本
    optional string apiVersion = 1;

    // kind 是对象模式定义的名称。此对象应该存在一个 protobuf 定义。
    optional string kind = 2;
  }
```
{{< note >}}
<!--
Clients that receive a response in `application/vnd.kubernetes.protobuf` that does
not match the expected prefix should reject the response, as future versions may need
to alter the serialization format in an incompatible way and will do so by changing
the prefix.
-->
收到 `application/vnd.kubernetes.protobuf` 格式响应的客户端在响应与预期的前缀不匹配时应该拒绝响应,
因为将来的版本可能需要以某种不兼容的方式更改序列化格式,并且这种更改是通过变更前缀完成的。
{{< /note >}}
<!--
#### Compatibility with Kubernetes Protobuf {#protobuf-encoding-compatibility}
Not all API resource types support Kubernetes' Protobuf encoding; specifically, Protobuf isn't
available for resources that are defined as
{{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinitions" >}}
or are served via the
{{< glossary_tooltip text="aggregation layer" term_id="aggregation-layer" >}}.
As a client, if you might need to work with extension types you should specify multiple
content types in the request `Accept` header to support fallback to JSON.
For example:
-->
#### 与 Kubernetes Protobuf 的兼容性 {#protobuf-encoding-compatibility}
并非所有 API 资源类型都支持 Protobuf具体来说Protobuf
不适用于定义为 {{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinitions" >}}
或通过{{< glossary_tooltip text="聚合层" term_id="aggregation-layer" >}}提供服务的资源。
作为客户端,如果你可能需要使用扩展类型,则应在请求 `Accept` 请求头中指定多种内容类型以支持回退到 JSON。例如
```
Accept: application/vnd.kubernetes.protobuf, application/json
```
<!--
## Efficient detection of changes
@ -925,195 +1194,6 @@ extensions, you should make requests that specify multiple content types in the
Accept: application/json;as=Table;g=meta.k8s.io;v=v1, application/json
```
<!--
## Alternate representations of resources
By default, Kubernetes returns objects serialized to JSON with content type
`application/json`. This is the default serialization format for the API. However,
clients may request the more efficient
[Protobuf representation](#protobuf-encoding) of these objects for better performance at scale.
The Kubernetes API implements standard HTTP content type negotiation: passing an
`Accept` header with a `GET` call will request that the server tries to return
a response in your preferred media type, while sending an object in Protobuf to
the server for a `PUT` or `POST` call means that you must set the `Content-Type`
header appropriately.
-->
## 资源的其他表示形式 {#alternate-representations-of-resources}
默认情况下Kubernetes 返回序列化为 JSON 的对象,内容类型为 `application/json`
这是 API 的默认序列化格式。
但是,客户端可能会使用更有效的 [Protobuf 表示](#protobuf-encoding) 请求这些对象,
以获得更好的大规模性能。Kubernetes API 实现标准的 HTTP 内容类型协商:
带有 `Accept` 请求头部的 `GET` 调用会请求服务器尝试以你的首选媒体类型返回响应,
而将 Protobuf 中的对象发送到服务器以进行 `PUT``POST` 调用意味着你必须适当地设置
`Content-Type` 请求头。
<!--
The server will return a response with a `Content-Type` header if the requested
format is supported, or the `406 Not acceptable` error if none of the media types you
requested are supported. All built-in resource types support the `application/json`
media type.
See the Kubernetes [API reference](/docs/reference/kubernetes-api/) for a list of
supported content types for each API.
For example:
-->
如果支持请求的格式,服务器将返回带有 `Content-Type` 标头的响应,
如果不支持你请求的媒体类型,则返回 `406 Not Acceptable` 错误。
所有内置资源类型都支持 `application/json` 媒体类型。
有关每个 API 支持的内容类型列表,请参阅 Kubernetes [API 参考](/zh-cn/docs/reference/kubernetes-api/)。
例如:
<!--
1. List all of the pods on a cluster in Protobuf format.
-->
1. 以 Protobuf 格式列举集群上的所有 Pod
```
GET /api/v1/pods
Accept: application/vnd.kubernetes.protobuf
---
200 OK
Content-Type: application/vnd.kubernetes.protobuf
... binary encoded PodList object
```
<!--
2. Create a pod by sending Protobuf encoded data to the server, but request a response in JSON.
-->
2. 通过向服务器发送 Protobuf 编码的数据创建 Pod但请求以 JSON 形式接收响应:
```
POST /api/v1/namespaces/test/pods
Content-Type: application/vnd.kubernetes.protobuf
Accept: application/json
... binary encoded Pod object
---
200 OK
Content-Type: application/json
{
"kind": "Pod",
"apiVersion": "v1",
...
}
```
<!--
Not all API resource types support Protobuf; specifically, Protobuf isn't available for
resources that are defined as
{{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinitions" >}}
or are served via the
{{< glossary_tooltip text="aggregation layer" term_id="aggregation-layer" >}}.
As a client, if you might need to work with extension types you should specify multiple
content types in the request `Accept` header to support fallback to JSON.
For example:
-->
并非所有 API 资源类型都支持 Protobuf具体来说
Protobuf 不适用于定义为 {{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinitions" >}}
或通过{{< glossary_tooltip text="聚合层" term_id="aggregation-layer" >}}提供服务的资源。
作为客户端,如果你可能需要使用扩展类型,则应在请求 `Accept` 请求头中指定多种内容类型以支持回退到 JSON。
例如:
```
Accept: application/vnd.kubernetes.protobuf, application/json
```
<!--
### Kubernetes Protobuf encoding {#protobuf-encoding}
Kubernetes uses an envelope wrapper to encode Protobuf responses. That wrapper starts
with a 4 byte magic number to help identify content in disk or in etcd as Protobuf
(as opposed to JSON), and then is followed by a Protobuf encoded wrapper message, which
describes the encoding and type of the underlying object and then contains the object.
The wrapper format is:
-->
### Kubernetes Protobuf 编码 {#protobuf-encoding}
Kubernetes 使用封套形式来对 Protobuf 响应进行编码。
封套外层由 4 个字节的特殊数字开头,便于从磁盘文件或 etcd 中辨识 Protobuf
格式的(而不是 JSON数据。
接下来存放的是 Protobuf 编码的封套消息,其中描述下层对象的编码和类型,最后
才是对象本身。
封套格式如下:
<!--
```
A four byte magic number prefix:
Bytes 0-3: "k8s\x00" [0x6b, 0x38, 0x73, 0x00]
An encoded Protobuf message with the following IDL:
message Unknown {
// typeMeta should have the string values for "kind" and "apiVersion" as set on the JSON object
optional TypeMeta typeMeta = 1;
// raw will hold the complete serialized object in protobuf. See the protobuf definitions in the client libraries for a given kind.
optional bytes raw = 2;
// contentEncoding is encoding used for the raw data. Unspecified means no encoding.
optional string contentEncoding = 3;
// contentType is the serialization method used to serialize 'raw'. Unspecified means application/vnd.kubernetes.protobuf and is usually
// omitted.
optional string contentType = 4;
}
message TypeMeta {
// apiVersion is the group/version for this type
optional string apiVersion = 1;
// kind is the name of the object schema. A protobuf definition should exist for this object.
optional string kind = 2;
}
```
-->
```
四个字节的特殊数字前缀:
字节 0-3: "k8s\x00" [0x6b, 0x38, 0x73, 0x00]
使用下面 IDL 来编码的 Protobuf 消息:
message Unknown {
// typeMeta 应该包含 "kind" 和 "apiVersion" 的字符串值,就像
// 对应的 JSON 对象中所设置的那样
optional TypeMeta typeMeta = 1;
// raw 中将保存用 protobuf 序列化的完整对象。
// 参阅客户端库中为指定 kind 所作的 protobuf 定义
optional bytes raw = 2;
// contentEncoding 用于 raw 数据的编码格式。未设置此值意味着没有特殊编码。
optional string contentEncoding = 3;
// contentType 包含 raw 数据所采用的序列化方法。
// 未设置此值意味着 application/vnd.kubernetes.protobuf且通常被忽略
optional string contentType = 4;
}
message TypeMeta {
// apiVersion 是 type 对应的组名/版本
optional string apiVersion = 1;
// kind 是对象模式定义的名称。此对象应该存在一个 protobuf 定义。
optional string kind = 2;
}
```
{{< note >}}
<!--
Clients that receive a response in `application/vnd.kubernetes.protobuf` that does
not match the expected prefix should reject the response, as future versions may need
to alter the serialization format in an incompatible way and will do so by changing
the prefix.
-->
收到 `application/vnd.kubernetes.protobuf` 格式响应的客户端在响应与预期的前缀不匹配时应该拒绝响应,
因为将来的版本可能需要以某种不兼容的方式更改序列化格式,
并且这种更改是通过变更前缀完成的。
{{< /note >}}
<!--
## Resource deletion
View File
@ -67,7 +67,7 @@ Process namespace sharing is enabled using the `shareProcessNamespace` field of
2. 获取容器 `shell`,执行 `ps`
```shell
kubectl attach -it nginx -c shell
kubectl exec -it nginx -c shell -- /bin/sh
```
<!--
View File
@ -982,7 +982,7 @@ the Deployment and / or StatefulSet be removed from their
a change to that object is applied, for example via `kubectl apply -f
deployment.yaml`, this will instruct Kubernetes to scale the current number of Pods
to the value of the `spec.replicas` key. This may not be
desired and could be troublesome when an HPA is active.
desired and could be troublesome when an HPA is active, resulting in thrashing or flapping behavior.
-->
### 将 Deployment 和 StatefulSet 迁移到水平自动扩缩 {#migrating-deployments-and-statefulsets-to-horizontal-autoscaling}
@ -991,7 +991,7 @@ desired and could be troublesome when an HPA is active.
Deployment 和/或 StatefulSet 的 `spec.replicas` 的值。
如果不这样做,则只要应用对该对象的更改,例如通过 `kubectl apply -f deployment.yaml`
这将指示 Kubernetes 将当前 Pod 数量扩缩到 `spec.replicas` 键的值。这可能不是所希望的,
并且当 HPA 处于活动状态时可能会很麻烦。
并且当 HPA 处于活动状态时,可能会导致波动或反复变化的行为,进而带来麻烦。
<!--
Keep in mind that the removal of `spec.replicas` may incur a one-time
View File
@ -54,6 +54,7 @@ MySQL 设置都使用的是不安全的默认值,这是因为我们想把重
- Some familiarity with MySQL helps, but this tutorial aims to present
general patterns that should be useful for other systems.
- You are using the default namespace or another namespace that does not contain any conflicting objects.
- You need to have an AMD64-compatible CPU.
-->
- 本教程假定你熟悉
[PersistentVolumes](/zh-cn/docs/concepts/storage/persistent-volumes/)
@ -63,6 +64,7 @@ MySQL 设置都使用的是不安全的默认值,这是因为我们想把重
[ConfigMap](/zh-cn/docs/tasks/configure-pod-container/configure-pod-configmap/)。
- 熟悉 MySQL 会有所帮助,但是本教程旨在介绍对其他系统应该有用的常规模式。
- 你正在使用默认命名空间或不包含任何冲突对象的另一个命名空间。
- 你需要拥有一块兼容 AMD64 架构的 CPU。
## {{% heading "objectives" %}}
View File
@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
  name: pod
spec:
  securityContext:
    seccompProfile:
      type: Unconfined
  ephemeralContainers:
    - name: ephemeral-container
      image: debian
      securityContext:
        seccompProfile:
          type: RuntimeDefault
  initContainers:
    - name: init-container
      image: debian
      securityContext:
        seccompProfile:
          type: RuntimeDefault
  containers:
    - name: container
      image: docker.io/library/debian:stable
      securityContext:
        seccompProfile:
          type: Localhost
          localhostProfile: my-profile.json
View File
@ -33,7 +33,7 @@
}
.gridPage p:not(.announcement-main > p) {
color: rgb(26,26,26);
color: rgb(255,255,255);
margin-left: 0 !important;
padding-left: 0 !important;
font-weight: 300 !important;