Merge pull request #49969 from michellengnx/merged-main-dev-1.33

Merge main branch into dev-1.33
pull/49994/head
Kubernetes Prow Robot 2025-03-02 04:24:55 -08:00 committed by GitHub
commit ca2982863f
104 changed files with 2413 additions and 758 deletions

View File

@ -19,6 +19,342 @@
border-spacing: 6px;
}
div.feature-state-notice {
background-color: $feature;
border-radius: 0.75rem;
padding: 1rem;
margin-bottom: 1em;
font-size: 1.2em;
> .feature-state-name::before {
content: '';
color: $primary;
}
> .feature-state-name {
display: inline-block;
font-size: 0.95em;
font-weight: bold;
color: #000;
background-color: $feature;
}
code {
color: #000;
font-size: 1em;
font-family: inherit; // don't treat as actual code
background-color: $feature;
}
margin-right: 2em;
max-width: 80%;
}
.includecode .highlight {
margin-top: 0;
margin-bottom: 0;
}
// Customize color for warning callouts
.alert.alert-warning {
border-color: #d9534f;
hr {
border-top-color: #d9534f;
}
}
// Bootstrap doesn't have a “caution” color. Add this here.
.alert.alert-caution {
border-color: #f0ad4e;
hr {
border-top-color: #f0ad4e;
}
}
// Make note (aka info) callouts render the heading as if it
// is bold running text.
.alert.alert-info {
border-color: #428bca;
h4, h4.alert-heading {
color: #000;
display: block;
float: initial;
font-size: 1rem;
padding: 0;
padding-right: 0.5rem;
margin: 0;
line-height: 1.5; // match paragraph style
}
}
// Only danger headings need large text (per Docsy)
// For other callouts, use the size of the running text
.alert:not(.alert-danger) h4 {
font-size: 1em;
}
// All callout headings should be bold
.alert {
border-style: solid;
border-width: 0 0 0 4px;
border-radius: 0 0 2rem 0;
h4 {
font-weight: bold;
font-style: initial;
}
}
.glossary-tooltip {
display: inline-block;
border-bottom: 1px dotted black;
color: $black;
background: transparent;
text-decoration: none !important;
}
/* code samples */
.code-sample > .copy-code-icon {
cursor: pointer;
display: flex;
gap: 1rem;
justify-content: right;
padding: 0.2rem;
}
/* CSS for 'figure' full-screen display */
/* Define styles for full-screen overlay */
.figure-fullscreen-overlay {
position: fixed;
inset: 0;
z-index: 9999;
background-color: rgba(255, 255, 255, 0.95); /* White background with some transparency */
display: flex;
justify-content: center;
align-items: center;
padding: calc(5% + 20px);
box-sizing: border-box;
}
/* CSS class to scale the image when zoomed */
.figure-zoomed {
transform: scale(1.2);
}
/* Define styles for full-screen image */
.figure-fullscreen-img {
max-width: 100%;
max-height: 100%;
object-fit: contain; /* Maintain aspect ratio and fit within the container */
}
/* Define styles for close button */
.figure-close-button {
position: absolute;
top: 1%;
right: 2%;
cursor: pointer;
font-size: calc(min(5vw + 10px, 4rem));
color: $primary;
background-color: rgba(255, 255, 255, 0.25);
}
/* Sidebar menu */
#td-sidebar-menu {
#m-docs span, small {
visibility: hidden;
}
#m-docs small {
visibility: collapse; // if supported
}
}
/* Styles for CVE table */
table tr.cve-status-open, table tr.cve-status-unknown {
> td.cve-item-summary {
font-weight: bold;
}
}
/* Styles for general tables */
table td.value-not-applicable {
text-align: center;
}
// <details> shortcode
details > summary {
margin-bottom: 1em;
color: $primary;
background: transparent;
}
details:not([open]) > summary:after {
content: '';
display: inline-block;
}
// glossary
body.glossary {
main {
ul.glossary-terms > li {
list-style-type: none;
padding: 0.5em;
padding-bottom: calc(min(1em, 0.25em + 0.25vh ));
margin: 0;
margin-top: calc(min(1.5em, 0.2em + 1vh ));
}
ul.glossary-terms > li.hide {
display: none;
}
ul.glossary-terms > li:has(.term-anchor:target) {
border-left: 0.3em solid $primary;
background: rgba($secondary, 0.2);
}
#tag-container {
float: left;
max-width: calc(max(80%, 100em));
border-top: 1px solid $secondary;
border-bottom: 1px solid $secondary;
padding: 0.5em 0;
margin: 2em 0;
> p {
display: inline-block;
padding-top: 0.2em;
}
.hide {
display: none;
}
.tag-option {
border-radius: 0.33em;
padding: 0.75em;
margin: 1em;
margin-top: 0.2em;
float: left;
font-weight: bold;
}
.tag-description {
margin-left: auto;
margin-right: auto;
padding: 0.2em;
padding-bottom: 0.8em;
text-align: center;
}
.canonical-tag {
color: white;
background-color: $secondary;
}
.canonical-tag a {
color: inherit;
background: transparent;
text-decoration: none !important;
}
.active-tag {
color: $white;
background-color: $primary;
}
}
.term-anchor:target + .term-name > span {
color: $primary;
}
.term-anchor:target {
visibility: initial;
}
.glossary-term-name {
font-weight: bold;
display: inline-block;
padding-left: 0.25em;
padding-right: 0.25em;
}
.glossary-aka {
display: inline-block;
padding-left: 0.25em;
padding-right: 0.25em;
padding-bottom: 0.25em;
}
#glossary-details-before {
margin-top: 3em;
font-style: italic;
clear: both;
}
.preview-text {
display: inline-block;
margin-bottom: 0.2em;
}
.preview-text + * {
margin-top: 0.2em;
}
.term-definition {
margin-left: calc(min(2em, 0.5em + 0.75vw));
.hide {
display: none;
}
}
.glossary-aka {
font-style: italic;
}
.preview-text p {
display: inline;
}
.permalink {
display: inline-block;
width: 0.9em;
height: 0.9em;
padding-left: 0.1em;
&.hide {
visibility: hidden;
}
}
.term-anchor {
display: block;
position: relative;
top: -4rem; // adjust scrolling to target
visibility: hidden;
}
.invisible {
visibility: hidden;
}
}
}
/* Table content */
.tab-content table{
border-collapse: separate;
border-spacing: 6px;
}
.tab-pane {
border-radius: 0.25rem;
padding: 0 16px 16px;
@ -30,159 +366,6 @@
}
}
table tr.cve-status-open, table tr.cve-status-unknown {
> td.cve-item-summary {
font-weight: bold;
}
}
.launch-cards {
padding: 0;
display: grid;
grid-template-columns: repeat(3, 1fr);
row-gap: 1em;
.launch-card {
display: flex;
padding: 0 30px 0 0;
.card-content{
width: fit-content;
display: flex;
flex-direction: column;
margin: 0;
row-gap: 1em;
h2 {
font-size: 1.75em;
padding: 0.5em 0;
margin: 0;
a {
display: none;
}
}
p {
margin: 0;
}
ul {
list-style: none;
height: fit-content;
line-height: 1.6;
padding: 0;
margin-block-end: auto;
}
br {
display: none;
}
button {
height: min-content;
width: auto;
padding: .5em 1em;
cursor: pointer;
box-sizing: border-box;
}
}
}
@media only screen and (max-width: 1000px) {
grid-template-columns: 1fr;
.launch-card {
width: 100%;
}
}
}
// blockquotes and callouts
body {
.alert {
// Override Docsy styles
padding: 0.4rem 0.4rem 0.4rem 1rem;
border-top: 1px solid #eee;
border-bottom: 1px solid #eee;
border-right: 1px solid #eee;
border-radius: 0.25em;
border-left-width: 0.5em; // fallback in case calc() is missing
background: #fff;
color: #000;
margin-top: 0.5em;
margin-bottom: 0.5em;
}
// Set minimum width and radius for alert color
.alert {
border-left-width: calc(max(0.5em, 4px));
border-top-left-radius: calc(max(0.5em, 4px));
border-bottom-left-radius: calc(max(0.5em, 4px));
padding-top: 0.75rem;
}
.alert.alert-caution {
border-left-color: #f0ad4e;
}
.alert.alert-info {
border-left-color: #428bca;
h4, h4.alert-heading {
color: #000;
display: block;
float: initial;
font-size: 1rem;
padding: 0;
padding-right: 0.5rem;
margin: 0;
line-height: 1.5;
font-weight: bolder;
}
}
.alert.alert-caution {
border-left-color: #f0ad4e;
h4, h4.alert-heading {
font-size: 1em;
font-weight: bold;
}
}
.alert.alert-warning {
border-left-color: #d9534f;
}
.alert.third-party-content {
border-left-color: #444;
}
h1:first-of-type + .alert.callout {
margin-top: 1.5em;
}
div.feature-state-notice {
background-color: #daeaf9;
border-radius: 0.75rem;
padding: 1rem;
margin-bottom: 1em;
font-size: 1.2em;
> .feature-state-name::before {
content: '';
color: $primary;
}
> .feature-state-name {
display: inline-block;
font-size: 0.95em;
font-weight: bold;
color: #000;
background-color: #daeaf9;
}
code {
color: #000;
font-size: 1em;
background-color: #daeaf9;
}
margin-right: 2em;
max-width: 80%;
}
}
// Special color for third party content disclaimers
.alert.third-party-content { border-left-color: #222; }
@ -206,203 +389,76 @@ body {
background: #f8f9cb;
}
.deprecation-warning, .pageinfo.deprecation-warning {
padding: clamp(10px, 2vmin, 20px);
margin: clamp(10px, 1vh, 20px) 0;
background-color: #faf5b6;
color: #000;
}
#mainContent .launch-content {
.deprecation-warning.outdated-blog, .pageinfo.deprecation-warning.outdated-blog {
background-color: $primary;
color: $white;
}
.launch-cards {
padding: 0;
display: grid;
grid-template-columns: repeat(3, 1fr);
row-gap: 1em;
.launch-card {
display: flex;
padding: 0 30px 0 0;
.card-content{
width: fit-content;
display: flex;
flex-direction: column;
margin: 0;
row-gap: 1em;
h2 {
font-size: 1.75em;
padding: 0.5em 0;
margin: 0;
a {
display: none;
}
}
body.td-home .deprecation-warning, body.td-blog .deprecation-warning, body.td-documentation .deprecation-warning {
border-radius: 3px;
}
p {
margin: 0;
}
.deprecation-warning p:only-child {
margin-bottom: 0;
}
ul {
list-style: none;
height: fit-content;
line-height: 1.6;
padding: 0;
margin-block-end: auto;
}
.td-documentation .td-content > .highlight {
max-width: initial;
width: 100%;
}
body.td-home #deprecation-warning {
max-width: 1000px;
margin-top: 2.5rem;
margin-left: auto;
margin-right: auto;
}
// <details> shortcode
details > summary {
margin-bottom: 1em;
color: $primary;
background: transparent;
}
details:not([open]) > summary:after {
content: '';
display: inline-block;
}
// glossary
body.glossary {
main {
ul.glossary-terms > li {
list-style-type: none;
padding: 0.5em;
padding-bottom: calc(min(0.5em, 0.25em + 0.15vh ));
margin: 0;
margin-top: calc(min(1.0em, 0.25em + 0.15vh ));
}
ul.glossary-terms > li.hide {
display: none;
}
ul.glossary-terms > li:has(.term-anchor:target) {
border-left: 0.3em solid $primary;
background: rgba(#999999, 0.2);
}
#tag-container {
float: left;
max-width: calc(max(80%, 100em));
border-top: 1px solid #999999;
border-bottom: 1px solid #999999;
padding: 0.5em 0;
margin: 2em 0;
> p {
display: inline-block;
padding-top: 0.2em;
}
.hide {
display: none;
}
.tag-option {
border-radius: 0.33em;
padding: 0.5em;
padding-left: 0.6em;
padding-right: 0.75em;
margin: 0.75em;
margin-top: 0.1em;
float: left;
font-weight: bold;
font-size: 0.925em;
}
.tag-option:not(.canonical-tag):hover {
outline: 1.5px solid $primary;
}
.tag-description {
margin-left: auto;
margin-right: auto;
padding: 0.2em;
padding-bottom: 0.8em;
text-align: center;
}
.canonical-tag {
color: white;
background-color: #999999;
}
.canonical-tag a {
color: inherit;
background: transparent;
text-decoration: none !important;
}
.active-tag {
color: $white;
background-color: $primary;
}
// darken on hover
.canonical-tag:hover {
background: darken(#999999, 15%)
}
.canonical-tag.active-tag:hover {
background: darken($primary, 10%);
}
}
.term-anchor:target + .term-name > span {
color: $primary;
}
.term-anchor:target {
visibility: initial;
}
.glossary-term-name {
font-weight: bold;
display: inline-block;
padding-left: 0.25em;
padding-right: 0.25em;
}
.glossary-aka {
display: inline-block;
padding-left: 0.25em;
padding-right: 0.25em;
padding-bottom: 0.25em;
}
#glossary-details-before {
margin-top: 3em;
font-style: italic;
clear: both;
}
.preview-text {
display: inline-block;
margin-bottom: 0.2em;
}
.preview-text + * {
margin-top: 0.2em;
}
.term-definition {
margin-left: calc(min(2em, 0.5em + 0.75vw));
.hide {
br {
display: none;
}
}
button {
height: min-content;
width: auto;
padding: .5em 1em;
cursor: pointer;
box-sizing: border-box;
}
}
}
.glossary-aka {
font-style: italic;
}
.preview-text p {
display: inline;
}
.permalink {
display: inline-block;
background-image: url(../images/link.png);
background-repeat: no-repeat;
background-size: contain;
width: 1em;
height: 1em;
padding-left: 0.1em;
}
.term-name:hover {
color: $primary;
}
.term-name:not(:hover) > .permalink {
visibility: hidden;
}
.term-anchor {
display: block;
position: relative;
top: -4rem; // adjust scrolling to target
visibility: hidden;
}
.invisible {
visibility: hidden;
@media only screen and (max-width: 1000px) {
grid-template-columns: 1fr;
.launch-card {
width: 100%;
}
}
}
}
/* SCSS Related to the list of metris in Kubernetes */
/* SCSS related to the list of metrics in Kubernetes */
main {
div.metric:nth-of-type(odd) { // Look & Feel , Aesthetics
// Look & Feel , Aesthetics
div.metric:nth-of-type(odd) {
background-color: $light-grey;
}
div.metrics {
.metric {
div:empty{
display: none;
@ -415,12 +471,13 @@ main {
padding: .75em;
.metric_name{
font-family: $font-family-monospace;
font-size: large;
font-weight: bold;
word-break: break-word;
}
label{
label {
font-weight: bold;
margin-right: .5em;
}
@ -435,20 +492,16 @@ main {
li.metric_labels_varying{
span{
display: inline-block;
background-color: rgb(240, 239, 239);
background-color: $metric-labels-varying;
padding: 0 0.5em;
margin-right: .35em;
font-family: monospace;
border: 1px solid rgb(230 , 230 , 230);
border: 1px solid $metric-labels-varying-border;
border-radius: 5%;
margin-bottom: .35em;
}
}
}
}
}
}

View File

@ -9,6 +9,13 @@ $medium-grey: #4c4c4c;
$dark-grey: #303030;
$white: #ffffff;
// feature gate colors
$feature: #daeaf9;
$feature-inset: #eeeeee;
$metric-labels-varying: #efefef;
$metric-labels-varying-border: #e2e2e2;
// tooltip
$tooltip-bg: #555;
$tooltip-arrow-color: $tooltip-bg !default;

View File

@ -79,7 +79,8 @@ IaaS Provider | Link |
IBM Cloud | https://www.ibm.com/cloud/security |
Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security |
Oracle Cloud Infrastructure | https://www.oracle.com/security |
VMware vSphere | https://www.vmware.com/security/hardening-guides |
Tencent Cloud | https://www.tencentcloud.com/solutions/data-security-and-information-protection |
VMware vSphere | https://www.vmware.com/solutions/security/hardening-guides |
{{< /table >}}

View File

@ -0,0 +1,214 @@
---
layout: blog
title: "Spotlight on SIG etcd"
slug: sig-etcd-spotlight
canonicalUrl: https://www.kubernetes.dev/blog/2025/02/19/sig-etcd-spotlight
date: 2025-03-04
author: "Frederico Muñoz (SAS Institute)"
---
In this SIG etcd spotlight we talked with [James Blair](https://github.com/jmhbnz), [Marek
Siarkowicz](https://github.com/serathius), [Wenjia Zhang](https://github.com/wenjiaswe), and
[Benjamin Wang](https://github.com/ahrtr) to learn a bit more about this Kubernetes Special Interest
Group.
## Introducing SIG etcd
**Frederico: Hello, thank you for the time! Let's start with some introductions: could you tell us a
bit about yourself, your role, and how you got involved in Kubernetes?**
**Benjamin:** Hello, I am Benjamin. I am a SIG etcd Tech Lead and one of the etcd maintainers. I
work for VMware, which is part of the Broadcom group. I got involved in Kubernetes & etcd & CSI
([Container Storage Interface](https://github.com/container-storage-interface/spec/blob/master/spec.md))
because of work and also a big passion for open source. I have been working on Kubernetes & etcd
(and also CSI) since 2020.
**James:** Hey team, I'm James, a co-chair for SIG etcd and an etcd maintainer. I work at Red Hat as a
Specialist Architect helping people adopt cloud native technology. I got involved with the
Kubernetes ecosystem in 2019. Around the end of 2022 I noticed how the etcd community and project
needed help, so I started contributing as often as I could. There is a saying in our community that
"you come for the technology, and stay for the people": for me this is absolutely real. It's been a
wonderful journey so far and I'm excited to support our community moving forward.
**Marek:** Hey everyone, I'm Marek, the SIG etcd lead. At Google, I lead the GKE etcd team, ensuring
a stable and reliable experience for all GKE users. My Kubernetes journey began with [SIG
Instrumentation](https://github.com/kubernetes/community/tree/master/sig-instrumentation), where I
created and led the [Kubernetes Structured Logging effort](https://kubernetes.io/blog/2020/09/04/kubernetes-1-19-introducing-structured-logs/).
I'm still the main project lead for [Kubernetes Metrics Server](https://kubernetes-sigs.github.io/metrics-server/),
providing crucial signals for autoscaling in Kubernetes. I started working on etcd 3 years ago,
right around the 3.5 release. We faced some challenges, but I'm thrilled to see etcd now the most
scalable and reliable it's ever been, with the highest contribution numbers in the project's
history. I'm passionate about distributed systems, extreme programming, and testing.
**Wenjia:** Hi there, my name is Wenjia, I am the co-chair of SIG etcd and one of the etcd
maintainers. I work at Google as an Engineering Manager, working on GKE (Google Kubernetes Engine)
and GDC (Google Distributed Cloud). I have been working in the area of open source Kubernetes and
etcd since the Kubernetes v1.10 and etcd v3.1 releases. I got involved in Kubernetes because of my
job, but what keeps me in the space is the charm of the container orchestration technology, and more
importantly, the awesome open source community.
## Becoming a Kubernetes Special Interest Group (SIG)
**Frederico: Excellent, thank you. I'd like to start with the origin of the SIG itself: SIG etcd is
a very recent SIG, could you quickly go through the history and reasons behind its creation?**
**Marek**: Absolutely! SIG etcd was formed because etcd is a critical component of Kubernetes,
serving as its data store. However, etcd was facing challenges like maintainer turnover and
reliability issues. [Creating a dedicated SIG](https://etcd.io/blog/2023/introducing-sig-etcd/)
allowed us to focus on addressing these problems, improving development and maintenance processes,
and ensuring etcd evolves in sync with the cloud-native landscape.
**Frederico: And has becoming a SIG worked out as expected? Better yet, are the motivations you just
described being addressed, and to what extent?**
**Marek**: It's been a positive change overall. Becoming a SIG has brought more structure and
transparency to etcd's development. We've adopted Kubernetes processes like KEPs
([Kubernetes Enhancement Proposals](https://github.com/kubernetes/enhancements/blob/master/keps/README.md))
and PRRs ([Production Readiness Reviews](https://github.com/kubernetes/community/blob/master/sig-architecture/production-readiness.md)),
which have improved our feature development and release cycle.
**Frederico: On top of those, what would you single out as the major benefit that has resulted from
becoming a SIG?**
**Marek**: The biggest benefit for me was adopting the Kubernetes testing infrastructure, tools like
[Prow](https://docs.prow.k8s.io/) and [TestGrid](https://testgrid.k8s.io/). For large projects like
etcd there is just no comparison to the default GitHub tooling. Having known, easy-to-use, clear
tools is a major boost to etcd, as it makes it much easier for Kubernetes contributors to also
help etcd.
**Wenjia**: Totally agree, while challenges remain, the SIG structure provides a solid foundation
for addressing them and ensuring etcd's continued success as a critical component of the Kubernetes
ecosystem.
The positive impact on the community is another crucial aspect of SIG etcd's success that I'd like
to highlight. The Kubernetes SIG structure has created a welcoming environment for etcd
contributors, leading to increased participation from the broader Kubernetes community. We have had
greater collaboration with other SIGs like [SIG API
Machinery](https://github.com/kubernetes/community/blob/master/sig-api-machinery/README.md),
[SIG Scalability](https://github.com/kubernetes/community/tree/master/sig-scalability),
[SIG Testing](https://github.com/kubernetes/community/tree/master/sig-testing),
[SIG Cluster Lifecycle](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle), etc.
This collaboration helps ensure etcd's development aligns with the needs of the wider Kubernetes
ecosystem. The formation of the [etcd Operator Working Group](https://github.com/kubernetes/community/blob/master/wg-etcd-operator/README.md)
under the joint effort between SIG etcd and SIG Cluster Lifecycle exemplifies this successful
collaboration, demonstrating a shared commitment to improving etcd's operational aspects within
Kubernetes.
**Frederico: Since you mentioned collaboration, have you seen changes in terms of contributors and
community involvement in recent months?**
**James**: Yes, as shown in our
[unique PR author data](https://etcd.devstats.cncf.io/d/23/prs-authors-repository-groups?orgId=1&var-period=m&var-repogroup_name=All&from=1422748800000&to=1738454399000)
we recently hit an all-time high in March and are trending in a positive direction:
{{< figure src="stats.png" alt="Unique PR author data stats" >}}
Additionally, looking at our
[overall contributions across all etcd project repositories](https://etcd.devstats.cncf.io/d/74/contributions-chart?orgId=1&from=1422748800000&to=1738454399000&var-period=m&var-metric=contributions&var-repogroup_name=All&var-country_name=All&var-company_name=All&var-company=all)
we are also observing a positive trend showing a resurgence in etcd project activity:
{{< figure src="stats2.png" alt="Overall contributions stats" >}}
## The road ahead
**Frederico: That's quite telling, thank you. In terms of the near future, what are the current
priorities for SIG etcd?**
**Marek**: Reliability is always top of mind - we need to make sure etcd is rock-solid. We're also
working on making etcd easier to use and manage for operators. And we have our sights set on making
etcd a viable standalone solution for infrastructure management, not just for Kubernetes. Oh, and of
course, scaling - we need to ensure etcd can handle the growing demands of the cloud-native world.
**Benjamin**: I agree that reliability should always be our top guiding principle. We need to ensure
not only correctness but also compatibility. Additionally, we should continuously strive to improve
the understandability and maintainability of etcd. Our focus should be on addressing the pain points
that the community cares about the most.
**Frederico: Are there any specific SIGs that you work closely with?**
**Marek**: SIG API Machinery, for sure: they own the structure of the data etcd stores, so we're
constantly working together. And SIG Cluster Lifecycle: etcd is a key part of Kubernetes clusters,
so we collaborate in the newly created etcd Operator Working Group.
**Wenjia**: Other than SIG API Machinery and SIG Cluster Lifecycle that Marek mentioned above, SIG
Scalability and SIG Testing are other groups that we work closely with.
**Frederico: In a more general sense, how would you list the key challenges for SIG etcd in the
evolving cloud native landscape?**
**Marek**: Well, reliability is always a challenge when you're dealing with critical data. The
cloud-native world is evolving so fast that scaling to meet those demands is a constant effort.
## Getting involved
**Frederico: We're almost at the end of our conversation, but for those interested in etcd, how
can they get involved?**
**Marek**: We'd love to have them! The best way to start is to join our
[SIG etcd meetings](https://github.com/kubernetes/community/blob/master/sig-etcd/README.md#meetings),
follow discussions on the [etcd-dev mailing list](https://groups.google.com/g/etcd-dev), and check
out our [GitHub issues](https://github.com/etcd-io/etcd/issues). We're always looking for people to
review proposals, test code, and contribute to documentation.
**Wenjia**: I love this question 😀. There are numerous ways for people interested in contributing
to SIG etcd to get involved and make a difference. Here are some key areas where you can help:
**Code Contributions**:
- _Bug Fixes_: Tackle existing issues in the etcd codebase. Start with issues labeled "good first
issue" or "help wanted" to find tasks that are suitable for newcomers.
- _Feature Development_: Contribute to the development of new features and enhancements. Check the
etcd roadmap and discussions to see what's being planned and where your skills might fit in.
- _Testing and Code Reviews_: Help ensure the quality of etcd by writing tests, reviewing code
changes, and providing feedback.
- _Documentation_: Improve [etcd's documentation](https://etcd.io/docs/) by adding new content,
clarifying existing information, or fixing errors. Clear and comprehensive documentation is
essential for users and contributors.
- _Community Support_: Answer questions on forums, mailing lists, or [Slack channels](https://kubernetes.slack.com/archives/C3HD8ARJ5).
Helping others understand and use etcd is a valuable contribution.
**Getting Started**:
- _Join the community_: Start by joining the etcd community on Slack,
attending SIG meetings, and following the mailing lists. This will
help you get familiar with the project, its processes, and the
people involved.
- _Find a mentor_: If you're new to open source or etcd, consider
finding a mentor who can guide you and provide support. Stay tuned!
Our first mentorship cohort was very successful, and a new round of
the mentorship program is coming up.
- _Start small_: Don't be afraid to start with small contributions. Even
fixing a typo in the documentation or submitting a simple bug fix
can be a great way to get involved.
By contributing to etcd, you'll not only be helping to improve a
critical piece of the cloud-native ecosystem but also gaining valuable
experience and skills. So, jump in and start contributing!
**Frederico: Excellent, thank you. Lastly, one piece of advice that
you'd like to give to other newly formed SIGs?**
**Marek**: Absolutely! My advice would be to embrace the established
processes of the larger community, prioritize collaboration with other
SIGs, and focus on building a strong community.
**Wenjia**: Here are some tips I myself found very helpful in my OSS
journey:
- _Be patient_: Open source development can take time. Don't get
discouraged if your contributions aren't accepted immediately or if
you encounter challenges.
- _Be respectful_: The etcd community values collaboration and
respect. Be mindful of others' opinions and work together to achieve
common goals.
- _Have fun_: Contributing to open source should be
enjoyable. Find areas that interest you and contribute in ways that
you find fulfilling.
**Frederico: A great way to end this spotlight, thank you all!**
---
For more information and resources, please take a look at:
1. etcd website: https://etcd.io/
2. etcd GitHub repository: https://github.com/etcd-io/etcd
3. etcd community: https://etcd.io/community/

Binary file not shown (new image, 130 KiB).

Binary file not shown (new image, 268 KiB).

View File

@ -108,7 +108,7 @@ Message: Pod was terminated in response to imminent node shutdown.
To provide more flexibility during graceful node shutdown around the ordering
of pods during shutdown, graceful node shutdown honors the PriorityClass for
Pods, provided that you enabled this feature in your cluster. The feature
allows cluster administers to explicitly define the ordering of pods
allows cluster administrators to explicitly define the ordering of pods
during graceful node shutdown based on
[priority classes](/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass).
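As a sketch of the kubelet configuration this involves, assuming the `shutdownGracePeriodByPodPriority` setting from the graceful node shutdown documentation (the priority thresholds and grace periods below are illustrative):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
shutdownGracePeriodByPodPriority:
  # Pods with priority >= 100000 get this much time to stop.
  - priority: 100000
    shutdownGracePeriodSeconds: 10
  # All remaining pods (priority >= 0) get this much time.
  - priority: 0
    shutdownGracePeriodSeconds: 180
```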

View File

@ -80,7 +80,7 @@ IBM Cloud | https://www.ibm.com/cloud/security |
Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security |
Oracle Cloud Infrastructure | https://www.oracle.com/security |
Tencent Cloud | https://www.tencentcloud.com/solutions/data-security-and-information-protection |
VMware vSphere | https://www.vmware.com/security/hardening-guides |
VMware vSphere | https://www.vmware.com/solutions/security/hardening-guides |
{{< /table >}}

View File

@ -535,7 +535,7 @@ built-in [Pod Security Admission Controller](/docs/concepts/security/pod-securit
### What about sandboxed Pods?
There is not currently an API standard that controls whether a Pod is considered sandboxed or
There is currently no API standard that controls whether a Pod is considered sandboxed or
not. Sandbox Pods may be identified by the use of a sandboxed runtime (such as gVisor or Kata
Containers), but there is no standard definition of what a sandboxed runtime is.
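In practice, a cluster often signals a sandboxed runtime through a RuntimeClass plus the Pod's `runtimeClassName`; a minimal sketch assuming a node configured with the gVisor `runsc` handler (the names are illustrative, and this is a convention rather than an API standard):

```yaml
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: gvisor          # illustrative name
handler: runsc          # gVisor handler configured on the node
---
apiVersion: v1
kind: Pod
metadata:
  name: sandboxed-example
spec:
  runtimeClassName: gvisor   # opt this Pod into the sandboxed runtime
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9
```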

View File

@ -111,7 +111,7 @@ in the cluster.
It is also possible to scale workloads based on events, for example using the
[_Kubernetes Event Driven Autoscaler_ (**KEDA**)](https://keda.sh/).
KEDA is a CNCF graduated enabling you to scale your workloads based on the number
KEDA is a CNCF-graduated project enabling you to scale your workloads based on the number
of events to be processed, for example the number of messages in a queue. There exists
a wide range of adapters for different event sources to choose from.
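As a sketch of how this looks, here is a ScaledObject following KEDA's RabbitMQ scaler (field names may vary by KEDA version; the Deployment, queue, and environment variable names are hypothetical):

```yaml
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: queue-consumer-scaler      # hypothetical
spec:
  scaleTargetRef:
    name: queue-consumer           # hypothetical Deployment to scale
  minReplicaCount: 0
  maxReplicaCount: 20
  triggers:
    - type: rabbitmq
      metadata:
        queueName: tasks           # hypothetical queue
        mode: QueueLength          # scale on queue depth
        value: "20"                # target messages per replica
        hostFromEnv: RABBITMQ_URL  # connection string from the workload env
```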

View File

@ -170,11 +170,16 @@ existing `[languages]` block. The German block, for example, looks like:
```toml
[languages.de]
title = "Kubernetes"
description = "Produktionsreife Container-Verwaltung"
languageName = "Deutsch (German)"
languageNameLatinScript = "Deutsch"
weight = 5
contentDir = "content/de"
weight = 8
languagedirection = "ltr"
[languages.de.params]
time_format_blog = "02.01.2006"
language_alternatives = ["en"]
description = "Produktionsreife Container-Orchestrierung"
languageNameLatinScript = "Deutsch"
```
The language selection bar lists the value for `languageName`. Assign "language
@ -206,15 +211,11 @@ the repository. For example, the two-letter code for German is `de`:
mkdir content/de
```
You also need to create a directory inside `data/i18n` for
You also need to create a directory inside `i18n` for
[localized strings](#site-strings-in-i18n); look at existing localizations
for an example. To use these new strings, you must also create a symbolic link
from `i18n/<localization>.toml` to the actual string configuration in
`data/i18n/<localization>/<localization>.toml` (remember to commit the symbolic
link).
for an example.
For example, for German the strings live in `data/i18n/de/de.toml`, and
`i18n/de.toml` is a symbolic link to `data/i18n/de/de.toml`.
For example, for German the strings live in `i18n/de/de.toml`.
### Localize the community code of conduct
@ -253,6 +254,7 @@ approvers:
- sig-docs-es-owners
labels:
- area/localization
- language/es
```
@ -431,16 +433,16 @@ release: v{{< skew nextMinorVersion >}}.
### Site strings in i18n
Localizations must include the contents of
[`data/i18n/en/en.toml`](https://github.com/kubernetes/website/blob/main/data/i18n/en/en.toml)
[`i18n/en/en.toml`](https://github.com/kubernetes/website/blob/main/i18n/en/en.toml)
in a new language-specific file. Using German as an example:
`data/i18n/de/de.toml`.
`i18n/de/de.toml`.
Add a new localization directory and file to `data/i18n/`. For example, with
Add a new localization directory and file to `i18n/`. For example, with
German (`de`):
```bash
mkdir -p data/i18n/de
cp data/i18n/en/en.toml data/i18n/de/de.toml
mkdir -p i18n/de
cp i18n/en/en.toml i18n/de/de.toml
```
Revise the comments at the top of the file to suit your localization, then

View File

@ -168,13 +168,13 @@ When reviewing, use the following as a starting point.
- Early feedback on blog posts is welcome via a Google Doc or HackMD. Please request input early from the [#sig-docs-blog Slack channel](https://kubernetes.slack.com/archives/CJDHVD54J).
- Before reviewing blog PRs, be familiar with [Submitting blog posts and case studies](/docs/contribute/new-content/blogs-case-studies/).
- We are willing to mirror any blog article that was published to https://kubernetes.dev/blog/ (the contributor blog) provided that:
- the mirrored article has the same publication date as the original (it should have the same publication time too, but you can also set a time stamp up to 12 hours later for special cases)
- for PRs that arrive after the original article was merged to https://kubernetes.dev/, there haven't been
(and won't be) any articles published to the main blog between the time that the original and the mirrored
article publish.
This is because we don't want to add articles to people's feeds, such as RSS, except at the very end of their feed.
- the original article doesn't contravene any strongly recommended review guidelines or community norms.
- You should set the canonical URL for the mirrored article to the URL of the original article
(you can use a preview to predict the URL and fill this in ahead of actual publication). Use the `canonicalUrl`
field in [front matter](https://gohugo.io/content-management/front-matter/) for this; see the
example after this list.
- Consider the target audience and whether the blog post is appropriate for kubernetes.io
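As a concrete reference, the SIG etcd spotlight mirrored earlier in this commit sets the field in its front matter like this:

```yaml
---
layout: blog
title: "Spotlight on SIG etcd"
slug: sig-etcd-spotlight
canonicalUrl: https://www.kubernetes.dev/blog/2025/02/19/sig-etcd-spotlight
date: 2025-03-04
---
```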

View File

@ -58,17 +58,17 @@ You can declare a `prerequisites` heading as follows:
```
The `heading` shortcode expects one string parameter.
The heading string parameter matches the prefix of a variable in the `i18n/<lang>.toml` files.
The heading string parameter matches the prefix of a variable in the `i18n/<lang>/<lang>.toml` files.
For example:
`i18n/en.toml`:
`i18n/en/en.toml`:
```toml
[whatsnext_heading]
other = "What's next"
```
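Pages consume these strings through the shortcode in their Markdown source; a minimal sketch of an invocation matching the `whatsnext_heading` entry above (the same pattern appears elsewhere in this commit as `## {{% heading "prerequisites" %}}`):

```
## {{% heading "whatsnext" %}}
```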
`i18n/ko.toml`:
`i18n/ko/ko.toml`:
```toml
[whatsnext_heading]

View File

@ -5,6 +5,8 @@ title: Kubernetes Component SLI Metrics
linkTitle: Service Level Indicator Metrics
content_type: reference
weight: 20
description: >-
High-level indicators for measuring the reliability and performance of Kubernetes components.
---
<!-- overview -->
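For orientation, these indicators are exposed on each component's `/metrics/slis` endpoint; a minimal sketch of fetching them from the API server:

```shell
# Read SLI metrics from the API server's dedicated endpoint.
kubectl get --raw "/metrics/slis"
```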

View File

@ -4,6 +4,8 @@ content_type: reference
weight: 60
reviewers:
- dashpole
description: >-
Provides runtime diagnostics for Kubernetes components, offering insights into component runtime status and configuration flags.
---
@ -67,4 +69,4 @@ authorization-webhook-cache-authorized-ttl=5m0s
authorization-webhook-cache-unauthorized-ttl=30s
authorization-webhook-version=v1beta1
default-watch-cache-size=100
```
```

View File

@ -141,6 +141,13 @@ The types of plugins that can place socket files into that directory are:
(typically `/var/lib/kubelet/plugins_registry`).
### Graceful node shutdown
{{< feature-state feature_gate_name="GracefulNodeShutdown" >}}
[Graceful node shutdown](/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown)
stores state locally at `/var/lib/kubelet/graceful_node_shutdown_state`.
## Security profiles & configuration
### Seccomp

View File

@ -244,7 +244,8 @@ they can follow these steps to inspect the kubelet configuration:
"imagefs.available": "15%",
"memory.available": "100Mi",
"nodefs.available": "10%",
"nodefs.inodesFree": "5%"
"nodefs.inodesFree": "5%",
"imagefs.inodesFree": "5%"
},
"evictionPressureTransitionPeriod": "1m0s",
"enableControllerAttachDetach": true,

View File

@ -35,7 +35,6 @@ configuration.
These tasks will help you to migrate:
* [Check whether Dockershim removal affects you](/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you/)
* [Migrate Docker Engine nodes from dockershim to cri-dockerd](/docs/tasks/administer-cluster/migrating-from-dockershim/migrate-dockershim-dockerd/)
* [Migrating telemetry and security agents from dockershim](/docs/tasks/administer-cluster/migrating-from-dockershim/migrating-telemetry-and-security-agents/)

View File

@ -1,123 +0,0 @@
---
title: "Migrate Docker Engine nodes from dockershim to cri-dockerd"
weight: 20
content_type: task
---
{{% thirdparty-content %}}
This page shows you how to migrate your Docker Engine nodes to use `cri-dockerd`
instead of dockershim. You should follow these steps in these scenarios:
* You want to switch away from dockershim and still use Docker Engine to run
containers in Kubernetes.
* You want to upgrade to Kubernetes v{{< skew currentVersion >}} and your
existing cluster relies on dockershim, in which case you must migrate
from dockershim and `cri-dockerd` is one of your options.
To learn more about the removal of dockershim, read the [FAQ page](/dockershim).
## What is cri-dockerd? {#what-is-cri-dockerd}
In Kubernetes 1.23 and earlier, you could use Docker Engine with Kubernetes,
relying on a built-in component of Kubernetes named _dockershim_.
The dockershim component was removed in the Kubernetes 1.24 release; however,
a third-party replacement, `cri-dockerd`, is available. The `cri-dockerd` adapter
lets you use Docker Engine through the {{<glossary_tooltip term_id="cri" text="Container Runtime Interface">}}.
{{<note>}}
If you already use `cri-dockerd`, you aren't affected by the dockershim removal.
Before you begin, [Check whether your nodes use the dockershim](/docs/tasks/administer-cluster/migrating-from-dockershim/find-out-runtime-you-use/).
{{</note>}}
If you want to migrate to `cri-dockerd` so that you can continue using Docker
Engine as your container runtime, you should do the following for each affected
node:
1. Install `cri-dockerd`.
1. Cordon and drain the node.
1. Configure the kubelet to use `cri-dockerd`.
1. Restart the kubelet.
1. Verify that the node is healthy.
Test the migration on non-critical nodes first.
You should perform the following steps for each node that you want to migrate
to `cri-dockerd`.
## {{% heading "prerequisites" %}}
* [`cri-dockerd`](https://mirantis.github.io/cri-dockerd/usage/install)
installed and started on each node.
* A [network plugin](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/).
## Cordon and drain the node
1. Cordon the node to stop new Pods scheduling on it:
```shell
kubectl cordon <NODE_NAME>
```
Replace `<NODE_NAME>` with the name of the node.
1. Drain the node to safely evict running Pods:
```shell
kubectl drain <NODE_NAME> \
--ignore-daemonsets
```
## Configure the kubelet to use cri-dockerd
The following steps apply to clusters set up using the kubeadm tool. If you use
a different tool, you should modify the kubelet using the configuration
instructions for that tool.
1. Open `/var/lib/kubelet/kubeadm-flags.env` on each affected node.
1. Modify the `--container-runtime-endpoint` flag to
`unix:///var/run/cri-dockerd.sock`.
1. Modify the `--container-runtime` flag to `remote`
(unavailable in Kubernetes v1.27 and later).
The kubeadm tool stores the node's socket as an annotation on the `Node` object
in the control plane. To modify this socket for each affected node:
1. Edit the YAML representation of the `Node` object:
```shell
KUBECONFIG=/path/to/admin.conf kubectl edit no <NODE_NAME>
```
Replace the following:
* `/path/to/admin.conf`: the path to the kubectl configuration file,
`admin.conf`.
* `<NODE_NAME>`: the name of the node you want to modify.
1. Change `kubeadm.alpha.kubernetes.io/cri-socket` from
`/var/run/dockershim.sock` to `unix:///var/run/cri-dockerd.sock`.
1. Save the changes. The `Node` object is updated on save.
## Restart the kubelet
```shell
systemctl restart kubelet
```
## Verify that the node is healthy
To check whether the node uses the `cri-dockerd` endpoint, follow the
instructions in [Find out which runtime you use](/docs/tasks/administer-cluster/migrating-from-dockershim/find-out-runtime-you-use/).
The `--container-runtime-endpoint` flag for the kubelet should be `unix:///var/run/cri-dockerd.sock`.
## Uncordon the node
Uncordon the node to let Pods schedule on it:
```shell
kubectl uncordon <NODE_NAME>
```
## {{% heading "whatsnext" %}}
* Read the [dockershim removal FAQ](/dockershim/).
* [Learn how to migrate from Docker Engine with dockershim to containerd](/docs/tasks/administer-cluster/migrating-from-dockershim/change-runtime-containerd/).

View File

@ -17,7 +17,8 @@ Kubernetes.
Service issues exist for pod CNI network setup and tear down in containerd
v1.6.0-v1.6.3 when the CNI plugins have not been upgraded and/or the CNI config
version is not declared in the CNI config files. The containerd team reports, "these issues are resolved in containerd v1.6.4."
version is not declared in the CNI config files. The containerd team reports,
"these issues are resolved in containerd v1.6.4."
With containerd v1.6.0-v1.6.3, if you do not upgrade the CNI plugins and/or
declare the CNI config version, you might encounter the following "Incompatible
@ -59,23 +60,23 @@ your CNI plugins and editing the CNI config files.
Here's an overview of the typical steps for each node:
1. [Safely drain and cordon the
node](/docs/tasks/administer-cluster/safely-drain-node/).
2. After stopping your container runtime and kubelet services, perform the
following upgrade operations:
- If you're running CNI plugins, upgrade them to the latest version.
- If you're using non-CNI plugins, replace them with CNI plugins. Use the
latest version of the plugins.
- Update the plugin configuration file to specify or match a version of the
CNI specification that the plugin supports, as shown in the following ["An
example containerd configuration
file"](#an-example-containerd-configuration-file) section.
- For `containerd`, ensure that you have installed the latest version (v1.0.0
or later) of the CNI loopback plugin.
- Upgrade node components (for example, the kubelet) to Kubernetes v1.24
- Upgrade to or install the most current version of the container runtime.
3. Bring the node back into your cluster by restarting your container runtime
and kubelet. Uncordon the node (`kubectl uncordon <nodename>`).
1. [Safely drain and cordon the node](/docs/tasks/administer-cluster/safely-drain-node/).
1. After stopping your container runtime and kubelet services, perform the
following upgrade operations:
- If you're running CNI plugins, upgrade them to the latest version.
- If you're using non-CNI plugins, replace them with CNI plugins. Use the
latest version of the plugins.
- Update the plugin configuration file to specify or match a version of the
CNI specification that the plugin supports, as shown in the following
["An example containerd configuration file"](#an-example-containerd-configuration-file) section.
- For `containerd`, ensure that you have installed the latest version (v1.0.0 or later)
of the CNI loopback plugin.
- Upgrade node components (for example, the kubelet) to Kubernetes v1.24
- Upgrade to or install the most current version of the container runtime.
1. Bring the node back into your cluster by restarting your container runtime
and kubelet. Uncordon the node (`kubectl uncordon <nodename>`).
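To make the config-version step above concrete, here is a minimal sketch of a single-plugin CNI config that declares the spec version it targets (the plugin choice, network name, and subnet are illustrative placeholders):

```json
{
  "cniVersion": "1.0.0",
  "name": "example-net",
  "type": "bridge",
  "bridge": "cni0",
  "isGateway": true,
  "ipMasq": true,
  "ipam": {
    "type": "host-local",
    "subnet": "10.22.0.0/16"
  }
}
```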
## An example containerd configuration file

View File

@ -56,6 +56,7 @@ Huawei Cloud | https://www.huaweicloud.com/intl/en-us/securecenter/overallsafety
IBM Cloud | https://www.ibm.com/cloud/security |
Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security |
Oracle Cloud Infrastructure | https://www.oracle.com/security |
Tencent Cloud | https://www.tencentcloud.com/solutions/data-security-and-information-protection |
VMware vSphere | https://www.vmware.com/security/hardening-guides |
{{< /table >}}

View File

@ -0,0 +1,33 @@
---
title: "Windows in Kubernetes"
simple_list: true
weight: 200 # late in list
description: >-
  Kubernetes supports nodes that run Microsoft Windows.
---
Kubernetes supports worker {{< glossary_tooltip text="nodes" term_id="node" >}}
running either Linux or Microsoft Windows.
{{% thirdparty-content single="true" %}}
The CNCF and its parent organization, the Linux Foundation, take a vendor-neutral approach to
compatibility. It is possible to join your
[Windows server](https://www.microsoft.com/en-us/windows-server) as a worker node in a Kubernetes cluster.
You can [install and set up kubectl on Windows](/docs/tasks/tools/install-kubectl-windows/)
no matter which operating system you use within your cluster.
If you are using Windows nodes, you can read:
* [Networking on Windows](/docs/concepts/services-networking/windows-networking/)
* [Windows storage in Kubernetes](/docs/concepts/storage/windows-storage/)
* [Resource management for Windows nodes](/docs/concepts/configuration/windows-resource-management/)
* [Configure RunAsUserName for Windows Pods and containers](/docs/tasks/configure-pod-container/configure-runasusername/)
* [Create a Windows HostProcess Pod](/docs/tasks/configure-pod-container/create-hostprocess-pod/)
* [Configure Group Managed Service Accounts for Windows Pods and containers](/docs/tasks/configure-pod-container/configure-gmsa/)
* [Security for Windows nodes](/docs/concepts/security/windows-security/)
* [Windows debugging tips](/docs/tasks/debug/debug-cluster/windows/)
* [Guide for scheduling Windows containers in Kubernetes](/docs/concepts/windows/user-guide)
or, for an overview, read:

View File

@ -3,47 +3,80 @@ title: Download Kubernetes
type: docs
---
A Kubernetes cluster is available as binaries for each of its
components, together with a set of standard client applications that help with
bootstrapping or operating a cluster. Kubernetes components such as the API
server can run as container images inside a cluster; those images are also part
of the official Kubernetes release. All Kubernetes binaries and container
images are published for a range of operating systems and hardware architectures.
### kubectl {#kubectl}
<!-- overview -->
[kubectl](/docs/reference/kubectl/kubectl/) is a command-line
tool for running commands against a Kubernetes cluster.
You can use kubectl to deploy applications, inspect and manage cluster resources, and
view logs. For more information about kubectl, including the full list of operations you
can perform with it, see the [`kubectl` reference documentation](/docs/reference/kubectl/).
kubectl can be installed on a variety of Linux systems, as well as on macOS and Windows.
Below you will find links to the installation instructions for your preferred system:
- [Install kubectl on Linux](/docs/tasks/tools/install-kubectl-linux)
- [Install kubectl on macOS](/docs/tasks/tools/install-kubectl-macos)
- [Install kubectl on Windows](/docs/tasks/tools/install-kubectl-windows)
## Container images {#container-images}
All container images are published to the `registry.k8s.io` registry.
{{< feature-state for_k8s_version="v1.24" state="alpha" >}}
For Kubernetes {{< param "version">}}, the following container images are signed with [cosign](https://github.com/sigstore/cosign):
| Container image | Supported architectures |
| ------------------------------------------------------------------------- | --------------------------------- |
| registry.k8s.io/kube-apiserver:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x |
| registry.k8s.io/kube-controller-manager:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x |
| registry.k8s.io/kube-proxy:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x |
| registry.k8s.io/kube-scheduler:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x |
| registry.k8s.io/conformance:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x |
### Container image architectures {#container-image-architectures}
All container images are available for multiple architectures, and the container
runtime should automatically pick the right variant based on the underlying
platform. It is also possible to pull an image
intended for a specific architecture by adding the appropriate suffix to the image name,
for example `registry.k8s.io/kube-apiserver-arm64:v{{< skew currentPatchVersion >}}`.
### Container image signatures {#container-image-signatures}
{{< feature-state for_k8s_version="v1.26" state="beta" >}}
For Kubernetes {{< param "version" >}},
container images are signed using
[sigstore](https://sigstore.dev) signatures:
{{< note >}}
Container image `sigstore` signatures do not currently match across different
geographic locations. More information about this problem is available in the
corresponding [GitHub issue](https://github.com/kubernetes/registry.k8s.io/issues/187).
{{< /note >}}
The Kubernetes project publishes a list of signed Kubernetes container
images in
[SPDX 2.3](https://spdx.dev/specifications/) format. You can fetch that list using:
```shell
curl -Ls "https://sbom.k8s.io/$(curl -Ls https://dl.k8s.io/release/stable.txt)/release" | grep "SPDXID: SPDXRef-Package-registry.k8s.io" | grep -v sha256 | cut -d- -f3- | sed 's/-/\//' | sed 's/-v1/:v1/'
```
For Kubernetes {{< skew currentVersion >}}, the only type of code artifact whose
integrity you can verify is a container image (using the experimental signing feature).
To manually verify the signed container images of the core Kubernetes components, see
[Verify Signed Container Images](/docs/tasks/administer-cluster/verify-signed-artifacts).
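To illustrate, a verification sketch with `cosign` (the identity and issuer values follow the pattern in that task page; treat them as assumptions to check against the current documentation):

```shell
# Verify the kube-apiserver image signature (sketch; confirm the
# certificate identity and issuer against the verify-signed-artifacts page).
cosign verify registry.k8s.io/kube-apiserver:v{{< skew currentPatchVersion >}} \
  --certificate-identity krel-trust@k8s-releng-prod.iam.gserviceaccount.com \
  --certificate-oidc-issuer https://accounts.google.com | jq .
```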
If you pull a container image intended for a single architecture, it is
signed in the same way as the images in the multi-architecture manifest lists.
## Binaries {#binaries}
{{< release-binaries >}}

View File

@ -13,7 +13,7 @@ tags:
- fundamental
---
The API server is the component of the Kubernetes {{< glossary_tooltip text="control plane" term_id="control-plane" >}}
that exposes the Kubernetes API. The API server is the front end of the Kubernetes control plane.
<!--more-->

View File

@ -452,6 +452,7 @@ Current New Contributor Ambassadors for SIG Docs:
You can contact the maintainers of the Chinese localization through:
* Qiming Teng ([GitHub - @tengqm](https://github.com/tengqm))
* Rui Chen ([GitHub - @chenrui333](https://github.com/chenrui333))
* Michael Yao ([GitHub - @windsonsea](https://github.com/windsonsea))
* [Slack channel](https://kubernetes.slack.com/messages/kubernetes-docs-zh)

View File

@ -0,0 +1,71 @@
---
title: Slamtec Case Study
linkTitle: slamtec
case_study_styles: true
cid: caseStudies
featured: false
new_case_study_styles: true
heading_background: /images/case-studies/slamtec/banner1.jpg
heading_title_logo: /images/slamtec_logo.png
case_study_details:
- Company: Slamtec
- Location: Shanghai, China
- Industry: Robotics
---
<h2>Challenge</h2>
<p>Founded in 2013, Slamtec provides service robot autonomous localization and navigation solutions. The company's strength lies in its R&D team's ability to quickly introduce, and continually iterate on, its core products. In the past few years, the company, which had a legacy infrastructure based on Alibaba Cloud and VMware vSphere, began looking to build its own stable and reliable container cloud platform to host its Internet of Things applications. "Our needs for the cloud platform included high availability, scalability and security; multi-granularity monitoring alarm capability; friendliness to containers and microservices; and perfect CI/CD support," says Benniu Ji, Director of Cloud Computing Business Division.</p>
<h2>Solution</h2>
<p>Ji's team chose Kubernetes for orchestration. "CNCF brings quality assurance and a complete ecosystem for <a href="https://kubernetes.io/">Kubernetes</a>, which is very important for the wide application of Kubernetes," says Ji. Thus Slamtec decided to adopt other CNCF projects as well: <a href="https://prometheus.io/">Prometheus</a> monitoring, <a href="https://www.fluentd.org/">Fluentd</a> logging, <a href="https://goharbor.io/">Harbor</a> registry, and <a href="https://helm.sh/">Helm</a> package manager.</p>
<h2>Impact</h2>
<p>With the new platform, Ji reports that Slamtec has experienced "18+ months of 100% stability!" For users, there is now zero service downtime and seamless upgrades. "Kubernetes with third-party service mesh integration (Istio, along with Jaeger and Envoy) significantly reduced the microservice configuration and maintenance efforts by 50%," he adds. With centralized metrics monitoring and log aggregation provided by Prometheus on Fluentd, teams are saving 50% of time spent on troubleshooting and debugging. Harbor replication has allowed production/staging/testing environments to cross public cloud and the private Kubernetes cluster to share the same container registry, resulting in 30% savings of CI/CD efforts. Plus, Ji says, "Helm has accelerated prototype development and environment setup with its rich sharing charts."</p>
{{< case-studies/quote author="BENNIU JI, DIRECTOR OF CLOUD COMPUTING BUSINESS DIVISION" >}}
"Cloud native technology helps us ensure high availability of our business, while improving development and testing efficiency, shortening the research and development cycle and enabling rapid product delivery."
{{< /case-studies/quote >}}
{{< case-studies/lead >}}
Founded in 2013, Slamtec provides service robot autonomous localization and navigation solutions. In this fast-moving space, the company built its success on the ability of its R&D team to quickly introduce, and continually iterate on, its core products.
{{< /case-studies/lead >}}
<p>To sustain that development velocity, the company over the past few years began looking to build its own stable and reliable container cloud platform to host its Internet of Things applications. With a legacy infrastructure based on <a href="https://www.alibabacloud.com/">Alibaba Cloud</a> and <a href="https://www.vmware.com/products/vsphere.html">VMware vSphere</a>, Slamtec teams had already adopted microservice architecture and continuous delivery, for "fine granularity on-demand scaling, fault isolation, ease of development, testing, and deployment, and for facilitating high-speed iteration," says Benniu Ji, Director of Cloud Computing Business Division. So "our needs for the cloud platform included high availability, scalability and security; multi-granularity monitoring alarm capability; friendliness to containers and microservices; and perfect CI/CD support."</p>
<p>After an evaluation of existing technologies, Ji's team chose <a href="https://kubernetes.io/">Kubernetes</a> for orchestration. "CNCF brings quality assurance and a complete ecosystem for Kubernetes, which is very important for the wide application of Kubernetes," says Ji. Plus, "avoiding binding to an infrastructure technology or provider can help us ensure that our business is deployed and migrated in cross-regional environments, and can serve users all over the world."</p>
{{< case-studies/quote
image="/images/case-studies/slamtec/banner3.jpg"
author="BENNIU JI, DIRECTOR OF CLOUD COMPUTING BUSINESS DIVISION"
>}}
"CNCF brings quality assurance and a complete ecosystem for Kubernetes, which is very important for the wide application of Kubernetes."
{{< /case-studies/quote >}}
<p>Thus Slamtec decided to adopt other CNCF projects as well. "We built a monitoring and logging system based on <a href="https://prometheus.io/">Prometheus</a> and <a href="https://www.fluentd.org/">Fluentd</a>," says Ji. "The integration between Prometheus/Fluentd and Kubernetes is convenient, with multiple dimensions of data monitoring and log collection capabilities."</p>
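<p>In a setup like this, Fluentd typically runs as a DaemonSet that tails container logs on every node, so application pods need no logging configuration of their own. For metrics, the sketch below shows one common way a workload is exposed to Prometheus, assuming the widely used <code>prometheus.io/*</code> annotation convention from Prometheus's Kubernetes service-discovery relabeling examples; the service name, image, and port are hypothetical, not taken from Slamtec's setup.</p>

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: robot-telemetry              # hypothetical IoT-style service
spec:
  replicas: 2
  selector:
    matchLabels:
      app: robot-telemetry
  template:
    metadata:
      labels:
        app: robot-telemetry
      annotations:
        prometheus.io/scrape: "true"   # matched by a relabeling rule in Prometheus
        prometheus.io/port: "8080"     # the port serving /metrics
    spec:
      containers:
      - name: telemetry
        image: registry.example.com/iot/robot-telemetry:1.0.0
        ports:
        - containerPort: 8080
```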
<p>The company uses <a href="https://goharbor.io/">Harbor</a> as a container image repository. "Harbor's replication function helps us implement CI/CD on both private and public clouds," says Ji. "In addition, multi-project support, certification and policy configuration, and integration with Kubernetes are also excellent functions." <a href="https://helm.sh/">Helm</a> is also being used as a package manager, and the team is evaluating the Istio framework. "We're very pleased that Kubernetes and these frameworks can be seamlessly integrated," Ji adds.</p>
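<p>The shared-registry pattern shows up in the workload manifests themselves: every environment pulls the same image path from the replicated Harbor instance. A minimal sketch, with a hypothetical registry host, project, and pull-secret name:</p>

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: mapping-api
spec:
  containers:
  - name: mapping-api
    image: harbor.example.com/prod/mapping-api:2.3.1   # same path in every environment
  imagePullSecrets:
  - name: harbor-pull-secret   # docker-registry Secret for the Harbor instance
```

<p>A Helm chart can then template the image tag per environment while the rest of the manifest stays identical everywhere.</p>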
{{< case-studies/quote
image="/images/case-studies/slamtec/banner4.jpg"
author="BENNIU JI, DIRECTOR OF CLOUD COMPUTING BUSINESS DIVISION"
>}}
"Cloud native is suitable for microservice architecture, it's suitable for fast iteration and agile development, and it has a relatively perfect ecosystem and active community."
{{< /case-studies/quote >}}
<p>With the new platform, Ji reports that Slamtec has experienced "18+ months of 100% stability!" For users, there is now zero service downtime and seamless upgrades. "We benefit from the abstraction of Kubernetes from network and storage," says Ji. "The dependence on external services can be decoupled from the service and placed under unified management in the cluster."</p>
<p>Using Kubernetes and Istio "significantly reduced the microservice configuration and maintenance efforts by 50%," he adds. With centralized metrics monitoring and log aggregation provided by Prometheus and Fluentd, teams are saving 50% of time spent on troubleshooting and debugging. Harbor replication has allowed production/staging/testing environments across the public cloud and the private Kubernetes cluster to share the same container registry, resulting in 30% savings of CI/CD efforts. Plus, Ji adds, "Helm has accelerated prototype development and environment setup with its rich sharing charts."</p>
<p>In short, Ji says, Slamtec's new platform is helping it achieve one of its primary goals: the quick and easy release of products. With multiple release models and a centralized control interface, the platform is changing developers' lives for the better. Slamtec also offers a unified API for the development of automated deployment tools according to users' specific needs.</p>
{{< case-studies/quote author="BENNIU JI, DIRECTOR OF CLOUD COMPUTING BUSINESS DIVISION" >}}
"We benefit from the abstraction of Kubernetes from network and storage, the dependence on external services can be decoupled from the service and placed under unified management in the cluster."
{{< /case-studies/quote >}}
<p>Given its own success with cloud native, Slamtec has just one piece of advice for organizations considering making the leap. "For already containerized services, you should migrate them to the cloud native architecture as soon as possible and enjoy the advantages brought by the cloud native ecosystem," Ji says. "To migrate traditional, non-containerized services, in addition to the architecture changes of the service itself, you need to fully consider the operation and maintenance workload required to build the cloud native architecture."</p>
<p>That said, the cost-benefit analysis has been simple for Slamtec. "Cloud native technology is suitable for microservice architecture, it's suitable for fast iteration and agile development, and it has a relatively perfect ecosystem and active community," says Ji. "It helps us ensure high availability of our business, while improving development and testing efficiency, shortening the research and development cycle and enabling rapid product delivery."</p>

@ -0,0 +1,79 @@
---
title: SlingTV Case Study
linkTitle: Sling TV
case_study_styles: true
cid: caseStudies
featured: true
weight: 49
quote: >
I would almost be so bold as to say that most of these applications that we are building now would not have been possible without the cloud native patterns and the flexibility that Kubernetes enables.
new_case_study_styles: true
heading_background: /images/case-studies/slingtv/banner1.jpg
heading_title_logo: /images/slingtv_logo.png
subheading: >
Sling TV: Marrying Kubernetes and AI to Enable Proper Web Scale
case_study_details:
- Company: Sling TV
- Location: Englewood, Colorado
- Industry: Streaming television
---
<h2>Challenge</h2>
<p>Launched by DISH Network in 2015, Sling TV experienced great customer growth from the beginning. After just a year, "we were going through some growing pains of some of the legacy systems and trying to find the right architecture to enable our future," says Brad Linder, Sling TV's Cloud Native & Big Data Evangelist. The company has particular challenges: "We take live TV and distribute it over the internet out to a user's device that we do not control," says Linder. "In a lot of ways, we are working in the Wild West: The internet is what it is going to be, and if a customer's service does not work for whatever reason, they do not care why. They just want things to work. Those are the variables of the equation that we have to try to solve. We really have to try to enable optionality and good customer experience at web scale."</p>
<h2>Solution</h2>
<p>Led by the belief that "the cloud native architectures and patterns really give us a lot of flexibility in meeting the needs of that sort of customer base," Linder partnered with <a href="http://rancher.com">Rancher Labs</a> to build Sling TV's next-generation platform around Kubernetes. "We are going to need to enable a hybrid cloud strategy including multiple public clouds and an on-premise VMware multi data center environment to meet the needs of the business at some point, so getting that sort of abstraction was a real goal," he says. "That is one of the biggest reasons why we picked Kubernetes." The team launched its first applications on Kubernetes in Sling TV's two internal data centers. The push to enable AWS as a data center option is underway and should be available by the end of 2018. The team has added <a href="https://prometheus.io/">Prometheus</a> for monitoring and <a href="https://github.com/jaegertracing/jaeger">Jaeger</a> for tracing, to work alongside the company's existing tool sets: Zenoss, New Relic and ELK.</p>
<h2>Impact</h2>
<p>"We are getting to the place where we can one-click deploy an entire data center the compute, network, Kubernetes, logging, monitoring and all the apps," says Linder. "We have really enabled a platform thinking based approach to allowing applications to consume common tools. A new application can be onboarded in about an hour using common tooling and CI/CD processes. The gains on that side have been huge. Before, it took at least a few days to get things sorted for a new application to deploy. That does not consider the training of our operations staff to manage this new application. It is two or three orders of magnitude of savings in time and cost, and operationally it has given us the opportunity to let a core team of talented operations engineers manage common infrastructure and tooling to make our applications available at web scale."</p>
{{< case-studies/quote author="Brad Linder, Cloud Native & Big Data Evangelist for Sling TV" >}}
"I would almost be so bold as to say that most of these applications that we are building now would not have been possible without the cloud native patterns and the flexibility that Kubernetes enables."
{{< /case-studies/quote >}}
{{< case-studies/lead >}}
The beauty of streaming television, like the service offered by <a href="https://www.sling.com/">Sling TV</a>, is that you can watch it from any device you want, wherever you want.
{{< /case-studies/lead >}}
<p>Of course, from the provider side of things, that creates a particular set of challenges: "We take live TV and distribute it over the internet out to a user's device that we do not control," says Brad Linder, Sling TV's Cloud Native & Big Data Evangelist. "In a lot of ways, we are working in the Wild West: The internet is what it is going to be, and if a customer's service does not work for whatever reason, they do not care why. They just want things to work. Those are the variables of the equation that we have to try to solve. We really have to try to enable optionality and we have to do it at web scale."</p>
<p>Indeed, Sling TV experienced great customer growth from the beginning of its launch by <a href="https://www.dish.com/">DISH Network</a> in 2015. After just a year, "we were going through some growing pains of some of the legacy systems and trying to find the right architecture to enable our future," says Linder. Tasked with building a next-generation web scale platform for the "personalized customer experience," Linder has spent the past year bringing Kubernetes to Sling TV.</p>
<p>Led by the belief that "the cloud native architectures and patterns really give us a lot of flexibility in meeting the needs of our customers," Linder partnered with <a href="http://rancher.com">Rancher Labs</a> to build the platform around Kubernetes. "They have really helped us get our head around how to use Kubernetes," he says. "We needed the flexibility to enable our use case versus just a simple orchestrator. Enabling our future in a way that did not give us vendor lock-in was also a key part of our strategy. I think that is part of the Rancher value proposition."</p>
{{< case-studies/quote
image="/images/case-studies/slingtv/banner3.jpg"
author="Brad Linder, Cloud Native & Big Data Evangelist for Sling TV"
>}}
"We needed the flexibility to enable our use case versus just a simple orchestrater. Enabling our future in a way that did not give us vendor lock-in was also a key part of our strategy. I think that is part of the Rancher value proposition."
{{< /case-studies/quote >}}
<p>One big reason he chose Kubernetes was getting a level of abstraction that would enable the company to "enable a hybrid cloud strategy including multiple public clouds and an on-premise VMware multi data center environment to meet the needs of the business," he says. Another factor was how much the Kubernetes ecosystem has matured over the past couple of years. "We have spent a lot of time and energy around making logging, monitoring and alerting production ready to give us insights into applications' well-being," says Linder. The team has added <a href="https://prometheus.io/">Prometheus</a> for monitoring and <a href="https://github.com/jaegertracing/jaeger">Jaeger</a> for tracing, to work alongside the company's existing tool sets: Zenoss, New Relic and ELK.</p>
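<p>One common way to wire application pods into Jaeger, assuming the agent runs as a DaemonSet on every node, is to point the standard Jaeger client environment variables at the node's IP through the downward API. The sketch below is illustrative only; the service and image names are hypothetical, not Sling TV's:</p>

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: session-api                  # hypothetical streaming back-end service
spec:
  replicas: 3
  selector:
    matchLabels:
      app: session-api
  template:
    metadata:
      labels:
        app: session-api
    spec:
      containers:
      - name: api
        image: registry.example.com/video/session-api:1.4.0
        env:
        - name: JAEGER_AGENT_HOST        # standard jaeger-client variable
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP   # reach the node-local jaeger-agent
        - name: JAEGER_SAMPLER_TYPE
          value: probabilistic
        - name: JAEGER_SAMPLER_PARAM
          value: "0.1"                   # sample 10% of traces
```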
<p>With the emphasis on common tooling, "We are getting to the place where we can one-click deploy an entire data center: the compute, network, Kubernetes, logging, monitoring and all the apps," says Linder. "We have really enabled a platform thinking based approach to allowing applications to consume common tools and services. A new application can be onboarded in about an hour using common tooling and CI/CD processes. The gains on that side have been huge. Before, it took at least a few days to get things sorted for a new application to deploy. That does not consider the training of our operations staff to manage this new application. It is two or three orders of magnitude of savings in time and cost, and operationally it has given us the opportunity to let a core team of talented operations engineers manage common infrastructure and tooling to make our applications available at web scale."</p>
{{< case-studies/quote
image="/images/case-studies/slingtv/banner4.jpg"
author="Brad Linder, Cloud Native & Big Data Evangelist for Sling TV"
>}}
"We have to be able to react to changes and hiccups in the matrix. It is the foundation for our ability to deliver a high-quality service for our customers."
{{< /case-studies/quote >}}
<p>The team launched its first applications on Kubernetes in Sling TV's two internal data centers in the early part of Q1 2018 and began to enable AWS as a data center option. The company plans to expand into other public clouds in the future.</p>
<p>The first application that went into production is a web socket-based back-end notification service. "It allows back-end changes to trigger messages to our clients in the field without the polling," says Linder. "We are talking about very high volumes of messages with this application. Without something like Kubernetes to be able to scale up and down, as well as just support that overall workload, that is pretty hard to do. I would almost be so bold as to say that most of these applications that we are building now would not have been possible without the cloud native patterns and the flexibility that Kubernetes enables."</p>
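<p>The case study does not spell out how that scaling is configured, but the standard Kubernetes primitive for it is a HorizontalPodAutoscaler. A minimal sketch using the current <code>autoscaling/v2</code> API, with hypothetical names and thresholds:</p>

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: notification-service
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: notification-service   # the websocket notification back end
  minReplicas: 3
  maxReplicas: 50
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70   # add pods when average CPU passes 70%
```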
<p>Linder oversees three teams working together on building the next-generation platform: a platform engineering team; an enterprise middleware services team; and a big data and analytics team. "We have really tried to bring everything together to be able to have a client application interact with a cloud native middleware layer. That middleware layer must run on a platform, consume platform services and then have logs and events monitored by an artificial agent to keep things running smoothly," says Linder.</p>
{{< case-studies/quote author="BRAD LINDER, CLOUD NATIVE & BIG DATA EVANGELIST FOR SLING TV">}}
This undertaking is about "trying to marry Kubernetes with AI to enable web scale that just works".
{{< /case-studies/quote >}}
<p>Ultimately, this undertaking is about "trying to marry Kubernetes with AI to enable web scale that just works," he adds. "We want the artificial agents and the big data platform using the actual logs and events coming out of the applications, Kubernetes, the infrastructure, backing services and changes to the environment to make decisions like, 'Hey we need more capacity for this service so please add more nodes.' From a platform perspective, if you are truly doing web scale stuff and you are not using AI and big data, in my opinion, you are going to implode under your own weight. It is not a question of if, it is when. If you are in a 'millions of users' sort of environment, that implosion is going to be catastrophic. We are on our way to this goal and have learned a lot along the way."</p>
<p>For Sling TV, moving to cloud native has been exactly what they needed. "We have to be able to react to changes and hiccups in the matrix," says Linder. "It is the foundation for our ability to deliver a high-quality service for our customers. Building intelligent platforms, tools and clients in the field consuming those services has got to be part of all of this. In my eyes that is a big part of what cloud native is all about. It is taking these distributed, potentially unreliable entities and enabling a robust customer experience they expect."</p>

@ -0,0 +1,83 @@
---
title: SOS International Case Study
linkTitle: SOS International
case_study_styles: true
cid: caseStudies
logo: sos_featured_logo.png
new_case_study_styles: true
heading_background: /images/case-studies/sos/banner1.jpg
heading_title_logo: /images/sos_logo.png
subheading: >
SOS International: Using Kubernetes to Provide Emergency Assistance in a Connected World
case_study_details:
- Company: SOS International
- Location: Frederiksberg, Denmark
- Industry: Medical and Travel Assistance
---
<h2>Challenge</h2>
<p>For the past six decades, SOS International has been providing reliable medical and travel assistance in the Nordic region. In recent years, the company's business strategy has required increasingly intense development in the digital space, but when it came to its IT systems, "SOS has a very fragmented legacy," with three traditional monoliths (Java, .NET, and IBM's AS/400) and a waterfall approach, says Martin Ahrentsen, Head of Enterprise Architecture. "We have been forced to institute both new technology and new ways of working, so we could be more efficient with a shorter time to market. It was a much more agile approach, and we needed to have a platform that can help us deliver that to the business."</p>
<h2>Solution</h2>
<p>After an unsuccessful search for a standard system, the company decided to take a platform approach and look for a solution that bundles Kubernetes with container technology. <a href="https://www.openshift.com/">Red Hat OpenShift</a> proved to be a perfect fit for SOS's fragmented systems. "We have a lot of different technologies that we use, both code languages and others, and all of them could use the resources on the new platform," says Ahrentsen. Of the company's three monoliths, "we can provide this new bleeding edge technology to two of them (.NET and Java)." The platform went live in the spring of 2018; there are now six greenfield projects based on microservices architecture underway, plus all of the company's Java applications are currently going through a "lift and shift" migration.</p>
<h2>Impact</h2>
<p>Kubernetes has delivered "improved time to market, agility, and the ability to adapt to changes and new technologies," says Ahrentsen. "Just the time between when the software is ready for release and when it can be released has dramatically been improved." The way of thinking at SOS International has also changed for the better: "Since we have Kubernetes and easy access to scripts that can help us automate, creating CI/CD pipelines easily, that has spawned a lot of internal interest in how to do this fully automated, all the way. It creates a very good climate in order to start the journey," he says. Moreover, being part of the cloud native community has helped the company attract talent. "They want to work with the cool, new technologies," says Ahrentsen. "During our onboarding, we could see that we were chosen by IT professionals because we provided the new technologies."</p>
{{< case-studies/quote author="Martin Ahrentsen, Head of Enterprise Architecture, SOS International" >}}
"The speed of the changes that cloud native software and technologies drive right now is amazing, and following and adopting it is very crucial for us. The amazing technology provided by Kubernetes and cloud native has started the change for SOS towards a digital future."
{{< /case-studies/quote >}}
{{< case-studies/lead >}}
For six decades, SOS International has provided reliable emergency medical and travel assistance for customers in the Nordic countries.
{{< /case-studies/lead >}}
<p>SOS operators handle a million cases and over a million phone calls a year. But in the past four years, the company's business strategy has required increasingly intense development in the digital space.</p>
<p>When it comes to its IT systems, "SOS has a very fragmented legacy," with three traditional monoliths running in the company's own data centers and a waterfall approach, says Martin Ahrentsen, Head of Enterprise Architecture. "We had to institute both new technology and new ways of working so we could be more efficient, with a shorter time to market. It was a much more agile approach, and we needed to have a platform that can help us deliver that to the business."</p>
<p>For a long time, Ahrentsen and his team searched for a standard solution that could work at SOS. "There aren't that many assistance companies like us, so you cannot get a standard system that fits for that; there is no perfect match," he says. "We would have to take a standard system and twist it too much so it is not standard anymore. Based on that, we decided to find a technology platform instead, with some common components that we could use to build the new digital systems and core systems."</p>
{{< case-studies/quote
image="/images/case-studies/sos/banner3.jpg"
author="Martin Ahrentsen, Head of Enterprise Architecture, SOS International"
>}}
"We have to deliver new digital services, but we also have to migrate the old stuff, and we have to transform our core systems into new systems built on top of this platform. One of the reasons why we chose this technology is that we could build new digital services while changing the old one."
{{< /case-studies/quote >}}
<p>Sold on what Kubernetes could do, Ahrentsen zeroed in on platforms that could meet the business's needs right away. The company opted to use Red Hat's OpenShift container platform, which incorporates Docker containers and Kubernetes, as well as a whole stack of technologies, including Red Hat Hyperconverged Infrastructure and some middleware components, all from the open source community.</p>
<p>Based on the company's criteria—technology fit, agility fit, legal requirements, and competencies—the OpenShift solution seemed like a perfect fit for SOS's fragmented systems. "We have a lot of different technologies that we use, both code languages and others, and all of them could use the resources on the new platform," says Ahrentsen. Of the company's three monoliths, "we can provide this new bleeding edge technology to two of them (.NET and Java)."</p>
<p>The platform went live in the spring of 2018; six greenfield projects based on microservices architecture were initially launched, plus all of the company's Java applications are currently going through a "lift and shift" migration. One of the first Kubernetes-based projects to go live is Remote Medical Treatment, a solution in which customers can contact the SOS alarm center via voice, chat, or video. "We managed to develop it in quite a short timeframe with focus on full CI/CD pipelining and a modern microservice architecture all running in a dual OpenShift cluster setup," says Ahrentsen. Onsite, which is used for dispatching rescue trucks around the Nordic countries, and Follow Your Truck, which allows customers to track tow trucks, are also being rolled out.</p>
{{< case-studies/quote
image="/images/case-studies/sos/banner4.jpg"
author="Martin Ahrentsen, Head of Enterprise Architecture, SOS International"
>}}
"During our onboarding, we could see that we were chosen by IT professionals because we provided the new technologies."
{{< /case-studies/quote >}}
<p>The platform is still running on premise, because some of SOS's customers in the insurance industry, for whom the company handles data, don't yet have a cloud strategy. Kubernetes is allowing SOS to start in the data center and move to the cloud when the business is ready. "Over the next three to five years, all of them will have a strategy, and we could probably take the data and go to the cloud," says Ahrentsen. There's also the possibility of moving to a hybrid cloud setup for sensitive and non-sensitive data.</p>
<p>SOS's technology is certainly in a state of transition. "We have to deliver new digital services, but we also have to migrate the old stuff, and we have to transform our core systems into new systems built on top of this platform," says Ahrentsen. "One of the reasons why we chose this technology is that we could build new digital services while changing the old one."</p>
<p>But already, Kubernetes has delivered improved time to market, as evidenced by how quickly the greenfield projects were developed and released. "Just the time between when the software is ready for release and when it can be released has dramatically been improved," says Ahrentsen.</p>
<p>Moreover, being part of the cloud native community has helped the company attract talent as it pursues a goal of growing the ranks of engineers, operators, and architects from 60 to 100 this year. "They want to work with the cool, new technologies," says Ahrentsen. "During our onboarding, we could see that we were chosen by IT professionals because we provided the new technologies."</p>
{{< case-studies/quote author="Martin Ahrentsen, Head of Enterprise Architecture, SOS International" >}}
"The future world where everything is connected and sends data will create a big potential for us in terms of new market opportunities. But it will also set a big demand on the IT platform and what we need to deliver."
{{< /case-studies/quote >}}
<p>The way of thinking at SOS International has also changed dramatically: "Since we have Kubernetes and easy access to scripts that can help us automate, creating CI/CD pipelines easily, that has spawned a lot of internal interest in how to do this fully automated, all the way. It creates a very good climate in order to start the journey."</p>
<p>For this journey at SOS, digitalization and optimization are the key words. "For IT to deliver this, we need to improve, and that is not just on the way of using Kubernetes and the platform," says Ahrentsen. "It's also a way of building the systems to be ready for automation, and afterwards, machine learning and other interesting technologies that are on the way."</p>
<p>Case in point: the introduction of the internet of things into automobiles. The European Commission now mandates that all new cars be equipped with <a href="https://ec.europa.eu/transport/themes/its/road/action_plan/ecall_en">eCall</a>, which transmits location and other data in case of a serious traffic accident. SOS provides this service as smart auto assistance. "We receive the call and find out if an emergency response team needs to be sent, or if it's not heavy impact," says Ahrentsen. "The future world where everything is connected and sends data will create a big potential for us in terms of new market opportunities. But it will also set a big demand on the IT platform and what we need to deliver."</p>
<p>Ahrentsen feels that SOS is well equipped for the challenge, given the technology choices the company has made. "The speed of the changes that cloud native software and technologies drive right now is amazing, and following it and adopting it is very crucial for us," he says. "The amazing technology provided by Kubernetes and cloud native has started the change for SOS towards a digital future."</p>

@ -0,0 +1,79 @@
---
title: Spotify Case Study
linkTitle: Spotify
case_study_styles: true
cid: caseStudies
featured: false
new_case_study_styles: true
heading_background: /images/case-studies/spotify/banner1.jpg
heading_title_text: Spotify
subheading: >
Spotify: An Early Adopter of Containers, Spotify Is Migrating from Homegrown Orchestration to Kubernetes
case_study_details:
- Company: Spotify
- Location: Global
- Industry: Entertainment
---
<h2>Challenge</h2>
<p>Launched in 2008, the audio-streaming platform has grown to over 200 million monthly active users across the world. "Our goal is to empower creators and enable a really immersive listening experience for all of the consumers that we have today—and hopefully the consumers we'll have in the future," says Jai Chakrabarti, Director of Engineering, Infrastructure and Operations. An early adopter of microservices and Docker, Spotify had containerized microservices running across its fleet of VMs with a homegrown container orchestration system called <a href="https://github.com/spotify/helios">Helios</a>. By late 2017, it became clear that "having a small team working on the features was just not as efficient as adopting something that was supported by a much bigger community," he says.</p>
<h2>Solution</h2>
<p>"We saw the amazing community that had grown up around Kubernetes, and we wanted to be part of that," says Chakrabarti. Kubernetes was more feature-rich than Helios. Plus, "we wanted to benefit from added velocity and reduced cost, and also align with the rest of the industry on best practices and tools." At the same time, the team wanted to contribute its expertise and influence in the flourishing Kubernetes community. The migration, which would happen in parallel with Helios running, could go smoothly because "Kubernetes fit very nicely as a complement and now as a replacement to Helios," says Chakrabarti.</p>
<h2>Impact</h2>
<p>The team spent much of 2018 addressing the core technology issues required for a migration, which started late that year and is a big focus for 2019. "A small percentage of our fleet has been migrated to Kubernetes, and some of the things that we've heard from our internal teams are that they have less of a need to focus on manual capacity provisioning and more time to focus on delivering features for Spotify," says Chakrabarti. The biggest service currently running on Kubernetes takes about 10 million requests per second as an aggregate service and benefits greatly from autoscaling, says Site Reliability Engineer James Wen. Plus, he adds, "Before, teams would have to wait for an hour to create a new service and get an operational host to run it in production, but with Kubernetes, they can do that on the order of seconds and minutes." In addition, with Kubernetes's bin-packing and multi-tenancy capabilities, CPU utilization has improved on average two- to threefold.</p>
{{< case-studies/quote author="Jai Chakrabarti, Director of Engineering, Infrastructure and Operations, Spotify" >}}
"We saw the amazing community that's grown up around Kubernetes, and we wanted to be part of that. We wanted to benefit from added velocity and reduced cost, and also align with the rest of the industry on best practices and tools."
{{< /case-studies/quote >}}
{{< case-studies/lead >}}
"Our goal is to empower creators and enable a really immersive listening experience for all of the consumers that we have today—and hopefully the consumers we'll have in the future," says Jai Chakrabarti, Director of Engineering, Infrastructure and Operations at Spotify. Since the audio-streaming platform launched in 2008, it has already grown to over 200 million monthly active users around the world, and for Chakrabarti's team, the goal is solidifying Spotify's infrastructure to support all those future consumers too.
{{< /case-studies/lead >}}
<p>An early adopter of microservices and Docker, Spotify had containerized microservices running across its fleet of VMs since 2014. The company used an open source, homegrown container orchestration system called Helios, and in 2016-17 completed a migration from on-premise data centers to Google Cloud. Underpinning these decisions, "We have a culture around autonomous teams, over 200 autonomous engineering squads who are working on different pieces of the pie, and they need to be able to iterate quickly," Chakrabarti says. "So for us to have developer velocity tools that allow squads to move quickly is really important."</p>
<p>But by late 2017, it became clear that "having a small team working on the <a href="https://github.com/spotify/helios">Helios</a> features was just not as efficient as adopting something that was supported by a much bigger community," says Chakrabarti. "We saw the amazing community that had grown up around Kubernetes, and we wanted to be part of that. We wanted to benefit from added velocity and reduced cost, and also align with the rest of the industry on best practices and tools." At the same time, the team wanted to contribute its expertise and influence in the flourishing Kubernetes community.</p>
{{< case-studies/quote
image="/images/case-studies/spotify/banner3.jpg"
author="Dave Zolotusky, Software Engineer, Infrastructure and Operations, Spotify"
>}}
"The community has been extremely helpful in getting us to work through all the technology much faster and much easier. And it's helped us validate all the things we're doing."
{{< /case-studies/quote >}}
<p>Another plus: "Kubernetes fit very nicely as a complement and now as a replacement to Helios, so we could have it running alongside Helios to mitigate the risks," says Chakrabarti. "During the migration, the services run on both, so we're not having to put all of our eggs in one basket until we can validate Kubernetes under a variety of load circumstances and stress circumstances."</p>
<p>The team spent much of 2018 addressing the core technology issues required for the migration. "We were able to use a lot of the Kubernetes APIs and extensibility features of Kubernetes to support and interface with our legacy infrastructure, so the integration was straightforward and easy," says Site Reliability Engineer James Wen.</p>
<p>Migration started late that year and has accelerated in 2019. "Our focus is really on stateless services, and once we address our last remaining technology blocker, that's where we hope that the uptick will come from," says Chakrabarti. "For stateful services there's more work that we need to do."</p>
<p>A small percentage of Spotify's fleet, containing over 150 services, has been migrated to Kubernetes so far. "We've heard from our customers that they have less of a need to focus on manual capacity provisioning and more time to focus on delivering features for Spotify," says Chakrabarti. The biggest service currently running on Kubernetes takes over 10 million requests per second as an aggregate service and benefits greatly from autoscaling, says Wen. Plus, Wen adds, "Before, teams would have to wait for an hour to create a new service and get an operational host to run it in production, but with Kubernetes, they can do that on the order of seconds and minutes." In addition, with Kubernetes's bin-packing and multi-tenancy capabilities, CPU utilization has improved on average two- to threefold.</p>
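<p>Utilization gains like that depend on workloads declaring accurate resource requests, which the scheduler uses to pack containers onto nodes. A minimal sketch of the relevant fields; the service name and numbers are hypothetical, not Spotify's:</p>

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: playlist-metadata            # hypothetical service name
spec:
  containers:
  - name: service
    image: registry.example.com/music/playlist-metadata:5.2.0
    resources:
      requests:
        cpu: 250m        # what the scheduler reserves when placing the pod
        memory: 512Mi
      limits:
        cpu: "1"         # hard ceiling enforced at runtime
        memory: 1Gi
```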
{{< case-studies/quote
image="/images/case-studies/spotify/banner4.jpg"
author="James Wen, Site Reliability Engineer, Spotify"
>}}
"We were able to use a lot of the Kubernetes APIs and extensibility features to support and interface with our legacy infrastructure, so the integration was straightforward and easy."
{{< /case-studies/quote >}}
<p>Chakrabarti points out that for all four of the top-level metrics that Spotify looks at—lead time, deployment frequency, time to resolution, and operational load—"there is impact that Kubernetes is having."</p>
<p>One success story that's come out of the early days of Kubernetes is a tool called Slingshot that a Spotify team built on Kubernetes. "With a pull request, it creates a temporary staging environment that self-destructs after 24 hours," says Chakrabarti. "It's all facilitated by Kubernetes, so that's kind of an exciting example of how, once the technology is out there and ready to use, people start to build on top of it and craft their own solutions, even beyond what we might have envisioned as the initial purpose of it."</p>
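<p>Slingshot's internals aren't described further here, but one minimal way to model such an environment is a namespace per pull request, stamped with an expiry that a scheduled cleanup job can act on. Everything in this sketch, including the annotation key, is hypothetical:</p>

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: pr-4711-staging            # one namespace per pull request
  labels:
    lifecycle: ephemeral           # selector for the cleanup job
  annotations:
    example.com/expires-at: "2019-06-01T00:00:00Z"   # 24 hours after creation
```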
<p>Spotify has also started to use <a href="https://grpc.io/">gRPC</a> and <a href="https://www.envoyproxy.io/">Envoy</a>, replacing existing homegrown solutions, just as it had with Kubernetes. "We created things because of the scale we were at, and there was no other solution existing," says Dave Zolotusky, Software Engineer, Infrastructure and Operations. "But then the community kind of caught up and surpassed us, even for tools that work at that scale."</p>
{{< case-studies/quote author="James Wen, Site Reliability Engineer, Spotify" >}}
"It's been surprisingly easy to get in touch with anybody we wanted to, to get expertise on any of the things we're working with. And it's helped us validate all the things we're doing."
{{< /case-studies/quote >}}
<p>Both of those technologies are in early stages of adoption, but already "we have reason to believe that gRPC will have a more drastic impact during early development by helping with a lot of issues like schema management, API design, weird backward compatibility issues, things like that," says Zolotusky. "So we're leaning heavily on gRPC to help us in that space."</p>
<p>As the team continues to fill out Spotify's cloud native stack—tracing is up next—it is using the CNCF landscape as a helpful guide. "We look at things we need to solve, and if there are a bunch of projects, we evaluate them equivalently, but there is definitely value to the project being a CNCF project," says Zolotusky.</p>
<p>Spotify's experiences so far with Kubernetes bear this out. "The community has been extremely helpful in getting us to work through all the technology much faster and much easier," Zolotusky says. "It's been surprisingly easy to get in touch with anybody we wanted to, to get expertise on any of the things we're working with. And it's helped us validate all the things we're doing."</p>

@ -5,9 +5,6 @@ weight: 90
---
<!--
reviewers:
- davidopp
- wojtek-t
title: Pod Priority and Preemption
content_type: concept
weight: 90

@ -148,7 +148,7 @@ IBM Cloud | https://www.ibm.com/cloud/security |
Microsoft Azure | https://docs.microsoft.com/en-us/azure/security/azure-security |
Oracle Cloud Infrastructure | https://www.oracle.com/security |
Tencent Cloud | https://www.tencentcloud.com/solutions/data-security-and-information-protection |
VMware vSphere | https://www.vmware.com/security/hardening-guides |
VMware vSphere | https://www.vmware.com/solutions/security/hardening-guides |
{{< /table >}}
-->
@ -164,7 +164,7 @@ IBM 云 | https://www.ibm.com/cloud/security |
微软 Azure | https://docs.microsoft.com/en-us/azure/security/azure-security |
Oracle 云基础设施| https://www.oracle.com/security |
腾讯云 | https://www.tencentcloud.com/solutions/data-security-and-information-protection |
VMware vSphere | https://www.vmware.com/security/hardening-guides |
VMware vSphere | https://www.vmware.com/solutions/security/hardening-guides |
{{< /table >}}

@ -707,7 +707,7 @@ built-in [Pod Security Admission Controller](/docs/concepts/security/pod-securit
<!--
### What about sandboxed Pods?
There is not currently an API standard that controls whether a Pod is considered sandboxed or
There is currently no API standard that controls whether a Pod is considered sandboxed or
not. Sandbox Pods may be identified by the use of a sandboxed runtime (such as gVisor or Kata
Containers), but there is no standard definition of what a sandboxed runtime is.
-->

@ -64,9 +64,9 @@ Do not manage ReplicaSets owned by a Deployment. Consider opening an issue in th
The following are typical use cases for Deployments:
-->
## 用例
## 用例 {#use-case}
以下是 Deployments 的典型用例:
以下是 Deployment 的典型用例:
<!--
* [Create a Deployment to rollout a ReplicaSet](#creating-a-deployment). The ReplicaSet creates Pods in the background. Check the status of the rollout to see if it succeeds or not.
@ -87,10 +87,10 @@ The following are typical use cases for Deployments:
* 如果 Deployment 的当前状态不稳定,[回滚到较早的 Deployment 版本](#rolling-back-a-deployment)。
每次回滚都会更新 Deployment 的修订版本。
* [扩大 Deployment 规模以承担更多负载](#scaling-a-deployment)。
* [暂停 Deployment 的上线](#pausing-and-resuming-a-deployment) 以应用对 PodTemplateSpec 所作的多项修改,
* [暂停 Deployment 的上线](#pausing-and-resuming-a-deployment)以应用对 PodTemplateSpec 所作的多项修改,
然后恢复其执行以启动新的上线版本。
* [使用 Deployment 状态](#deployment-status)来判定上线过程是否出现停滞。
* [清理较旧的不再需要的 ReplicaSet](#clean-up-policy)
* [清理较旧的不再需要的 ReplicaSet](#clean-up-policy)。
<!--
The following is an example of a Deployment. It creates a ReplicaSet to bring up three `nginx` Pods:
@ -155,12 +155,12 @@ Before you begin, make sure your Kubernetes cluster is up and running.
Follow the steps given below to create the above Deployment:
-->
开始之前,请确保的 Kubernetes 集群已启动并运行。
按照以下步骤创建上述 Deployment
按照以下步骤创建上述 Deployment
<!--
1. Create the Deployment by running the following command:
-->
1. 通过运行以下命令创建 Deployment
1. 通过运行以下命令创建 Deployment
```shell
kubectl apply -f https://k8s.io/examples/controllers/nginx-deployment.yaml
@ -255,11 +255,11 @@ Follow the steps given below to create the above Deployment:
-->
ReplicaSet 输出中包含以下字段:
* `NAME` 列出名字空间中 ReplicaSet 的名称
* `NAME` 列出名字空间中 ReplicaSet 的名称
* `DESIRED` 显示应用的期望副本个数,即在创建 Deployment 时所定义的值。
此为期望状态;
* `CURRENT` 显示当前运行状态中的副本个数
* `READY` 显示应用中有多少副本可以为用户提供服务
此为**期望状态**。
* `CURRENT` 显示当前运行状态中的副本个数
* `READY` 显示应用中有多少副本可以为用户提供服务
* `AGE` 显示应用已经运行的时间长度。
<!--
@ -307,7 +307,7 @@ Kubernetes 不会阻止你这样做,但是如果多个控制器具有重叠的
<!--
### Pod-template-hash label
-->
### Pod-template-hash 标签
### Pod-template-hash 标签 {#pod-template-hash-label}
{{< caution >}}
<!--
@ -358,6 +358,7 @@ Follow the steps given below to update your Deployment:
```shell
kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.16.1
```
<!--
or use the following command:
-->
@ -420,7 +421,10 @@ Follow the steps given below to update your Deployment:
```
Waiting for rollout to finish: 2 out of 3 new replicas have been updated...
```
<!-- or -->
<!--
or
-->
或者
```
@ -436,7 +440,7 @@ Get more details on your updated Deployment:
* After the rollout succeeds, you can view the Deployment by running `kubectl get deployments`.
The output is similar to this:
-->
* 在上线成功后,可以通过运行 `kubectl get deployments` 来查看 Deployment
* 在上线成功后,可以通过运行 `kubectl get deployments` 来查看 Deployment
输出类似于:
```
@ -578,7 +582,7 @@ up to 3 replicas, as well as scaling down the old ReplicaSet to 0 replicas.
It then continued scaling up and down the new and the old ReplicaSet, with the same rolling update strategy.
Finally, you'll have 3 available replicas in the new ReplicaSet, and the old ReplicaSet is scaled down to 0.
-->
可以看到,当第一次创建 Deployment 时,它创建了一个 ReplicaSet`nginx-deployment-2035384211`
可以看到,当第一次创建 Deployment 时,它创建了一个 ReplicaSet`nginx-deployment-2035384211`
并将其直接扩容至 3 个副本。更新 Deployment 时,它创建了一个新的 ReplicaSet
nginx-deployment-1564180365并将其扩容为 1等待其就绪然后将旧 ReplicaSet 缩容到 2
将新的 ReplicaSet 扩容到 2 以便至少有 3 个 Pod 可用且最多创建 4 个 Pod。
@ -607,7 +611,7 @@ the desired Pods. If the Deployment is updated, the existing ReplicaSet that con
match `.spec.selector` but whose template does not match `.spec.template` are scaled down. Eventually, the new
ReplicaSet is scaled to `.spec.replicas` and all old ReplicaSets is scaled to 0.
-->
### 翻转(多 Deployment 动态更新)
### 翻转(多 Deployment 动态更新) {#rollover-aka-multiple-updates-in-flight}
Deployment 控制器每次注意到新的 Deployment 时,都会创建一个 ReplicaSet 以启动所需的 Pod。
如果更新了 Deployment则控制标签匹配 `.spec.selector` 但模板不匹配 `.spec.template` 的 Pod 的现有 ReplicaSet 被缩容。
@ -620,8 +624,7 @@ as per the update and start scaling that up, and rolls over the ReplicaSet that
-- it will add it to its list of old ReplicaSets and start scaling it down.
-->
当 Deployment 正在上线时被更新Deployment 会针对更新创建一个新的 ReplicaSet
并开始对其扩容,之前正在被扩容的 ReplicaSet 会被翻转,添加到旧 ReplicaSet 列表
并开始缩容。
并开始对其扩容,之前正在被扩容的 ReplicaSet 会被翻转,添加到旧 ReplicaSet 列表并开始缩容。
<!--
For example, suppose you create a Deployment to create 5 replicas of `nginx:1.14.2`,
@ -631,8 +634,8 @@ killing the 3 `nginx:1.14.2` Pods that it had created, and starts creating
`nginx:1.16.1` Pods. It does not wait for the 5 replicas of `nginx:1.14.2` to be created
before changing course.
-->
例如,假定你在创建一个 Deployment 以生成 `nginx:1.14.2` 的 5 个副本,但接下来
更新 Deployment 以创建 5 个 `nginx:1.16.1` 的副本,而此时只有 3 个 `nginx:1.14.2`
例如,假定你在创建一个 Deployment 以生成 `nginx:1.14.2` 的 5 个副本,但接下来更新
Deployment 以创建 5 个 `nginx:1.16.1` 的副本,而此时只有 3 个 `nginx:1.14.2`
副本已创建。在这种情况下Deployment 会立即开始杀死 3 个 `nginx:1.14.2` Pod
并开始创建 `nginx:1.16.1` Pod。它不会等待 `nginx:1.14.2` 的 5
个副本都创建完成后才开始执行变更动作。
@ -868,7 +871,7 @@ Deployment 被触发上线时,系统就会创建 Deployment 的新的修订版
Follow the steps given below to check the rollout history:
-->
### 检查 Deployment 上线历史
### 检查 Deployment 上线历史 {#checking-rollout-history-of-a-deployment}
按照如下步骤检查回滚历史:
@ -998,7 +1001,7 @@ Follow the steps given below to rollback the Deployment from the current version
<!--
2. Check if the rollback was successful and the Deployment is running as expected, run:
-->
2. 检查回滚是否成功以及 Deployment 是否正在运行,运行:
2. 检查回滚是否成功以及 Deployment 是否正在运行,你可以运行:
```shell
kubectl get deployment nginx-deployment
@ -1099,7 +1102,7 @@ Assuming [horizontal Pod autoscaling](/docs/tasks/run-application/horizontal-pod
in your cluster, you can set up an autoscaler for your Deployment and choose the minimum and maximum number of
Pods you want to run based on the CPU utilization of your existing Pods.
-->
假设集群启用了[Pod 的水平自动缩放](/zh-cn/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/)
假设集群启用了 [Pod 的水平自动缩放](/zh-cn/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/)
你可以为 Deployment 设置自动缩放器,并基于现有 Pod 的 CPU 利用率选择要运行的
Pod 个数下限和上限。
@ -1129,7 +1132,7 @@ ReplicaSets (ReplicaSets with Pods) in order to mitigate risk. This is called *p
RollingUpdate 的 Deployment 支持同时运行应用程序的多个版本。
当自动缩放器缩放处于上线进程(仍在进行中或暂停)中的 RollingUpdate Deployment 时,
Deployment 控制器会平衡现有的活跃状态的 ReplicaSet含 Pod 的 ReplicaSet中的额外副本
以降低风险。这称为 *比例缩放Proportional Scaling*
以降低风险。这称为**比例缩放Proportional Scaling**
<!--
For example, you are running a Deployment with 10 replicas, [maxSurge](#max-surge)=3, and [maxUnavailable](#max-unavailable)=2.
@ -1145,6 +1148,7 @@ For example, you are running a Deployment with 10 replicas, [maxSurge](#max-surg
```shell
kubectl get deploy
```
<!--
The output is similar to this:
-->
@ -1407,7 +1411,6 @@ apply multiple fixes in between pausing and resuming without triggering unnecess
<!--
The initial state of the Deployment prior to pausing its rollout will continue its function, but new updates to
the Deployment will not have any effect as long as the Deployment rollout is paused.
-->
暂停 Deployment 上线之前的初始状态将继续发挥作用,但新的更新在 Deployment
上线被暂停期间不会产生任何效果。
@ -1495,7 +1498,7 @@ You cannot rollback a paused Deployment until you resume it.
A Deployment enters various states during its lifecycle. It can be [progressing](#progressing-deployment) while
rolling out a new ReplicaSet, it can be [complete](#complete-deployment), or it can [fail to progress](#failed-deployment).
-->
## Deployment 状态 {#deployment-status}
## Deployment 状态 {#deployment-status}
Deployment 的生命周期中会有许多状态。上线新的 ReplicaSet 期间可能处于
[Progressing进行中](#progressing-deployment),可能是
@ -1509,7 +1512,7 @@ Kubernetes marks a Deployment as _progressing_ when one of the following tasks i
-->
### 进行中的 Deployment {#progressing-deployment}
执行下面的任务期间Kubernetes 标记 Deployment 为**进行中**Progressing_
执行下面的任务期间Kubernetes 标记 Deployment 为**进行中**Progressing
<!--
* The Deployment creates a new ReplicaSet.
@ -1517,9 +1520,9 @@ Kubernetes marks a Deployment as _progressing_ when one of the following tasks i
* The Deployment is scaling down its older ReplicaSet(s).
* New Pods become ready or available (ready for at least [MinReadySeconds](#min-ready-seconds)).
-->
* Deployment 创建新的 ReplicaSet
* Deployment 正在为其最新的 ReplicaSet 扩容
* Deployment 正在为其旧有的 ReplicaSet(s) 缩容
* Deployment 创建新的 ReplicaSet
* Deployment 正在为其最新的 ReplicaSet 扩容
* Deployment 正在为其旧有的 ReplicaSet 缩容
* 新的 Pod 已经就绪或者可用(就绪至少持续了 [MinReadySeconds](#min-ready-seconds) 秒)。
<!--
@ -1545,7 +1548,7 @@ Kubernetes marks a Deployment as _complete_ when it has the following characteri
-->
### 完成的 Deployment {#complete-deployment}
当 Deployment 具有以下特征时Kubernetes 将其标记为**完成Complete**;
当 Deployment 具有以下特征时Kubernetes 将其标记为**完成Complete**
<!--
* All of the replicas associated with the Deployment have been updated to the latest version you've specified, meaning any
@ -1605,6 +1608,7 @@ and the exit status from `kubectl rollout` is 0 (success):
```shell
echo $?
```
```
0
```
@ -1651,8 +1655,8 @@ Deployment progress has stalled.
The following `kubectl` command sets the spec with `progressDeadlineSeconds` to make the controller report
lack of progress of a rollout for a Deployment after 10 minutes:
-->
以下 `kubectl` 命令设置规约中的 `progressDeadlineSeconds`,从而告知控制器
10 分钟后报告 Deployment 的上线没有进展:
以下 `kubectl` 命令设置规约中的 `progressDeadlineSeconds`,从而告知控制器
10 分钟后报告 Deployment 的上线没有进展:
```shell
kubectl patch deployment/nginx-deployment -p '{"spec":{"progressDeadlineSeconds":600}}'
@ -1746,8 +1750,8 @@ Conditions:
<!--
If you run `kubectl get deployment nginx-deployment -o yaml`, the Deployment status is similar to this:
-->
如果运行 `kubectl get deployment nginx-deployment -o yaml`Deployment 状态输出
将类似于这样:
如果运行 `kubectl get deployment nginx-deployment -o yaml`
Deployment 状态输出将类似于这样:
```
status:
@ -1852,6 +1856,7 @@ and the exit status from `kubectl rollout` is 1 (indicating an error):
```shell
echo $?
```
```
1
```
@ -1889,6 +1894,27 @@ thus that Deployment will not be able to roll back.
显式将此字段设置为 0 将导致 Deployment 的所有历史记录被清空,因此 Deployment 将无法回滚。
{{< /note >}}
<!--
The cleanup only starts **after** a Deployment reaches a
[complete state](/docs/concepts/workloads/controllers/deployment/#complete-deployment).
If you set `.spec.revisionHistoryLimit` to 0, any rollout nonetheless triggers creation of a new
ReplicaSet before Kubernetes removes the old one.
-->
清理仅在 Deployment
达到[完整状态](/zh-cn/docs/concepts/workloads/controllers/deployment/#complete-deployment)**之后**才会开始。
如果你将 `.spec.revisionHistoryLimit` 设置为 0任何上线更新都会触发创建一个新的 ReplicaSet
然后 Kubernetes 才会移除旧的 ReplicaSet。
<!--
Even with a non-zero revision history limit, you can have more ReplicaSets than the limit
you configure. For example, if pods are crash looping, and there are multiple rolling updates
events triggered over time, you might end up with more ReplicaSets than the
`.spec.revisionHistoryLimit` because the Deployment never reaches a complete state.
-->
即使使用非零的修订历史限制,你可以使用的 ReplicaSet 的数量仍可能超过你配置的限制值。
例如,如果 Pod 反复崩溃,并且在一段时间内触发了多个滚动更新事件,
你可能会由于 Deployment 从未达到完整状态而导致 ReplicaSet 数量超过 `.spec.revisionHistoryLimit`
<!--
## Canary Deployment
@ -1981,7 +2007,7 @@ deployment --replicas=X`, and then you update that Deployment based on a manifes
(for example: by running `kubectl apply -f deployment.yaml`),
then applying that manifest overwrites the manual scaling that you previously did.
-->
如果你对某个 Deployment 执行了手动扩缩操作(例如,通过
如果你对某个 Deployment 执行了手动扩缩操作(例如,通过
`kubectl scale deployment deployment --replicas=X`
之后基于清单对 Deployment 执行了更新操作(例如通过运行
`kubectl apply -f deployment.yaml`),那么通过应用清单而完成的更新会覆盖之前手动扩缩所作的变更。
@ -2099,8 +2125,8 @@ the rolling update process.
-->
#### 滚动更新 Deployment {#rolling-update-deployment}
Deployment 会在 `.spec.strategy.type==RollingUpdate`时,采取
滚动更新的方式更新 Pod。你可以指定 `maxUnavailable``maxSurge`
Deployment 会在 `.spec.strategy.type==RollingUpdate`时,
采取滚动更新的方式更新 Pod。你可以指定 `maxUnavailable``maxSurge`
来控制滚动更新过程。
<!--
@ -2126,7 +2152,7 @@ Pods immediately when the rolling update starts. Once new Pods are ready, old Re
down further, followed by scaling up the new ReplicaSet, ensuring that the total number of Pods available
at all times during the update is at least 70% of the desired Pods.
-->
例如,当此值设置为 30% 时,滚动更新开始时会立即将旧 ReplicaSet 缩容到期望 Pod 个数的70%。
例如,当此值设置为 30% 时,滚动更新开始时会立即将旧 ReplicaSet 缩容到期望 Pod 个数的 70%。
新 Pod 准备就绪后,可以继续缩容旧有的 ReplicaSet然后对新的 ReplicaSet 扩容,
确保在更新期间可用的 Pod 总数在任何时候都至少为所需的 Pod 个数的 70%。
@ -2300,7 +2326,7 @@ a Pod is considered ready, see [Container Probes](/docs/concepts/workloads/pods/
A Deployment's revision history is stored in the ReplicaSets it controls.
-->
### 修订历史限制
### 修订历史限制 {#revision-history-limit}
Deployment 的修订历史记录存储在它所控制的 ReplicaSet 中。
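For reference, the two fields these hunks keep returning to, `revisionHistoryLimit` and `progressDeadlineSeconds`, sit directly under `.spec` in a Deployment manifest. A minimal illustrative sketch (not part of the diff), reusing the doc's own `nginx-deployment` example:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  revisionHistoryLimit: 10         # old ReplicaSets retained for rollback
  progressDeadlineSeconds: 600     # report a stalled rollout after 10 minutes
  selector:
    matchLabels:
      app: nginx
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.16.1
```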

@ -43,8 +43,8 @@ where you can say hello.
-->
## 为现有的本地化做出贡献 {#contribute-to-an-existing-localization}
你可以帮助添加或改进现有本地化的内容。在 [Kubernetes Slack](https://slack.k8s.io/) 中,
你能找到每个本地化的频道。还有一个通用的
你可以帮助添加或改进现有本地化的内容。在 [Kubernetes Slack](https://slack.k8s.io/)
中,你能找到每个本地化的频道。还有一个通用的
[SIG Docs Localizations Slack 频道](https://kubernetes.slack.com/messages/sig-docs-localizations)
你可以在这里打个招呼。
@ -113,13 +113,6 @@ Create or update your chosen localized page based on the English original. See
If you notice a technical inaccuracy or other problem with the upstream
(English) documentation, you should fix the upstream documentation first and
then repeat the equivalent fix by updating the localization you're working on.
Limit changes in a pull requests to a single localization. Reviewing pull
requests that change content in multiple localizations is problematic.
Follow [Suggesting Content Improvements](/docs/contribute/suggesting-improvements/)
to propose changes to that localization. The process is similar to proposing
changes to the upstream (English) content.
-->
### 建议更改 {#suggest-changes}
@ -129,6 +122,14 @@ changes to the upstream (English) content.
如果你发现上游(英文)文档存在技术错误或其他问题,
你应该先修复上游文档,然后通过更新你正在处理的本地化来重复等效的修复。
<!--
Limit changes in a pull requests to a single localization. Reviewing pull
requests that change content in multiple localizations is problematic.
Follow [Suggesting Content Improvements](/docs/contribute/suggesting-improvements/)
to propose changes to that localization. The process is similar to proposing
changes to the upstream (English) content.
-->
请将 PR 限制为单个语言版本,因为多语言的 PR 内容修改可能难以审查。
按照[内容改进建议](/zh-cn/docs/contribute/suggesting-improvements/)提出对该本地化的更改。
@ -201,8 +202,8 @@ questions.
### 找到社区 {#find-community}
让 Kubernetes SIG Docs 知道你有兴趣创建本地化!
加入 [SIG Docs Slack 频道](https://kubernetes.slack.com/messages/sig-docs)
[SIG Docs Localizations Slack 频道](https://kubernetes.slack.com/messages/sig-docs-localizations)。
加入 [SIG Docs Slack 频道](https://kubernetes.slack.com/messages/sig-docs)
[SIG Docs Localizations Slack 频道](https://kubernetes.slack.com/messages/sig-docs-localizations)。
其他本地化团队很乐意帮助你入门并回答你的问题。
<!--
@ -240,7 +241,7 @@ GitHub organization. Each person on the team needs to create their own
[Organization Membership Request](https://github.com/kubernetes/org/issues/new/choose)
in the `kubernetes/org` repository.
-->
### 加入到 Kubernetes GitHub 组织 {#join-the-kubernetes-github-organization}
### 加入到 Kubernetes GitHub 组织 {#join-the-kubernetes-github-organization}
提交本地化 PR 后,你可以成为 Kubernetes GitHub 组织的成员。
团队中的每个人都需要在 `kubernetes/org`
@ -253,7 +254,14 @@ Next, add your Kubernetes localization team to
[`sig-docs/teams.yaml`](https://github.com/kubernetes/org/blob/main/config/kubernetes/sig-docs/teams.yaml).
For an example of adding a localization team, see the PR to add the
[Spanish localization team](https://github.com/kubernetes/org/pull/685).
-->
### 在 GitHub 中添加你的本地化团队 {#add-your-localization-team-in-github}
接下来,将你的 Kubernetes 本地化团队添加到
[`sig-docs/teams.yaml`](https://github.com/kubernetes/org/blob/main/config/kubernetes/sig-docs/teams.yaml)。
有关添加本地化团队的示例,请参见添加[西班牙本地化团队](https://github.com/kubernetes/org/pull/685)的 PR。
<!--
Members of `@kubernetes/sig-docs-**-owners` can approve PRs that change content
within (and only within) your localization directory: `/content/**/`. For each
localization, The `@kubernetes/sig-docs-**-reviews` team automates review
@ -262,12 +270,6 @@ new localization branches to coordinate translation efforts. Members of
`@kubernetes/website-milestone-maintainers` can use the `/milestone`
[Prow command](https://prow.k8s.io/command-help) to assign a milestone to issues or PRs.
-->
### 在 GitHub 中添加你的本地化团队 {#add-your-localization-team-in-github}
接下来,将你的 Kubernetes 本地化团队添加到
[`sig-docs/teams.yaml`](https://github.com/kubernetes/org/blob/main/config/kubernetes/sig-docs/teams.yaml)。
有关添加本地化团队的示例,请参见添加[西班牙本地化团队](https://github.com/kubernetes/org/pull/685)的 PR。
`@kubernetes/sig-docs-**-owners` 成员可以批准更改对应本地化目录 `/content/**/` 中内容的 PR并仅限这类 PR。
对于每个本地化,`@kubernetes/sig-docs-**-reviews` 团队被自动分派新 PR 的审阅任务。
`@kubernetes/website-maintainers` 成员可以创建新的本地化分支来协调翻译工作。
@ -314,11 +316,16 @@ Kubernetes 网站使用 Hugo 作为其 Web 框架。网站的 Hugo 配置位于
```toml
[languages.de]
title = "Kubernetes"
description = "Produktionsreife Container-Verwaltung"
languageName = "Deutsch (German)"
languageNameLatinScript = "Deutsch"
weight = 5
contentDir = "content/de"
weight = 8
languagedirection = "ltr"
[languages.de.params]
time_format_blog = "02.01.2006"
language_alternatives = ["en"]
description = "Produktionsreife Container-Orchestrierung"
languageNameLatinScript = "Deutsch"
```
<!--
@ -377,24 +384,16 @@ mkdir content/de
```
<!--
You also need to create a directory inside `i18n` for
[localized strings](#site-strings-in-i18n); look at existing localizations
for an example.

For example, for German the strings live in `i18n/de/de.toml`.
-->
你还需要在 `i18n` 中为[本地化字符串](#site-strings-in-i18n)创建一个目录;
以现有的本地化为例。

例如,对于德语,字符串位于 `i18n/de/de.toml`。
<!--
### Localize the community code of conduct
@ -446,13 +445,25 @@ language code `es`, looks like this:
语言代码为 `es` 的[西班牙语 OWNERS 文件](https://git.k8s.io/website/content/es/OWNERS)看起来像:
<!--
```yaml
# See the OWNERS docs at https://go.k8s.io/owners
# This is the localization project for Spanish.
# Teams and members are visible at https://github.com/orgs/kubernetes/teams.
reviewers:
- sig-docs-es-reviews
approvers:
- sig-docs-es-owners
labels:
- area/localization
- language/es
```
-->
```yaml
# 参见 OWNERS 文档https://go.k8s.io/owners
# 这是西班牙语的本地化项目
# 各团队和成员名单位于 https://github.com/orgs/kubernetes/teams
reviewers:
- sig-docs-es-reviews
approvers:
- sig-docs-es-owners
labels:
- area/localization
- language/es
```
@ -479,7 +491,8 @@ in alphabetical order.
-->
添加了特定语言的 OWNERS 文件之后,使用新的 Kubernetes 本地化团队、
`sig-docs-**-owners``sig-docs-**-reviews`
列表更新[根目录下的 OWNERS_ALIASES](https://git.k8s.io/website/OWNERS_ALIASES) 文件。
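<!--
As an illustrative sketch, an alias entry in `OWNERS_ALIASES` looks roughly
like this; the GitHub handles are hypothetical placeholders:
-->
作为示意,`OWNERS_ALIASES` 中的别名条目大致如下GitHub 用户名为假设的占位符):

```yaml
aliases:
  sig-docs-es-owners: # 成员按字母顺序排列
    - example-approver-1
    - example-approver-2
  sig-docs-es-reviews:
    - example-reviewer-1
```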
对于每个团队,
请按字母顺序添加[在 GitHub 中添加你的本地化团队](#add-your-localization-team-in-github)中所请求的
@ -613,7 +626,7 @@ Releases | [All heading and subheading URLs](/releases)
-----|-----
主页 | [所有标题和副标题网址](/zh-cn/docs/home/)
安装 | [所有标题和副标题网址](/zh-cn/docs/setup/)
教程 | [Kubernetes 基础](/zh-cn/docs/tutorials/kubernetes-basics/)、[Hello Minikube](/zh-cn/docs/tutorials/hello-minikube/)
网站字符串 | [所有网站字符串](#site-strings-in-i18n)
发行版本 | [所有标题和副标题 URL](/zh-cn/releases)
@ -776,24 +789,24 @@ release: v{{< skew nextMinorVersion >}}.
### Site strings in i18n
Localizations must include the contents of
[`i18n/en/en.toml`](https://github.com/kubernetes/website/blob/main/i18n/en/en.toml)
in a new language-specific file. Using German as an example:
`i18n/de/de.toml`.

Add a new localization directory and file to `i18n/`. For example, with
German (`de`):
-->
### i18n/ 中的网站字符串 {#site-strings-in-i18n}
本地化必须在新的语言特定文件中包含
[`i18n/en/en.toml`](https://github.com/kubernetes/website/blob/main/i18n/en/en.toml)
的内容。以德语为例:`i18n/de/de.toml`。
将新的本地化文件和目录添加到 `i18n/`。例如德语(`de`
```bash
mkdir -p i18n/de
cp i18n/en/en.toml i18n/de/de.toml
```
<!--

View File

@ -1060,7 +1060,7 @@ Here is the `{{</* figure */>}}` shortcode for the diagram defined in an
`.svg` image file saved to `/images/docs/components-of-kubernetes.svg`:
```none
{{</* figure src="/images/docs/components-of-kubernetes.svg" alt="Kubernetes pod running inside a cluster" class="diagram-large" caption="Figure 4. Kubernetes Architecture Components" */>}}
```
-->
**图表本身**

View File

@ -105,21 +105,21 @@ You can declare a `prerequisites` heading as follows:
<!--
The `heading` shortcode expects one string parameter.
The heading string parameter matches the prefix of a variable in the `i18n/<lang>/<lang>.toml` files.
For example:
-->
短代码 `heading` 需要一个字符串参数。
该字符串参数要与 `i18n/<语言>/<语言>.toml` 文件中以其为前缀的某个变量匹配。
例如:
`i18n/en/en.toml`:
```toml
[whatsnext_heading]
other = "What's next"
```
`i18n/ko/ko.toml`:
```toml
[whatsnext_heading]

File diff suppressed because one or more lines are too long


View File

@ -143,6 +143,34 @@ other admission controllers.
增加配额用量就是一个典型的示例,说明了这样做的必要性。
此类用法都需要相应的回收或回调过程,因为任一准入控制器都无法确定某个请求能否通过所有其它准入控制器。
<!--
The ordering of these calls can be seen below.
-->
这些调用的顺序如下所示。
<!--
Sequence diagram for kube-apiserver handling requests during the admission phase
showing mutation webhooks, followed by validatingadmissionpolicies and finally
validating webhooks. It shows that the calls continue until the first rejection,
or until the request is accepted by all of them. It also shows that mutations by mutating
webhooks cause all previously called webhooks to be called again.
-->
{{< figure src="/zh-cn/docs/reference/access-authn-authz/admission-control-phases.svg" alt="kube-apiserver 在准入阶段处理请求的时序图,展示了变更性 Webhook随后是验证准入策略ValidatingAdmissionPolicies最后是验证性 Webhook。此时序图表明请求会持续经过这些步骤直到遇到第一个被拒绝的情况或者被所有检查接受。此外此图还显示变更性 Webhook 所做的变更会导致所有之前调用过的 Webhook 被重新调用。" class="diagram-large" link="[https://mermaid.live/edit#pako:eNqtVm1r3DgQ_iuDj9CUc3aPlBa6HIFeSu_CEQhNr4XiL7I9a6srSz5J3mQb9r93RrK9jjcp9-H8xdZoXh7N80jyQ1KYEpNV4vDfDnWB76WorGgynemTE_hLbBG8AYce1kb7W_kdoVImF0rtQDjwtXQgnX7hwaJrsfBYQtmFoNr71q2Wy0r6ussXhWmWDdpGyPLsmxs-l9K5Dt3y1du3v3HJB6mlXz1kia-xwSxZZYnGzluhsiTNkgEETUCWnJ-392SmrwE-2ym4kdYa-67wxjoyedvhPs000NNn_iysFLlCFyPCVJwWHPXHpgq1f3l1_qbA11x77vIJ7_2lUcYGx7taepy5KWPaqRc8l08bj1Rx4ldZ3M2cnlp6pvf7_ckJsxVdibNPkRKiBkEof-YJAZFnQRQFOidzqaTfpSB0Ca42nSohR-jaUjB3uEW7Ay8bDAnKKAfKt4gFKMl7dIWd9uy2b_7ozdU2XY5nopUOLaWEmsopqSuSCTk770gllscBZtmQDKTR0NbCIcO647mm88Kz-Q7z2piNSym1UuaOgOY72AolCTV5jglao2Qh0YXVraUOOj34jYkWcIB_5UNB7pjwAU9BrZaaVNzRWwXTWlrHGv9GEqc6KdASc-SU3NbWR0RUDsyaA5pZBaGcmZYZluY4LA4m8KAQncOQrrW4laZztI6CxlRndKI9Rsz1VlEJqXuS9oMcWmE99aMV2sM_xARv2fA-nn53c8WzfxNtVqOnFrLlNrD3hHfna3bnN1KTisjTr8FgrPwexqMmH4WWzaW3KkSPvF9Sx61RMSA39_Anrcblxho49oLfc3txGZcdGZqxc4z3uu_wl9g7Lj6YoLedupfHcZ9H6dyYAPlgmOC66VX3s_hJ5UmOeW3U5WEzB6bOLi4CEyv4GHcOnOKiWqRQWKQdCwJaU77sCWXHEEAsrKbkkJQD_bQruHlFjcUmmlo6h-My3FCXzy34wCcG6W_eJneQdRABl5t1dwVXems2-LPYOSEH1NemlOsd76_IJ5g8vE7lGjRiieW0V0d4J819TMuI9hGnI9Zn4x5L4IDz439ER3J4CtzQEpCaXVjN6lmg88Y-kef_ATvWJiWRgPisnTDRn92DToLa2JmFyjVcSypCGBTqunDjcALk-5iKJWnSX_z0zxGukMNNT5-lsJtwq5Gf6Ly53ekiXt9pYk1X1clqTScpjeJ91f-tjFYsJd3M1_GXJvzZpAntw6_GDD77H6uICLI](https://mermaid.live/edit#pako:eNqtVm1r3DgQ_iuDj9CUc3aPlBa6HIFeSu_CEQhNr4XiL7I9a6srSz5J3mQb9r93RrK9jjcp9-H8xdZoXh7N80jyQ1KYEpNV4vDfDnWB76WorGgynemTE_hLbBG8AYce1kb7W_kdoVImF0rtQDjwtXQgnX7hwaJrsfBYQtmFoNr71q2Wy0r6ussXhWmWDdpGyPLsmxs-l9K5Dt3y1du3v3HJB6mlXz1kia-xwSxZZYnGzluhsiTNkgEETUCWnJ-392SmrwE-2ym4kdYa-67wxjoyedvhPs000NNn_iysFLlCFyPCVJwWHPXHpgq1f3l1_qbA11x77vIJ7_2lUcYGx7taepy5KWPaqRc8l08bj1Rx4ldZ3M2cnlp6pvf7_ckJsxVdibNPkRKiBkEof-YJAZFnQRQFOidzqaTfpSB0Ca42nSohR-jaUjB3uEW7Ay8bDAnKKAfKt4gFKMl7dIWd9uy2b_7ozdU2XY5nopUOLaWEmsopqSuSCTk770gllscBZtmQDKTR0NbCIcO647mm88Kz-Q7z2piNSym1UuaOgOY72AolCTV5jglao2Qh0YXVraUOOj34jYkWcIB_5UNB7pjwAU9BrZaaVNzRWwXTWlrHGv9GEqc6KdASc-SU3NbWR0RUDsyaA5pZBaGcmZYZluY4LA4m8KAQncOQrrW4laZztI6CxlRndKI9Rsz1VlEJqXuS9oMcWmE99aMV2sM_xARv2fA-nn53c8WzfxNtVqOnFrLlNrD3hHfna3bnN1KTisjTr8FgrPwexqMmH4WWzaW3KkSPvF9Sx61RMSA39_Anrcblxho49oLfc3txGZcdGZqxc4z3uu_wl9g7Lj6YoLedupfHcZ9H6dyYAPlgmOC66VX3s_hJ5UmOeW3U5WEzB6bOLi4CEyv4GHcOnOKiWqRQWKQdCwJaU77sCWXHEEAsrKbkkJQD_bQruHlFjcUmmlo6h-My3FCXzy34wCcG6W_eJneQdRABl5t1dwVXems2-LPYOSEH1NemlOsd76_IJ5g8vE7lGjRiieW0V0d4J819TMuI9hGnI9Zn4x5L4IDz439ER3J4CtzQEpCaXVjN6lmg88Y-kef_ATvWJiWRgPisnTDRn92DToLa2JmFyjVcSypCGBTqunDjcALk-5iKJWnSX_z0zxGukMNNT5-lsJtwq5Gf6Ly53ekiXt9pYk1X1clqTScpjeJ91f-tjFYsJd3M1_GXJvzZpAntw6_GDD77H6uICLI)" >}}
<!--
## Why do I need them?
Several important features of Kubernetes require an admission controller to be enabled in order
to properly support the feature. As a result, a Kubernetes API server that is not properly
configured with the right set of admission controllers is an incomplete server and will not
support all the features you expect.
-->
## 为什么需要准入控制器? {#why-do-i-need-them}
Kubernetes 的多个重要特性需要启用某个准入控制器,才能正确支持对应的特性。
因此,如果 Kubernetes API 服务器未正确配置相应的准入控制器集,
那么这种 API 服务器将是不完整的,并且无法支持你所期望的所有特性。
<!--
## How do I turn on an admission controller?
@ -377,14 +405,20 @@ will get the default one.
这样,没有任何特殊存储类需求的用户根本不需要关心它们,它们将被设置为使用默认存储类。
<!--
This admission controller does nothing when no default `StorageClass` exists. When more than one storage
class is marked as default, and you then create a `PersistentVolumeClaim` with no `storageClassName` set,
Kubernetes uses the most recently created default `StorageClass`.
When a `PersistentVolumeClaim` is created with a specified `volumeName`, it remains in a pending state
if the static volume's `storageClassName` does not match the `storageClassName` on the `PersistentVolumeClaim`
after any default StorageClass is applied to it.
This admission controller ignores any `PersistentVolumeClaim` updates; it acts only on creation.
-->
当默认的 `StorageClass` 不存在时,此准入控制器不执行任何操作。如果将多个存储类标记为默认存储类,
而且你之后在未设置 `storageClassName` 的情况下创建 `PersistentVolumeClaim`
Kubernetes 将使用最近创建的默认 `StorageClass`
当使用指定的 `volumeName` 创建 `PersistentVolumeClaim` 时,如果在应用任意默认的 StorageClass 之后,
静态卷的 `storageClassName``PersistentVolumeClaim` 上的 `storageClassName` 不匹配,
`PersistentVolumeClaim` 保持在 Pending 状态。
此准入控制器会忽略所有 `PersistentVolumeClaim` 更新操作,仅处理创建操作。
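<!--
As background, a `StorageClass` is marked as the default using the
`storageclass.kubernetes.io/is-default-class` annotation. A minimal sketch
(the name and provisioner are illustrative):
-->
作为背景StorageClass 是通过 `storageclass.kubernetes.io/is-default-class`
注解被标记为默认存储类的。下面是一个最简示意(名称和制备器仅为举例):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard # 假设的名称
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/no-provisioner # 示例制备器,实际取决于环境
```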
<!--

View File

@ -348,6 +348,21 @@ If true, keep the managedFields when printing objects in JSON or YAML format.
</td>
</tr>
<tr>
<td colspan="2">--subresource string</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">
<p>
<!--
If specified, apply will operate on the subresource of the requested object. Only allowed when using --server-side. This flag is beta and may change in the future.
-->
如果指定此参数apply 将对请求对象的子资源进行操作。
仅在使用 --server-side 时允许指定此参数。此标志为 Beta 级别,将来可能会发生变化。
</p>
</td>
</tr>
<tr>
<td colspan="2">--template string</td>
</tr>
@ -378,18 +393,27 @@ The length of time to wait before giving up on a delete, zero means determine a
<td colspan="2">--validate string[="strict"]&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<!--Default:-->默认值:"strict"</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;"><p>
<td></td><td style="line-height: 130%; word-wrap: break-word;">
<p>
<!--
Must be one of: strict (or true), warn, ignore (or false).
&quot;true&quot; or &quot;strict&quot; will use a schema to validate the input and
fail the request if invalid. It will perform server side validation if ServerSideFieldValidation
is enabled on the api-server, but will fall back to less reliable client-side validation if not.
&quot;warn&quot; will warn about unknown or duplicate fields without blocking the request if
server-side field validation is enabled on the API server, and behave as &quot;ignore&quot; otherwise.
&quot;false&quot; or &quot;ignore&quot; will not perform any schema validation,
silently dropping any unknown or duplicate fields.
-->
必须是以下选项之一strict或 true、warn、ignore或 false
&quot;true&quot;&quot;strict&quot; 将使用模式定义来验证输入,如果无效,则请求失败。
如果在 API 服务器上启用了 ServerSideFieldValidation则执行服务器端验证
但如果未启用此参数API 服务器将回退到可靠性较低的客户端验证。
如果在 API 服务器上启用了服务器端字段验证,&quot;warn&quot; 将警告未知或重复的字段而不阻止请求,
否则操作与 &quot;ignore&quot; 的表现相同。
&quot;false&quot;&quot;ignore&quot; 将不会执行任何模式定义检查,而是静默删除所有未知或重复的字段。
</p>
</td>
</tr>
<tr>

View File

@ -42,11 +42,11 @@ kubectl apply edit-last-applied (RESOURCE/NAME | -f FILENAME)
<!--
```
# Edit the last-applied-configuration annotations by type/name in YAML
kubectl apply edit-last-applied deployment/nginx
# Edit the last-applied-configuration annotations by file in JSON
kubectl apply edit-last-applied -f deploy.yaml -o json
```
-->
```shell
@ -184,18 +184,27 @@ Template string or path to template file to use when -o=go-template, -o=go-templ
<td colspan="2">--validate string[="strict"]&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<!--Default:-->默认值:"strict"</td>
</tr>
<tr>
<td></td><td style="line-height: 130%; word-wrap: break-word;">
<p>
<!--
Must be one of: strict (or true), warn, ignore (or false).
&quot;true&quot; or &quot;strict&quot; will use a schema to validate the input and
fail the request if invalid. It will perform server side validation if ServerSideFieldValidation
is enabled on the api-server, but will fall back to less reliable client-side validation if not.
&quot;warn&quot; will warn about unknown or duplicate fields without blocking the request if
server-side field validation is enabled on the API server, and behave as &quot;ignore&quot; otherwise.
&quot;false&quot; or &quot;ignore&quot; will not perform any schema validation,
silently dropping any unknown or duplicate fields.
-->
必须是以下选项之一strict或 true、warn、ignore或 false
&quot;true&quot;&quot;strict&quot; 将使用模式定义来验证输入,如果无效,则请求失败。
如果在 API 服务器上启用了 ServerSideFieldValidation则执行服务器端验证
但如果未启用此参数,它将回退到可靠性较低的客户端验证。
如果在 API 服务器上启用了服务器端字段验证,&quot;warn&quot; 将警告未知或重复的字段而不阻止请求,
否则操作与 &quot;ignore&quot; 的表现相同。
&quot;false&quot;&quot;ignore&quot; 将不会执行任何模式定义检查,而是静默删除所有未知或重复的字段。
</p>
</td>
</tr>
<tr>
@ -207,7 +216,8 @@ Must be one of: strict (or true), warn, ignore (or false).<br/>&quot;true&quot;
Only relevant if --edit=true. Defaults to the line ending native to your platform.
-->
仅在 --edit=true 时起作用。默认为你所用平台本地的行结尾格式。
</p>
</td>
</tr>
</tbody>

View File

@ -228,15 +228,22 @@ Template string or path to template file to use when -o=go-template, -o=go-templ
<td></td><td style="line-height: 130%; word-wrap: break-word;">
<p>
<!--
Must be one of: strict (or true), warn, ignore (or false).
&quot;true&quot; or &quot;strict&quot; will use a schema to validate the input and
fail the request if invalid. It will perform server side validation if ServerSideFieldValidation
is enabled on the api-server, but will fall back to less reliable client-side validation if not.
&quot;warn&quot; will warn about unknown or duplicate fields without blocking the request if
server-side field validation is enabled on the API server, and behave as &quot;ignore&quot; otherwise.
&quot;false&quot; or &quot;ignore&quot; will not perform any schema validation,
silently dropping any unknown or duplicate fields.
-->
必须是以下选项之一strict或 true、warn、ignore或 false
&quot;true&quot;&quot;strict&quot; 将使用模式定义来验证输入,如果无效,则请求失败。
如果在 API 服务器上启用了 ServerSideFieldValidation则执行服务器端验证
但如果未启用此参数,它将回退到可靠性较低的客户端验证。
如果在 API 服务器上启用了服务器端字段验证,&quot;warn&quot; 将警告未知或重复的字段而不阻止请求,
否则操作与 &quot;ignore&quot; 的表现相同。
&quot;false&quot;&quot;ignore&quot; 将不会执行任何模式定义检查,而是静默删除所有未知或重复的字段。
</p>
</td>
</tr>

View File

@ -2,51 +2,62 @@
title: JSONPath 支持
content_type: concept
weight: 40
math: true
---
<!--
title: JSONPath Support
content_type: concept
weight: 40
math: true
-->
<!-- overview -->
<!--
The {{< glossary_tooltip term_id="kubectl" text="kubectl" >}} tool supports JSONPath templates as an output format.
-->
{{< glossary_tooltip term_id="kubectl" text="kubectl" >}}
工具支持 JSONPath 模板作为输出格式。
<!-- body -->
<!--
A _JSONPath template_ is composed of JSONPath expressions enclosed by curly braces: `{` and `}`.
Kubectl uses JSONPath expressions to filter on specific fields in the JSON object and format the output.
In addition to the original JSONPath template syntax, the following functions and syntax are valid:
-->
**JSONPath 模板**由大括号 `{``}` 包起来的 JSONPath 表达式组成。
kubectl 使用 JSONPath 表达式来过滤 JSON 对象中的特定字段并格式化输出。
除了原始的 JSONPath 模板语法,以下函数和语法也是有效的:
<!--
1. Use double quotes to quote text inside JSONPath expressions.
2. Use the `range`, `end` operators to iterate lists.
3. Use negative slice indices to step backwards through a list.
Negative indices do _not_ "wrap around" a list and are valid as long as \\( ( - index + listLength ) \ge 0 \\).
-->
1. 使用双引号将 JSONPath 表达式内的文本引起来。
2. 使用 `range``end` 运算符来迭代列表。
3. 使用负的切片索引从列表末尾向前访问。负索引**不会**“环绕”列表,
   并且只要 \\( ( - index + listLength ) \ge 0 \\) 就有效。
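<!--
A short sketch of items 2 and 3 above, assuming a cluster with at least one
Pod:
-->
下面的命令简要演示上述第 2、3 条(假设集群中至少有一个 Pod

```shell
# 使用 range/end 迭代列表,每行输出一个 Pod 名称
kubectl get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'

# 使用负的切片索引获取列表中最后一个 Pod 的名称
kubectl get pods -o jsonpath='{.items[-1:].metadata.name}'
```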
{{< note >}}
<!--
- The `$` operator is optional since the expression always starts from the root object by default.
- The result object is printed as its `String()` function.
-->
- `$` 运算符是可选的,因为默认情况下表达式总是从根对象开始。
- 结果对象将通过其 `String()` 函数打印输出。
{{< /note >}}
<!--
## Functions in Kubernetes JSONPath {#functions}
-->
## Kubernetes JSONPath 中的函数 {#functions}
<!--
Given the JSON input:
-->
@ -95,33 +106,42 @@ Given the JSON input:
```
<!--
{{< table caption="Functions, their parameters, an example invocation, and the result" >}}
Function | Description | Example | Result
---------|-------------|---------|-------
`text` | the plain text | `kind is {.kind}` | `kind is List`
`@` | the current object | `{@}` | the same as input
`.` or `[]` | child operator | `{.kind}`, `{['kind']}` or `{['name\.type']}` | `List`
`..` | recursive descent | `{..name}` | `127.0.0.1 127.0.0.2 myself e2e`
`*` | wildcard. Get all objects | `{.items[*].metadata.name}` | `[127.0.0.1 127.0.0.2]`
`[start:end:step]` | subscript operator | `{.users[0].name}` | `myself`
`[,]` | union operator | `{.items[*]['metadata.name', 'status.capacity']}` | `127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]`
`?()` | filter | `{.users[?(@.name=="e2e")].user.password}` | `secret`
`range`, `end` | iterate list | `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}` | `[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]]`
`''` | quote interpreted string | `{range .items[*]}{.metadata.name}{'\t'}{end}` | `127.0.0.1 127.0.0.2`
`\` | escape termination character | `{.items[0].metadata.labels.kubernetes\.io/hostname}` | `127.0.0.1`
{{< /table >}}
-->
{{< table caption="函数、相关参数、调用示例和结果" >}}
函数 | 描述 | 示例 | 结果
----|-----|------|----
`text` | 纯文本 | `kind is {.kind}` | `kind is List`
`@` | 当前对象 | `{@}` | 与输入相同
`.``[]` | 子运算符 | `{.kind}`、`{['kind']}` 或 `{['name\.type']}` | `List`
`..` | 递归下降 | `{..name}` | `127.0.0.1 127.0.0.2 myself e2e`
`*` | 通配符。获取所有对象 | `{.items[*].metadata.name}` | `[127.0.0.1 127.0.0.2]`
`[start:end:step]` | 下标运算符 | `{.users[0].name}` | `myself`
`[,]` | 并集运算符 | `{.items[*]['metadata.name', 'status.capacity']}` | `127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]`
`?()` | 过滤 | `{.users[?(@.name=="e2e")].user.password}` | `secret`
`range``end` | 迭代列表 | `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}` | `[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]]`
`''` | 引用解释执行字符串 | `{range .items[*]}{.metadata.name}{'\t'}{end}` | `127.0.0.1 127.0.0.2`
`\` | 转义终止符 | `{.items[0].metadata.labels.kubernetes\.io/hostname}` | `127.0.0.1`
{{< /table >}}
<!--
## Using JSONPath expressions with kubectl {#use-with-kubectl}
-->
## 通过 kubectl 使用 JSONPath 表达式 {#use-with-kubectl}
<!--
Examples using `kubectl` and JSONPath expressions:
@ -140,26 +160,27 @@ kubectl get pods -o=jsonpath='{.items[0].metadata.labels.kubernetes\.io/hostname
{{< note >}}
<!--
On Windows, you must _double_ quote any JSONPath template that contains spaces (not single quote as shown above for bash). This in turn means that you must use a single quote or escaped double quote around any literals in the template. For example:

```cmd
kubectl get pods -o=jsonpath="{range .items[*]}{.metadata.name}{'\t'}{.status.startTime}{'\n'}{end}"
kubectl get pods -o=jsonpath="{range .items[*]}{.metadata.name}{\"\t\"}{.status.startTime}{\"\n\"}{end}"
```
-->
在 Windows 上,对于任何包含空格的 JSONPath 模板,你必须使用**双**引号(不是上面 bash 所示的单引号)。
反过来,这意味着你必须在模板中的所有文字周围使用单引号或转义的双引号。例如:

```cmd
kubectl get pods -o=jsonpath="{range .items[*]}{.metadata.name}{'\t'}{.status.startTime}{'\n'}{end}"
kubectl get pods -o=jsonpath="{range .items[*]}{.metadata.name}{\"\t\"}{.status.startTime}{\"\n\"}{end}"
```
{{< /note >}}
<!--
## Regular expressions in JSONPath
JSONPath regular expressions are not supported. If you want to match using regular expressions, you can use a tool such as `jq`.
-->
## JSONPath 中的正则表达式 {#regular-expression-in-jsonpath}
不支持 JSONPath 正则表达式。如需使用正则表达式进行匹配操作,你可以使用如 `jq` 之类的工具。
<!--
```shell
# kubectl does not support regular expressions for JSONpath output
# The following command does not work
@ -169,9 +190,6 @@ kubectl get pods -o jsonpath='{.items[?(@.metadata.name=~/^test$/)].metadata.nam
kubectl get pods -o json | jq -r '.items[] | select(.metadata.name | test("test-")).metadata.name'
```
-->
```shell
# kubectl 的 JSONpath 输出不支持正则表达式
# 下面的命令不会生效
@ -180,4 +198,3 @@ kubectl get pods -o jsonpath='{.items[?(@.metadata.name=~/^test$/)].metadata.nam
# 下面的命令可以获得所需的结果
kubectl get pods -o json | jq -r '.items[] | select(.metadata.name | test("test-")).metadata.name'
```

View File

@ -101,8 +101,9 @@ In this example, the kubelet is configured with the following settings:
2. `port`kubelet 将在 `20250` 端口上提供服务。
3. `serializeImagePulls`:并行拉取镜像。
4. `evictionHard`kubelet 将在以下情况之一驱逐 Pod
- 当节点的可用内存降至 100MiB 以下时。
- 当节点主文件系统的可用空间小于 10% 时。
- 当镜像文件系统的可用空间小于 15% 时。
- 当节点主文件系统的 inode 超过 95% 正在使用时。
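<!--
Putting the settings above together, a minimal sketch of such a kubelet
configuration file looks like this (values taken from the list above):
-->
将上述设置合在一起,这样一个 kubelet 配置文件的最简示意如下(数值取自上面的列表):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
port: 20250
serializeImagePulls: false # false 表示并行拉取镜像
evictionHard:
  memory.available: "100Mi"
  nodefs.available: "10%"
  imagefs.available: "15%"
  nodefs.inodesFree: "5%" # 即主文件系统 inode 使用超过 95% 时触发驱逐
```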
@ -113,8 +114,8 @@ evictionHard, the default values of other parameters will not be inherited and
will be set to zero. In order to provide custom values, you should provide all
the threshold values respectively.
-->
示例中,只更改 evictionHard 的一个参数的默认值,
这样其他参数的默认值将不会被继承,其他参数会被设置为零。如果要提供自定义值,你应该分别设置所有阈值。
{{< /note >}}
<!--
@ -134,7 +135,7 @@ If you use kubeadm to initialize your cluster, use the kubelet-config while crea
See [configuring kubelet using kubeadm](/docs/setup/production-environment/tools/kubeadm/kubelet-integration/) for details.
-->
如果你使用 kubeadm 初始化你的集群,在使用 `kubeadm init` 创建你的集群的时候请使用 kubelet-config。
更多细节请阅读[使用 kubeadm 配置 kubelet](/zh-cn/docs/setup/production-environment/tools/kubeadm/kubelet-integration/)
{{< /note >}}
<!--
@ -379,7 +380,8 @@ they can follow these steps to inspect the kubelet configuration:
"imagefs.available": "15%",
"memory.available": "100Mi",
"nodefs.available": "10%",
"nodefs.inodesFree": "5%"
"nodefs.inodesFree": "5%",
"imagefs.inodesFree": "5%"
},
"evictionPressureTransitionPeriod": "1m0s",
"enableControllerAttachDetach": true,

View File

@ -1,11 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
# This is the directory for all source content.
# Teams and members are visible at https://github.com/orgs/kubernetes/teams.
reviewers:
- sig-docs-localization-reviewers
approvers:
- sig-docs-localization-owners
- sig-docs-website-owners

View File

@ -293,7 +293,6 @@ languageName = "English"
# Weight used for sorting.
weight = 1
languagedirection = "ltr"
i18nDir = "./data/i18n"
[languages.en.params]
description = "Production-Grade Container Orchestration"
@ -517,3 +516,75 @@ languagedirection = "ltr"
languageNameLatinScript = "Tiếng Việt"
description = "Giải pháp điều phối container trong môi trường production"
# Ensure that the correct file names are in the correct directory. For example, i18n/en should contain en.toml and never de.toml
[[module.mounts]]
source = "i18n/bn"
target = "i18n"
[[module.mounts]]
source = "i18n/de"
target = "i18n"
[[module.mounts]]
source = "i18n/en"
target = "i18n"
[[module.mounts]]
source = "i18n/es"
target = "i18n"
[[module.mounts]]
source = "i18n/fr"
target = "i18n"
[[module.mounts]]
source = "i18n/hi"
target = "i18n"
[[module.mounts]]
source = "i18n/id"
target = "i18n"
[[module.mounts]]
source = "i18n/it"
target = "i18n"
[[module.mounts]]
source = "i18n/ja"
target = "i18n"
[[module.mounts]]
source = "i18n/ko"
target = "i18n"
[[module.mounts]]
source = "i18n/nl"
target = "i18n"
[[module.mounts]]
source = "i18n/no"
target = "i18n"
[[module.mounts]]
source = "i18n/pl"
target = "i18n"
[[module.mounts]]
source = "i18n/pt-br"
target = "i18n"
[[module.mounts]]
source = "i18n/ru"
target = "i18n"
[[module.mounts]]
source = "i18n/uk"
target = "i18n"
[[module.mounts]]
source = "i18n/vi"
target = "i18n"
[[module.mounts]]
source = "i18n/zh-cn"
target = "i18n"

View File

@ -1,2 +1,11 @@
# See the OWNERS docs at https://go.k8s.io/owners
# This is the directory for all source content.
# Teams and members are visible at https://github.com/orgs/kubernetes/teams.
reviewers:
- sig-docs-localization-reviewers
approvers:
- sig-docs-localization-owners
- sig-docs-website-owners

View File

@ -1 +0,0 @@
../data/i18n/bn/bn.toml

View File

@ -1 +0,0 @@
../data/i18n/de/de.toml

View File

@ -1 +0,0 @@
../data/i18n/en/en.toml

View File

@ -62,6 +62,9 @@ other = "GitHub"
# Also cover [community_learn] if localizing this site
[community_linkedin_name]
other = "LinkedIn"
[community_server_fault_name]
other = "Server Fault"
@ -664,4 +667,4 @@ other = "Warning:"
other = "Warning"
[whatsnext_heading]
other = "What's next"
other = "What's next"

View File

@ -1 +0,0 @@
../data/i18n/es/es.toml

View File

@ -1 +0,0 @@
../data/i18n/fr/fr.toml

View File

@ -1 +0,0 @@
../data/i18n/hi/hi.toml

View File

@ -1 +0,0 @@
../data/i18n/id/id.toml

View File

@ -1 +0,0 @@
../data/i18n/it/it.toml

View File

@ -1 +0,0 @@
../data/i18n/ja/ja.toml

View File

@ -1 +0,0 @@
../data/i18n/ko/ko.toml

View File

@ -1 +0,0 @@
../data/i18n/nl/nl.toml

View File

@ -1 +0,0 @@
../data/i18n/no/no.toml

View File

@ -1 +0,0 @@
../data/i18n/pl/pl.toml

View File

@ -1 +0,0 @@
../data/i18n/pt-br/pt-br.toml

View File

@ -1 +0,0 @@
../data/i18n/ru/ru.toml

View File

@ -1 +0,0 @@
../data/i18n/uk/uk.toml

View File

@ -1 +0,0 @@
../data/i18n/vi/vi.toml

View File

@ -1 +0,0 @@
../data/i18n/zh-cn/zh-cn.toml

Some files were not shown because too many files have changed in this diff.