Merge pull request #43789 from drewhagen/merged-main-dev-1.29

Merged main into dev 1.29 branch

commit 2de5b8f32a
@ -997,6 +997,16 @@ div.alert > em.javascript-required {

#bing-results-container {
  padding: 1em;
}

.bing-result {
  margin-bottom: 1em;
}

.bing-result-url {
  font-size: 14px;
}

.bing-result-snippet {
  color: #666666;
  font-size: 14px;
}

#bing-pagination-container {
  padding: 1em;
  margin-bottom: 1em;
@ -4,6 +4,7 @@ abstract: "Automatisierte Bereitstellung, Skalierung und Verwaltung von Containe

cid: home
---

{{< site-searchbar >}}

{{< blocks/section id="oceanNodes" >}}
{{% blocks/feature image="flower" %}}
@ -5,7 +5,18 @@ date: 2021-07-15

slug: sig-usability-spotlight-2021
---

-**Author:** Kunal Kushwaha, Civo
+**Author:** Kunal Kushwaha (Civo)

{{< note >}}
SIG Usability, which is featured in this Spotlight blog, has been deprecated and is no longer active.
As a result, the links and information provided in this blog post may no longer be valid or relevant.
Should there be renewed interest and increased participation in the future, the SIG may be revived.
However, as of August 2023 the SIG is inactive per the Kubernetes community policy.
The Kubernetes project encourages you to explore other
[SIGs](https://github.com/kubernetes/community/blob/master/sig-list.md#special-interest-groups)
and resources available on the Kubernetes website to stay up-to-date with the latest developments
and enhancements in Kubernetes.
{{< /note >}}

## Introduction
@ -32,7 +32,6 @@ including manual cleanup based on the time a volume was last used or producing a

Provided you've enabled the feature gate (see [How to use it](#how-to-use-it)), the new `.status.lastPhaseTransitionTime` field of a PersistentVolume (PV)
is updated every time that PV transitions from one phase to another.
Whether it's transitioning from `Pending` to `Bound`, `Bound` to `Released`, or any other phase transition, the `lastPhaseTransitionTime` will be recorded.
For newly created PVs the phase will be set to `Pending` and the `lastPhaseTransitionTime` will be recorded as well.
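
As an illustration, once the feature gate is enabled you can read the new field straight from the PV status. A minimal sketch, assuming a PV named `my-pv` exists in your cluster:

```shell
# Print the last phase transition timestamp of one PersistentVolume
kubectl get pv my-pv -o jsonpath='{.status.lastPhaseTransitionTime}{"\n"}'

# List all PVs with their current phase and transition time
kubectl get pv -o custom-columns='NAME:.metadata.name,PHASE:.status.phase,LASTTRANSITION:.status.lastPhaseTransitionTime'
```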
@ -0,0 +1,203 @@
---
layout: blog
title: "Introducing ingress2gateway; Simplifying Upgrades to Gateway API"
date: 2023-10-25T10:00:00-08:00
slug: introducing-ingress2gateway
---

***Authors:*** Lior Lieberman (Google), Kobi Levi (independent)

Today we are releasing [ingress2gateway](https://github.com/kubernetes-sigs/ingress2gateway), a tool
that can help you migrate from [Ingress](/docs/concepts/services-networking/ingress/) to [Gateway
API](https://gateway-api.sigs.k8s.io). Gateway API is just weeks away from graduating to GA; if you
haven't upgraded yet, now's the time to think about it!

## Background

In the ever-evolving world of Kubernetes, networking plays a pivotal role. As more applications are
deployed in Kubernetes clusters, effective exposure of these services to clients becomes a critical
concern. If you've been working with Kubernetes, you're likely familiar with the [Ingress API],
which has been the go-to solution for managing external access to services.

[Ingress API]: /docs/concepts/services-networking/ingress/

The Ingress API provides a way to route external traffic to your applications within the cluster,
making it an indispensable tool for many Kubernetes users. Ingress has its limitations, however, and
as applications become more complex and the demands on your Kubernetes clusters increase, these
limitations can become bottlenecks.

Some of the limitations are:

- **Insufficient common denominator** - by attempting to establish a common denominator for various
  HTTP proxies, Ingress can only accommodate basic HTTP routing, forcing more features of
  contemporary proxies like traffic splitting and header matching into provider-specific,
  non-transferable annotations.
- **Inadequate permission model** - the Ingress spec configures both infrastructure and application
  configuration in one object. With Ingress, the cluster operator and application developer operate
  on the same Ingress object without being aware of each other's roles. This results in an
  insufficient role-based access control model and a high potential for setup errors.
- **Lack of protocol diversity** - Ingress primarily focuses on HTTP(S) routing and does not provide
  native support for other protocols, such as TCP, UDP and gRPC. This limitation makes it less
  suitable for handling non-HTTP workloads.

## Gateway API

To overcome this, Gateway API is designed to provide a more flexible, extensible, and powerful way
to manage traffic to your services.

Gateway API is just weeks away from a GA (General Availability) release. It provides a standard
Kubernetes API for ingress traffic control. It offers extended functionality, improved
customization, and greater flexibility. By focusing on modular and expressive API resources, Gateway
API makes it possible to describe a wider array of routing configurations and models.

The transition from the Ingress API to Gateway API in Kubernetes is driven by the advantages and advanced
functionalities that Gateway API offers, with its foundation built on four core principles: a
role-oriented approach, portability, expressiveness and extensibility.

### A role-oriented approach

Gateway API employs a role-oriented approach that aligns with the conventional roles within
organizations involved in configuring Kubernetes service networking. This approach enables
infrastructure engineers, cluster operators, and application developers to collectively address
different aspects of Gateway API.

For instance, infrastructure engineers play a pivotal role in deploying GatewayClasses,
cluster-scoped resources that act as templates to explicitly define behavior for Gateways derived
from them, laying the groundwork for robust service networking.

Subsequently, cluster operators utilize these GatewayClasses to deploy gateways. A Gateway in
Kubernetes' Gateway API defines how external traffic can be directed to Services within the cluster,
essentially bridging non-Kubernetes sources to Kubernetes-aware destinations. It represents a
request for a load balancer configuration aligned with a GatewayClass' specification. The Gateway
spec may not be exhaustive, as some details can be supplied by the GatewayClass controller, ensuring
portability. Additionally, a Gateway can be linked to multiple Route references to channel specific
traffic subsets to designated services.

Lastly, application developers configure route resources (such as HTTPRoutes) to manage
configuration (e.g. timeouts, request matching/filtering) and Service composition (e.g. path routing to
backends). Route resources define protocol-specific rules for mapping requests from a Gateway to
Kubernetes Services. HTTPRoute is for multiplexing HTTP or terminated HTTPS connections. It's
intended for use in cases where you want to inspect the HTTP stream and use HTTP request data for
either routing or modification, for example using HTTP headers for routing, or modifying them
in-flight.

{{< figure src="gateway-api-resources.svg" alt="Diagram showing the key resources that make up Gateway API and how they relate to each other. The resources shown are GatewayClass, Gateway, and HTTPRoute; the Service API is also shown" class="diagram-medium" >}}
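
To make the role split concrete, here is a minimal sketch of an HTTPRoute, the resource an application developer would own. The Gateway name, hostname, header value, and Service details are illustrative assumptions, not output from any tool:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: my-app-route
spec:
  parentRefs:            # attach this route to a Gateway owned by the cluster operator
  - name: my-gateway
  hostnames:
  - "app.example.com"
  rules:
  - matches:
    - path:
        type: PathPrefix
        value: /api
      headers:           # header-based matching, annotation-only territory in Ingress
      - name: env
        value: canary
    backendRefs:
    - name: my-app
      port: 8080
```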

### Portability

With more than 20 [API
implementations](https://gateway-api.sigs.k8s.io/implementations/#implementations), Gateway API is
designed to be more portable across different implementations, clusters and environments. It helps
reduce Ingress' reliance on non-portable, provider-specific annotations, making your configurations
more consistent and easier to manage across multiple clusters.

Gateway API commits to supporting the 5 latest Kubernetes minor versions. That means that Gateway
API currently supports Kubernetes 1.24+.

### Expressiveness

Gateway API provides standard, Kubernetes-backed support for a wide range of features, such as
header-based matching, traffic splitting, weight-based routing, request mirroring and more. With
Ingress, these features need custom provider-specific annotations.
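
As a sketch of that expressiveness, weight-based traffic splitting is a first-class field of an HTTPRoute rule rather than a vendor annotation. The Gateway and Service names below are illustrative assumptions:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: my-app-split
spec:
  parentRefs:
  - name: my-gateway
  rules:
  - backendRefs:         # 90/10 traffic split between two versions of a backend
    - name: my-app-v1
      port: 8080
      weight: 90
    - name: my-app-v2
      port: 8080
      weight: 10
```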

### Extensibility

Gateway API is designed with extensibility as a core feature. Rather than enforcing a
one-size-fits-all model, it offers the flexibility to link custom resources at multiple layers
within the API's framework. This layered approach to customization ensures that users can tailor
configurations to their specific needs without overwhelming the main structure. By doing so, Gateway
API facilitates more granular and context-sensitive adjustments, allowing for a fine-tuned balance
between standardization and adaptability. This becomes particularly valuable in complex cloud-native
environments where specific use cases require nuanced configurations. A critical difference is that
Gateway API has a much broader base set of features and a standard pattern for extensions that can
be more expressive than annotations were on Ingress.

## Upgrading to Gateway

Migrating from Ingress to Gateway API may seem intimidating, but luckily Kubernetes just released a
tool to simplify the process. [ingress2gateway](https://github.com/kubernetes-sigs/ingress2gateway)
assists in the migration by converting your existing Ingress resources into Gateway API resources.
Here is how you can get started with Gateway API and ingress2gateway:

1. [Install a Gateway
   controller](https://gateway-api.sigs.k8s.io/guides/#installing-a-gateway-controller) or [install
   the Gateway API CRDs manually](https://gateway-api.sigs.k8s.io/guides/#installing-gateway-api).

2. Install [ingress2gateway](https://github.com/kubernetes-sigs/ingress2gateway).

   If you have a Go development environment locally, you can install `ingress2gateway` with:

   ```shell
   go install github.com/kubernetes-sigs/ingress2gateway@v0.1.0
   ```

   This installs `ingress2gateway` to `$(go env GOPATH)/bin/ingress2gateway`.

   Alternatively, follow the installation guide
   [here](https://github.com/kubernetes-sigs/ingress2gateway#installation).

3. Once the tool is installed, you can use it to convert the Ingress resources in your cluster to
   Gateway API resources.

   ```shell
   ingress2gateway print
   ```

   The above command will:

   1. Load your current Kubernetes client config, including the active context, namespace and
      authentication details.
   2. Search for Ingresses and provider-specific resources in that namespace.
   3. Convert them to Gateway API resources (currently only Gateways and HTTPRoutes). For other
      options you can run the tool with `-h`, or refer to
      [https://github.com/kubernetes-sigs/ingress2gateway#options](https://github.com/kubernetes-sigs/ingress2gateway#options).

4. Review the converted Gateway API resources, validate them, and then apply them to your cluster.
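
   One hedged way to do this, assuming you prefer to review the manifests in a file first and already installed the CRDs in step 1:

   ```shell
   # Save the converted resources, review them, then server-side dry-run before applying
   ingress2gateway print > gateway-resources.yaml
   kubectl apply --dry-run=server -f gateway-resources.yaml
   kubectl apply -f gateway-resources.yaml
   ```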

5. Send test requests to your Gateway to check that it is working. You could get your gateway
   address using `kubectl get gateway <gateway-name> -n <namespace> -o
   jsonpath='{.status.addresses}{"\n"}'`.

6. Update your DNS to point to the new Gateway.

7. Once you've confirmed that no more traffic is going through your Ingress configuration, you can
   safely delete it.

## Wrapping up

Achieving reliable, scalable and extensible networking has always been a challenging objective. The
Gateway API is designed to improve the current Kubernetes networking standards like Ingress and
reduce the need for implementation-specific annotations and CRDs.

It is a Kubernetes standard API, consistent across different platforms and implementations, and most
importantly it is future-proof. Gateway API is the next generation of the Ingress API, but has a
larger scope than that, expanding to tackle mesh and layer 4 routing as well. Gateway API and
ingress2gateway are supported by a dedicated team under SIG Network that actively works on them and
manages the ecosystem. It is also likely to receive more updates and community support.

### The Road Ahead

ingress2gateway is just getting started. We're planning to onboard more providers, introduce support
for more types of Gateway API routes, and make sure everything syncs up smoothly with the ongoing
development of Gateway API.

Excitingly, Gateway API is also making significant strides. While v1.0 is about to launch,
there's still a lot of work ahead. This release incorporates many new experimental features, with
additional functionalities currently in the early stages of planning and development.

If you're interested in helping to contribute, we would love to have you! Please check out the
[community page](https://gateway-api.sigs.k8s.io/contributing/community/) which includes links to
the Slack channel and community meetings. We look forward to seeing you!

### Useful Links

- Get involved with the ingress2gateway project on
  [GitHub](https://github.com/kubernetes-sigs/ingress2gateway)
- Open a new issue -
  [ingress2gateway](https://github.com/kubernetes-sigs/ingress2gateway/issues/new/choose), [Gateway
  API](https://github.com/kubernetes-sigs/gateway-api/issues/new/choose).
- Join our [discussions](https://github.com/kubernetes-sigs/gateway-api/discussions).
- [Gateway API Getting Started](https://gateway-api.sigs.k8s.io/guides/)
- [Gateway API Implementations](https://gateway-api.sigs.k8s.io/implementations/#gateways)
@ -0,0 +1,153 @@
---
layout: blog
title: "Gateway API v1.0: GA Release"
date: 2023-10-31T10:00:00-08:00
slug: gateway-api-ga
---

**Authors:** Shane Utt (Kong), Nick Young (Isovalent), Rob Scott (Google)

On behalf of Kubernetes SIG Network, we are pleased to announce the v1.0 release of [Gateway
API](https://gateway-api.sigs.k8s.io/)! This release marks a huge milestone for
this project. Several key APIs are graduating to GA (generally available), while
other significant features have been added to the Experimental channel.

## What's new

### Graduation to v1

This release includes the graduation of
[Gateway](https://gateway-api.sigs.k8s.io/api-types/gateway/),
[GatewayClass](https://gateway-api.sigs.k8s.io/api-types/gatewayclass/), and
[HTTPRoute](https://gateway-api.sigs.k8s.io/api-types/httproute/) to v1, which
means they are now generally available (GA). This API version denotes a high
level of confidence in the API surface and provides guarantees of backwards
compatibility. Note that although the versions of these APIs included in the
Standard channel are now considered stable, that does not mean that they are
complete. These APIs will continue to receive new features via the Experimental
channel as they meet graduation criteria. For more information on how all of
this works, refer to the [Gateway API Versioning
Policy](https://gateway-api.sigs.k8s.io/concepts/versioning/).

### Logo

Gateway API now has a logo! This logo was designed through a collaborative
process, and is intended to represent the idea that this is a set of Kubernetes
APIs for routing traffic both north-south and east-west:

![Gateway API Logo](gateway-api-logo.svg)

### CEL Validation

Historically, Gateway API has bundled a validating webhook as part of installing
the API. Starting in v1.0, webhook installation is optional and only recommended
for Kubernetes 1.24. Gateway API now includes
[CEL](/docs/reference/using-api/cel/) validation rules as
part of the
[CRDs](/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
This new form of validation is supported in Kubernetes 1.25+, and thus the
validating webhook is no longer required in most installations.
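
As a sketch of what CEL validation embedded in a CRD looks like (an illustrative rule, not one shipped by Gateway API), a CRD schema can carry rules in `x-kubernetes-validations` that the API server evaluates on writes:

```yaml
# Illustrative CRD schema fragment: the API server enforces the CEL rule (Kubernetes 1.25+)
openAPIV3Schema:
  type: object
  properties:
    spec:
      type: object
      x-kubernetes-validations:
      - rule: "self.minReplicas <= self.maxReplicas"
        message: "minReplicas must not exceed maxReplicas"
      properties:
        minReplicas:
          type: integer
        maxReplicas:
          type: integer
```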

### Standard channel

This release was primarily focused on ensuring that the existing beta APIs were
well defined and sufficiently stable to graduate to GA. That led to a variety of
spec clarifications, as well as some improvements to status to improve the
overall UX when interacting with Gateway API.

### Experimental channel

Most of the changes included in this release were limited to the experimental
channel. These include HTTPRoute timeouts, TLS config from Gateways to backends,
WebSocket support, Gateway infrastructure labels, and more. Stay tuned for a
follow-up blog post that will cover each of these new features in detail.

### Everything else

For a full list of the changes included in this release, please refer to the
[v1.0.0 release
notes](https://github.com/kubernetes-sigs/gateway-api/releases/tag/v1.0.0).

## How we got here

The idea of Gateway API was initially [proposed](https://youtu.be/Ne9UJL6irXY?si=wgtC9w8PMB5ZHil2)
4 years ago at KubeCon San Diego as the next generation
of the Ingress API. Since then, an incredible community has formed to develop what
has likely become the most collaborative API in Kubernetes history. Over 170
people have contributed to this API so far, and that number continues to grow.

A special thank you to the 20+ [community members who agreed to take on an
official role in the
project](https://github.com/kubernetes-sigs/gateway-api/blob/main/OWNERS_ALIASES),
providing some time for reviews and sharing the load of maintaining the project!

We especially want to highlight the emeritus maintainers that played a pivotal
role in the early development of this project:

* [Bowei Du](https://github.com/bowei)
* [Daneyon Hansen](https://github.com/danehans)
* [Harry Bagdi](https://github.com/hbagdi)

## Try it out

Unlike other Kubernetes APIs, you don't need to upgrade to the latest version of
Kubernetes to get the latest version of Gateway API. As long as you're running
one of the 5 most recent minor versions of Kubernetes (1.24+), you'll be able to
get up and running with the latest version of Gateway API.

To try out the API, follow our [Getting Started
guide](https://gateway-api.sigs.k8s.io/guides/).
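
As a concrete first step, the project publishes install manifests with each release; a sketch of installing the standard-channel CRDs (check the guide for the current version):

```shell
# Install the Gateway API v1.0.0 standard-channel CRDs
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml
```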

## What's next

This release is just the beginning of a much larger journey for Gateway API, and
there are still plenty of new features and new ideas in flight for future
releases of the API.

One of our key goals going forward is to work to stabilize and graduate other
experimental features of the API. These include [support for service
mesh](https://gateway-api.sigs.k8s.io/concepts/gamma/), additional route types
([GRPCRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.GRPCRoute),
[TCPRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.TCPRoute),
[TLSRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.TLSRoute),
[UDPRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.UDPRoute)),
and a variety of experimental features.

We've also been working towards moving
[ReferenceGrant](https://gateway-api.sigs.k8s.io/api-types/referencegrant/) into
a built-in Kubernetes API that can be used for more than just Gateway API.
Within Gateway API, we've used this resource to safely enable cross-namespace
references, and that concept is now being adopted by other SIGs. The new version
of this API will be owned by SIG Auth and will likely include at least some
modifications as it migrates to a built-in Kubernetes API.

### Gateway API at KubeCon + CloudNativeCon

At [KubeCon North America
(Chicago)](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/)
and the adjacent [Contributor
Summit](https://www.kubernetes.dev/events/2023/kcsna/) there are several talks
related to Gateway API that will go into more detail on these topics. If you're
attending either of these events this year, consider adding these to your
schedule.

**Contributor Summit:**

- [Lessons Learned Building a GA API with CRDs](https://sched.co/1Sp9u)
- [Conformance Profiles: Building a generic conformance test reporting framework](https://sched.co/1Sp9l)
- [Gateway API: Beyond GA](https://sched.co/1SpA9)

**KubeCon Main Event:**

- [Gateway API: The Most Collaborative API in Kubernetes History Is GA](https://sched.co/1R2qM)

**KubeCon Office Hours:**

Gateway API maintainers will be holding office hours sessions at KubeCon if
you'd like to discuss or brainstorm any related topics. To get the latest
updates on these sessions, join the `#sig-network-gateway-api` channel on
[Kubernetes Slack](https://slack.kubernetes.io/).

## Get involved

We've only barely scratched the surface of what's in flight with Gateway API.
There are lots of opportunities to get involved and help define the future of
Kubernetes routing APIs for both Ingress and Mesh.

If this is interesting to you, please [join us in the
community](https://gateway-api.sigs.k8s.io/contributing/) and help us build the
future of Gateway API together!
@ -0,0 +1,176 @@
---
layout: blog
title: "Kubernetes Contributor Summit: Behind-the-scenes"
slug: k8s-contributor-summit-behind-the-scenes
date: 2023-11-03
canonicalUrl: https://www.k8s.dev/blog/2023/11/03/k8s-contributor-summit-behind-the-scenes/
---

**Author**: Frederico Muñoz (SAS Institute)

Every year, just before the official start of KubeCon+CloudNativeCon, there's a special event that
has a very special place in the hearts of those organizing and participating in it: the Kubernetes
Contributor Summit. To find out why, and to provide a behind-the-scenes perspective, we interview
Noah Abrahams, who, amongst other roles, was the co-lead for the Kubernetes Contributor Summit in
2023.

**Frederico Muñoz (FSM)**: Hello Noah, and welcome. Could you start by introducing yourself and
telling us how you got involved in Kubernetes?

**Noah Abrahams (NA)**: I've been in this space for quite a while. I got started in IT in the mid
'90s, and I've been working in the "Cloud" space for about 15 years. It was, frankly, through a
combination of sheer luck (being in the right place at the right time) and having good mentors to
pull me into those places (thanks, Tim!), that I ended up at a startup called Apprenda in 2016.
While I was there, they pivoted into Kubernetes, and it was the best thing that could have happened
to my career. It was around v1.2 and someone asked me if I could give a presentation on Kubernetes
concepts at "my local meetup" in Las Vegas. The meetup didn't exist yet, so I created it, and got
involved in the wider community. One thing led to another, and soon I was involved in ContribEx,
joined the release team, was doing booth duty for the CNCF, became an ambassador, and here we are
today.

## The Contributor Summit

**FM**: Before leading the organisation of the KCSEU 2023, how many other Contributor Summits were
you a part of?

**NA**: I was involved in four or five before taking the lead. If I'm recalling correctly, I
attended the summit in Copenhagen, then sometime in 2018 I joined the wrong meeting, because the
summit staff meeting was listed on the ContribEx calendar. Instead of dropping out of the call, I
listened a bit, then volunteered to take on some work that didn't look like it had anybody yet
dedicated to it. I ended up running Ops in Seattle and helping run the New Contributor Workshop in
Shanghai that year. Since then, I've been involved in all but two, since I missed both Barcelona
and Valencia.

**FM**: Have you noticed any major changes in terms of how the conference is organized throughout
the years? Namely in terms of number of participants, venues, speakers, themes...

**NA**: The summit changes over the years with the ebb and flow of the desires of the contributors
that attend. While we can typically expect about the same number of attendees, depending on the
region that the event is held in, we adapt the style and content greatly based on the feedback that
we receive at the end of each event. Some years, contributors ask for more free-style or
unconference-type sessions, and we plan on having more of those, but some years, people ask for more
planned sessions or workshops, so that's what we facilitate. We also have to continually adapt to
the venue that we have, the number of rooms we're allotted, how we're going to share the space with
other events, and so forth. That all goes into the planning ahead of time, from how many talk tracks
we'll have, to what types of tables and how many microphones we want in a room.

There has been one very significant change over the years, though, and that is that we no longer run
the New Contributor Workshop. While the content was valuable, running the session during the summit
never led to any people who weren't already contributing to the project becoming dedicated
contributors to the project, so we removed it from the schedule. We'll deliver that content another
way, while we'll keep the summit focused on existing contributors.

## What makes it special

**FM**: Going back to the introduction I made, I've heard several participants saying that KubeCon
is great, but that the Contributor Summit is for them the main event. In your opinion, why do you
think that is?

**NA**: I think part of it ties into what I mentioned a moment ago, the flexibility in our content
types. For many contributors, I think the summit is basically "how KubeCon used to be", back when
it was primarily a gathering of the contributors to talk about the health of the project and the
work that needed to be done. So, in that context, if the contributors want to discuss, say, a new
Working Group, then they have dedicated space to do so in the summit. They also have the space to
sit down and hack on a tough problem, discuss architectural philosophy, bring potential problems to
more people's attention, refine our methods, and so forth. Plus, the unconference aspect allows for
some malleability on the day of, for whatever is most important right then and there. Whatever
folks want to get out of this environment is what we'll provide, and having a space and time
specifically to address your particular needs is always going to be well received.

Let's not forget the social aspect, too. Despite the fact that we're a global community and work
together remotely and asynchronously, it's still easier to work together when you have a personal
connection and can put a face to a GitHub handle. Zoom meetings are a good start, but even a
single instance of in-person time makes a big difference in how people work together. So, getting
folks together a couple of times a year makes the project run more smoothly.

## Organizing the Summit

**FM**: In terms of the organization team itself, could you share with us a general overview of the
staffing process? Who are the people that make it happen? How many different teams are involved?

**NA**: There's a bit of the "usual suspects" involved in making this happen, many of whom you'll
find in the ContribEx meetings, but really it comes down to whoever is going to step up and do the
work. We start with a general call out for volunteers from the org. There's a GitHub issue where
we'll track the staffing, and that will get shouted out to all the usual comms channels: Slack,
k-dev, etc.

From there, there's a handful of different teams, overseeing the content/program committee,
registration, communications, day-of operations, the awards the SIGs present to their members, the
after-summit social event, and so on. The leads for each team/role are generally picked from folks
who have stepped up and worked the event before, either as a shadow or a previous lead, so we know
we can rely on them, which is a recurring theme. The leads pick their shadows from whoever pipes up
on the issue, and the teams move forward, operating according to their role books, which we try to
update at the end of each summit with what we've learned over the past few months. It's expected
that a shadow will be in line to lead that role at some point in a future summit, so we always have
a good bench of folks available to make this event happen. A couple of the roles also have some
non-shadow volunteers, where people can step in to help a bit, like as an on-site room monitor, and
get a feel for how things are put together without having to give a serious up-front commitment, but
most of the folks working the event are dedicated to both making the summit successful and coming
back to do so in the future. Of course, the roster can change over time, or even suddenly, as
people gain or lose travel budget, get new jobs, only attend Europe or North America or Asia, etc.
It's a constant dance, relying 100% on the people who want to make this project successful.

Last, but not least, is the Summit lead. They have to keep the entire process moving forward, be
willing to step in to keep bike-shedding from derailing our deadlines, make sure the right people
are talking to one another, lead all our meetings to make sure everyone gets a voice, etc. In some
cases, the lead has to even be willing to take over an entirely separate role, in case someone gets
sick or has any other extenuating circumstances, to make sure absolutely nothing falls through the
cracks. The lead is only allowed to volunteer after they've been through this a few times and know
what the event entails. Event planning is not for the faint of heart.

**FM**: The participation of volunteers is essential, but there's also the topic of CNCF support:
how does this dynamic play out in practice?

**NA**: This event would not happen in its current form without our CNCF liaison. They provide us
with space, make sure we are fed and caffeinated and cared for, bring us outside spaces to evaluate
so we have somewhere to hold the social gathering, get us the budget so we have t-shirts and patches
and the like, and generally make it possible for us to put this event together. They're even
responsible for the signage and arrows, so the attendees know where to go. They're the ones sitting
at the front desk, keeping an eye on everything and answering people's questions. At the same time,
they're along to facilitate, and try to avoid influencing our planning.

There's a ton of work that goes into making the summit happen that is easy to overlook as an
attendee, because people tend to expect things to just work. It is no exaggeration to say this
event would not have happened like it has over the years without the help from our liaisons, like
Brienne and Deb. They are an integral part of the team.

## A look ahead

**FM**: Currently, we're preparing the NA 2023 summit; how is it going? Any changes in format
compared with previous ones?

**NA**: I would say it's going great, though I'm sort of an emeritus lead for this event, mostly
picking up the things that I see need to be done and don't have someone assigned to them. We're
always learning from our past experiences and making small changes to continually be better, from
how many people need to be on a particular rotation to how far in advance we open and close the CFP.
There are no major changes right now, just continually providing the content that the contributors
want.

**FM**: For our readers that might be interested in joining the Kubernetes Contributor Summit, is
there anything they should know?

**NA**: First of all, the summit is an event by and for org members. If you're not already an org
member, you should be getting involved before trying to attend the summit, as the content is curated
specifically towards the contributors and maintainers of the project. That applies to the staff as
well, as all the decisions should be made with the interests and health of Kubernetes contributors
being the end goal. We get a lot of people who show interest in helping out, but then aren't ready
to make any sort of commitment, and that just makes more work for us. If you're not already a
proven and committed member of this community, it's difficult for us to place you in a position that
requires reliability. We have made some rare exceptions when we need someone local to help us out,
but those are few and far between.

If you are, however, already a member, we'd love to have you. The more people that are involved,
the better the event becomes. That applies to both dedicated staff and those in attendance
bringing CFPs, unconference topics, and just contributing to the discussions. If you're part of
this community and you're going to be at KubeCon, I would highly urge you to attend, and if you're
not yet an org member, let's make that happen!

**FM**: Indeed! Any final comments you would like to share?

**NA**: Just that the Contributor Summit is, for me, the ultimate manifestation of the Hallway
Track. By being here, you're part of the conversations that move this project forward. It's good
for you, and it's good for Kubernetes. I hope to see you all in Chicago!
@ -0,0 +1,139 @@
---
layout: blog
title: "Spotlight on SIG Architecture: Production Readiness"
slug: sig-architecture-production-readiness-spotlight-2023
date: 2023-11-02
canonicalUrl: https://www.k8s.dev/blog/2023/11/02/sig-architecture-production-readiness-spotlight-2023/
---

**Author**: Frederico Muñoz (SAS Institute)

_This is the second interview of a SIG Architecture Spotlight series that will cover the different
subprojects. In this blog, we will cover the [SIG Architecture: Production Readiness
subproject](https://github.com/kubernetes/community/blob/master/sig-architecture/README.md#production-readiness-1)_.

In this SIG Architecture spotlight, we talked with [Wojciech Tyczynski](https://github.com/wojtek-t)
(Google), lead of the Production Readiness subproject.

## About SIG Architecture and the Production Readiness subproject

**Frederico (FSM)**: Hello Wojciech, could you tell us a bit about yourself, your role and how you
got involved in Kubernetes?

**Wojciech Tyczynski (WT)**: I started contributing to Kubernetes in January 2015. At that time,
Google (where I was and still am working) decided to start a Kubernetes team in the Warsaw office
(in addition to already existing teams in California and Seattle). I was lucky enough to be one of
the seeding engineers for that team.

After two months of onboarding and helping with different tasks across the project towards the 1.0
launch, I took ownership of the scalability area and led Kubernetes to support clusters
with 5000 nodes. I'm still involved in [SIG Scalability](https://github.com/kubernetes/community/blob/master/sig-scalability/README.md)
as its Technical Lead. That was the start of a journey, since scalability is such a cross-cutting topic,
and I started contributing to many other areas including, over time, to SIG Architecture.

**FSM**: In SIG Architecture, why specifically the Production Readiness subproject? Was it something
you had in mind from the start, or was it an unexpected consequence of your initial involvement in
scalability?

**WT**: After reaching that milestone of [Kubernetes supporting 5000-node clusters](https://kubernetes.io/blog/2017/03/scalability-updates-in-kubernetes-1-6/),
one of the goals was to ensure that Kubernetes would not degrade its scalability properties over time. While a
non-scalable implementation is always fixable, designing non-scalable APIs or contracts is
problematic. I was looking for a way to ensure that people are thinking about
scalability when they create new features and capabilities, without introducing too much overhead.

This is when I joined forces with [John Belamaric](https://github.com/johnbelamaric) and
[David Eads](https://github.com/deads2k) and created a Production Readiness subproject within SIG
Architecture. While setting the bar for scalability was only one of a few motivations for it, it
ended up fitting quite well. At the same time, I was already involved in the overall reliability of
the system internally, so other goals of Production Readiness were also close to my heart.

**FSM**: To anyone new to how SIG Architecture works, how would you describe the main goals and
areas of intervention of the Production Readiness subproject?

**WT**: The goal of the Production Readiness subproject is to ensure that any feature that is added
to Kubernetes can be reliably used in production clusters. This primarily means that those features
are observable, scalable, supportable, and can always be safely enabled and, in case of production issues,
also disabled.

## Production readiness and the Kubernetes project

**FSM**: Architectural consistency being one of the goals of the SIG, is this made more challenging
by the [distributed and open nature of Kubernetes](https://www.cncf.io/reports/kubernetes-project-journey-report/)?
Do you feel this impacts the approach that Production Readiness has to take?

**WT**: The distributed nature of Kubernetes certainly impacts Production Readiness, because it
makes thinking about aspects like enablement/disablement or scalability more challenging. To be more
precise, when enabling or disabling features that span multiple components you need to think about
version skew between them and design for it. For scalability, changes in one component may actually
result in problems for a completely different one, so it requires a good understanding of the whole
system, not just individual components. But it's also what makes this project so interesting.

**FSM**: Those running Kubernetes in production will have their own perspective on things; how do
you capture this feedback?

**WT**: Fortunately, we aren't talking about _"them"_ here, we're talking about _"us"_: all of us are
working for companies that are managing large fleets of Kubernetes clusters and we're involved in
that too, so we suffer from those problems ourselves.

So while we're trying to get feedback (our annual PRR survey is very important for us), it rarely
reveals completely new problems; rather, it shows their scale. And we try to react to it:
changes like "Beta APIs off by default" happen in reaction to the data that we observe.

**FSM**: On the topic of reaction, that made me think of how the [Kubernetes Enhancement Proposal (KEP)](https://github.com/kubernetes/enhancements/blob/master/keps/NNNN-kep-template/README.md)
template has a Production Readiness Review (PRR) section, which is tied to the graduation
process. Was this something born out of identified insufficiencies? How would you describe the
results?

**WT**: As mentioned above, the overall goal of the Production Readiness subproject is to ensure
that every newly added feature can be reliably used in production. It's not possible to enforce that
by a central team - we need to make it everyone's problem.

To achieve it, we wanted to ensure that everyone designing their new feature is thinking about safe
enablement, scalability, observability, supportability, etc. from the very beginning. Which means
not when the implementation starts, but rather during the design. Given that KEPs are effectively
Kubernetes design docs, making it part of the KEP template was the way to achieve the goal.

**FSM**: So, in a way, making sure that feature owners have thought about the implications of their
proposal.

**WT**: Exactly. We already observed that just by forcing feature owners to think through the PRR
aspects (via forcing them to fill in the PRR questionnaire) many of the original issues go
away. Sure - as PRR approvers we're still catching gaps, but even the initial versions of KEPs are
better now than they used to be a couple of years ago in what concerns thinking about
productionisation aspects, which is exactly what we wanted to achieve - spreading the culture of
thinking about reliability in its widest possible meaning.

**FSM**: We've been talking about the PRR process; could you describe it for our readers?

**WT**: The [PRR process](https://github.com/kubernetes/community/blob/master/sig-architecture/production-readiness.md)
is fairly simple - we just want to ensure that you think through the productionisation aspects of
your feature early enough. If you do your job, it's just a matter of answering some questions in the
KEP template and getting approval from a PRR approver (in addition to regular SIG approval). If you
didn't think about those aspects earlier, it may require spending more time and potentially revising
some decisions, but that's exactly what we need to make the Kubernetes project reliable.

## Helping with Production Readiness

**FSM**: Production Readiness seems to be one area where a good deal of prior exposure is required
in order to be an effective contributor. Are there also ways for someone newer to the project to
contribute?

**WT**: PRR approvers have to have a deep understanding of the whole Kubernetes project to catch
potential issues. Kubernetes is such a large project now, with so many nuances, that people who are
new to the project can simply miss the context, no matter how senior they are.

That said, there are many ways that you may implicitly help. Increasing the reliability of
particular areas of the project by improving its observability and debuggability, increasing test
coverage, and building new kinds of tests (upgrade, downgrade, chaos, etc.) will help us a lot. Note
that the PRR subproject is focused on keeping the bar at the design level, but we should also care
equally about the implementation. For that, we're relying on individual SIGs and code approvers, so
having people there who are aware of productionisation aspects, and who deeply care about it, will
help the project a lot.

**FSM**: Thank you! Any final comments you would like to share with our readers?

**WT**: I would like to highlight and thank all contributors for their cooperation. While the PRR
adds some additional work for them, we see that people care about it, and what's even more
encouraging is that with every release the quality of the answers improves, and questions like "do I
really need a metric reflecting if my feature works" or "is downgrade really that important" don't
really appear anymore.
@ -340,30 +340,6 @@ Then, delete the Secret you now know the name of:

kubectl -n examplens delete secret/example-automated-thing-token-zyxwv
```

The control plane spots that the ServiceAccount is missing its Secret,
and creates a replacement:

```shell
kubectl -n examplens get serviceaccount/example-automated-thing -o yaml
```

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"ServiceAccount","metadata":{"annotations":{},"name":"example-automated-thing","namespace":"examplens"}}
  creationTimestamp: "2019-07-21T07:07:07Z"
  name: example-automated-thing
  namespace: examplens
  resourceVersion: "1026"
  selfLink: /api/v1/namespaces/examplens/serviceaccounts/example-automated-thing
  uid: f23fd170-66f2-4697-b049-e1e266b7f835
secrets:
- name: example-automated-thing-token-4rdrh
```

## Clean up

If you created a namespace `examplens` to experiment with, you can remove it:
@ -291,33 +291,6 @@ variables as well as some other useful variables:

The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from
the root of the object. No other metadata properties are accessible.

Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
Accessible property names are escaped according to the following rules when accessed in the
expression:

| escape sequence   | property name equivalent |
| ----------------- | ------------------------ |
| `__underscores__` | `__` |
| `__dot__`         | `.` |
| `__dash__`        | `-` |
| `__slash__`       | `/` |
| `__{keyword}__`   | [CEL RESERVED keyword](https://github.com/google/cel-spec/blob/v0.6.0/doc/langdef.md#syntax) |

{{< note >}}
A **CEL reserved** keyword only needs to be escaped if the token is an exact match
for the reserved keyword.
For example, `int` in the word "sprint" would not be escaped.
{{< /note >}}

Examples on escaping:

| property name | rule with escaped property name     |
| ------------- | ----------------------------------- |
| namespace     | `object.__namespace__ > 0`          |
| x-prop        | `object.x__dash__prop > 0`          |
| redact__d     | `object.redact__underscores__d > 0` |
| string        | `object.startsWith('kube')`         |

Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type:
@ -35,7 +35,7 @@ The **flowcontrol.apiserver.k8s.io/v1beta2** API version of FlowSchema and Prior

### v1.27

-The **v1.27** release will stop serving the following deprecated API versions:
+The **v1.27** release stopped serving the following deprecated API versions:

#### CSIStorageCapacity {#csistoragecapacity-v127}
@ -262,6 +262,16 @@ Secret somewhere that your terminal / computer screen could be seen by an onlook

When you delete a ServiceAccount that has an associated Secret, the Kubernetes
control plane automatically cleans up the long-lived token from that Secret.

{{< note >}}
If you view the ServiceAccount using:

`kubectl get serviceaccount build-robot -o yaml`

you can't see the `build-robot-secret` Secret in the ServiceAccount API object's
[`.secrets`](/docs/reference/kubernetes-api/authentication-resources/service-account-v1/) field,
because that field is only populated with auto-generated Secrets.
{{< /note >}}

## Add ImagePullSecrets to a service account

First, [create an imagePullSecret](/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod).
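
As a sketch of that flow (the registry address, credentials, and Secret name below are illustrative assumptions):

```shell
# Create a registry credential Secret, then attach it to the default ServiceAccount
kubectl create secret docker-registry myregistrykey \
  --docker-server=registry.example.com \
  --docker-username=jane \
  --docker-password='<password>' \
  --docker-email=jane@example.com

kubectl patch serviceaccount default \
  -p '{"imagePullSecrets": [{"name": "myregistrykey"}]}'
```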
@ -482,7 +482,7 @@ kind: Cluster

apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
-  image: kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac
+  image: kindest/node:v1.28.0@sha256:9f3ff58f19dcf1a0611d11e8ac989fdb30a28f40f236f59f0bea31fb956ccf5c
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration

@ -490,7 +490,7 @@ nodes:

    kubeletExtraArgs:
      seccomp-default: "true"
- role: worker
-  image: kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac
+  image: kindest/node:v1.28.0@sha256:9f3ff58f19dcf1a0611d11e8ac989fdb30a28f40f236f59f0bea31fb956ccf5c
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
@ -11,7 +11,7 @@ spec:

    localhostProfile: profiles/audit.json
  containers:
  - name: test-container
-    image: hashicorp/http-echo:0.2.3
+    image: hashicorp/http-echo:1.0
    args:
    - "-text=just made some syscalls!"
    securityContext:

@ -10,7 +10,7 @@ spec:

    type: RuntimeDefault
  containers:
  - name: test-container
-    image: hashicorp/http-echo:0.2.3
+    image: hashicorp/http-echo:1.0
    args:
    - "-text=just made some more syscalls!"
    securityContext:

@ -11,7 +11,7 @@ spec:

    localhostProfile: profiles/fine-grained.json
  containers:
  - name: test-container
-    image: hashicorp/http-echo:0.2.3
+    image: hashicorp/http-echo:1.0
    args:
    - "-text=just made some syscalls!"
    securityContext:

@ -11,7 +11,7 @@ spec:

    localhostProfile: profiles/violation.json
  containers:
  - name: test-container
-    image: hashicorp/http-echo:0.2.3
+    image: hashicorp/http-echo:1.0
    args:
    - "-text=just made some syscalls!"
    securityContext:
@ -0,0 +1,189 @@
|
|||
---
|
||||
reviewers:
|
||||
- ramrodo
|
||||
- krol3
|
||||
- electrocucaracha
|
||||
title: Volúmenes efímeros
|
||||
content_type: concept
|
||||
weight: 30
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
Este documento describe _volúmenes efímeros_ en Kubernetes. Se sugiere tener conocimiento previo sobre [volúmenes](/docs/concepts/storage/volumes/), en particular PersistentVolumeClaim y PersistentVolume.
|
||||
|
||||
<!-- body -->
|
||||
|
||||
Algunas aplicaciones requieren almacenamiento adicional, pero no les preocupa si esos datos se almacenan de manera persistente entre reinicios. Por ejemplo, los servicios de caché a menudo tienen limitaciones de tamaño de memoria y pueden trasladar datos poco utilizados a un almacenamiento más lento que la memoria, con un impacto mínimo en el rendimiento general.
|
||||
|
||||
Otras aplicaciones esperan que algunos datos de entrada de solo lectura estén presentes en archivos, como datos de configuración o claves secretas.
|
||||
|
||||
Los _volúmenes efímeros_ están diseñados para estos casos de uso. Debido a que los volúmenes siguen el ciclo de vida del Pod y se crean y eliminan junto con el Pod, los Pods pueden detenerse y reiniciarse sin estar limitados a la disponibilidad de algún volumen persistente.
|
||||
|
||||
Los volúmenes efímeros se especifican _en línea_ en la especificación del Pod, lo que simplifica la implementación y gestión de aplicaciones.
|
||||
|
||||
### Tipos de volúmenes efímeros
|
||||
|
||||
Kubernetes admite varios tipos diferentes de volúmenes efímeros para diversos propósitos:
|
||||
|
||||
- [emptyDir](/docs/concepts/storage/volumes/#emptydir): vacíos al inicio del Pod, con el almacenamiento proveniente localmente del directorio base de kubelet (generalmente el disco raíz) o la RAM.
|
||||
- [configMap](/docs/concepts/storage/volumes/#configmap),
|
||||
[downwardAPI](/docs/concepts/storage/volumes/#downwardapi),
|
||||
[secret](/docs/concepts/storage/volumes/#secret): inyectar diferentes tipos de datos de Kubernetes en un Pod.
|
||||
|
||||
- [CSI volúmenes efímeros](#csi-ephemeral-volumes):
|
||||
Similar a los tipos de volumen anteriores, pero proporcionados por controladores especiales {{< glossary_tooltip text="CSI" term_id="csi" >}} que [soportan específicamente esta característica](https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html)
|
||||
- [volúmenes efímeros genéricos](#generic-ephemeral-volumes), que pueden proporcionar todos los controladores de almacenamiento que también admiten volúmenes persistentes
|
||||
|
||||
`emptyDir`, `configMap`, `downwardAPI`, `secret` se proporcionan como [almacenamiento efímero local](/docs/concepts/configuration/manage-resources-containers/#local-ephemeral-storage).
|
||||
Ellos son administrados por kubelet en cada nodo.
|
||||
|
||||
Los volúmenes efímeros CSI _deben_ ser proporcionados por controladores de almacenamiento CSI de terceros.
|
||||
|
||||
Los volúmenes efímeros genéricos _pueden_ ser proporcionados por controladores de almacenamiento CSI de terceros, pero también por cualquier otro controlador de almacenamiento que admita la provisión dinámica. Algunos controladores CSI están escritos específicamente para volúmenes efímeros CSI y no admiten la provisión dinámica; por lo tanto, no se pueden utilizar para volúmenes efímeros genéricos.
|
||||
|
||||
La ventaja de utilizar controladores de terceros es que pueden ofrecer funcionalidades que Kubernetes en sí mismo no admite, como el almacenamiento con características de rendimiento diferentes al disco gestionado por kubelet o la inyección de datos diversos.

### CSI ephemeral volumes

{{< feature-state for_k8s_version="v1.25" state="stable" >}}

{{< note >}}
CSI ephemeral volumes are only supported by a subset of CSI drivers.
The Kubernetes CSI [drivers list](https://kubernetes-csi.github.io/docs/drivers.html) shows which drivers support ephemeral volumes.
{{< /note >}}

Conceptually, CSI ephemeral volumes are similar to the `configMap`,
`downwardAPI` and `secret` volume types: the storage is managed locally on each node and is created together with other local resources after a Pod has been scheduled onto a node. Kubernetes has no concept of rescheduling Pods anymore at this stage. Volume creation has to be unlikely to fail,
otherwise Pod startup gets stuck. In particular, [storage capacity aware Pod scheduling](/docs/concepts/storage/storage-capacity/) is _not_ supported for these volumes. They are currently also not covered by the storage resource usage limits of a Pod, because that is something that kubelet can only enforce for storage that it manages itself.

Here's an example manifest for a Pod that uses CSI ephemeral storage:

```yaml
kind: Pod
apiVersion: v1
metadata:
  name: my-csi-app
spec:
  containers:
    - name: my-frontend
      image: busybox:1.28
      volumeMounts:
        - mountPath: "/data"
          name: my-csi-inline-vol
      command: ["sleep", "1000000"]
  volumes:
    - name: my-csi-inline-vol
      csi:
        driver: inline.storage.kubernetes.io
        volumeAttributes:
          foo: bar
```

The `volumeAttributes` determine what volume is prepared by the driver. These attributes are specific to each driver and not standardized. See each CSI driver's documentation for further instructions.

### CSI driver restrictions

CSI ephemeral volumes allow users to provide `volumeAttributes` directly to the CSI driver as part of the Pod spec. A CSI driver allowing `volumeAttributes` that are typically restricted to administrators is NOT suitable for use in an inline ephemeral volume. For example, parameters that are normally defined in the StorageClass should not be exposed to users through the use of inline ephemeral volumes.

Cluster administrators who need to restrict the CSI drivers that are allowed to be used as inline volumes within a Pod spec may do so by:

- Removing `Ephemeral` from `volumeLifecycleModes` in the CSIDriver spec, which prevents the driver from being used as an inline ephemeral volume (see the sketch after this list).

- Using an [admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/)
  to restrict how this driver is used.
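
As a minimal sketch of the first option: a CSIDriver object whose `volumeLifecycleModes` lists only `Persistent` cannot be used as an inline ephemeral volume. The driver name below is a placeholder, not a real driver:

```yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: example.csi.example.org   # hypothetical driver name
spec:
  # Omitting "Ephemeral" here prevents Pods from referencing this driver
  # through an inline CSI ephemeral volume.
  volumeLifecycleModes:
    - Persistent
```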

### Generic ephemeral volumes

{{< feature-state for_k8s_version="v1.23" state="stable" >}}

Generic ephemeral volumes are similar to `emptyDir` volumes in the sense that they provide a per-Pod directory for scratch data that is usually empty after provisioning. But they may also have additional features:

- Storage can be local or network-attached.
- Volumes can have a fixed size that Pods are not able to exceed.
- Volumes may have some initial data, depending on the driver and parameters.
- Typical operations on volumes are supported, assuming that the driver supports them, including
  [snapshotting](/docs/concepts/storage/volume-snapshots/),
  [cloning](/docs/concepts/storage/volume-pvc-datasource/),
  [resizing](/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims),
  and [storage capacity tracking](/docs/concepts/storage/storage-capacity/).

Example:

```yaml
kind: Pod
apiVersion: v1
metadata:
  name: my-app
spec:
  containers:
    - name: my-frontend
      image: busybox:1.28
      volumeMounts:
        - mountPath: "/scratch"
          name: scratch-volume
      command: ["sleep", "1000000"]
  volumes:
    - name: scratch-volume
      ephemeral:
        volumeClaimTemplate:
          metadata:
            labels:
              type: my-frontend-volume
          spec:
            accessModes: ["ReadWriteOnce"]
            storageClassName: "scratch-storage-class"
            resources:
              requests:
                storage: 1Gi
```

### Lifecycle and PersistentVolumeClaim

The key design idea is that the [parameters for a volume claim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#ephemeralvolumesource-v1-core)
are allowed inside a volume source of the Pod. Labels, annotations and
the whole set of fields for a PersistentVolumeClaim are supported. When such a Pod gets created, the ephemeral volume controller then creates an actual PersistentVolumeClaim object in the same namespace as the Pod and ensures that the PersistentVolumeClaim
gets deleted when the Pod gets deleted.

That triggers volume binding and/or provisioning, either immediately if the {{< glossary_tooltip text="StorageClass" term_id="storage-class" >}} uses immediate volume binding or when the Pod is tentatively scheduled onto a node (`WaitForFirstConsumer` volume binding mode). The latter is recommended for generic ephemeral volumes because then the scheduler is free to choose a suitable node for the Pod. With immediate binding, the scheduler is forced to select a node that has access to the volume once it is available.

In terms of [resource ownership](/docs/concepts/architecture/garbage-collection/#owners-dependents),
a Pod that has generic ephemeral storage is the owner of the PersistentVolumeClaim(s) that provide that ephemeral storage. When the Pod is deleted, the Kubernetes garbage collector deletes the PVC, which then usually triggers deletion of the volume because the default reclaim policy of storage classes is to delete volumes.
You can create quasi-ephemeral local storage using a StorageClass with a reclaim policy of `retain`: the storage outlives the Pod, and in this case you need to ensure that volume clean up happens separately.
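
As a sketch of the binding and reclaim behavior just described, a StorageClass for the example above might look like this; the provisioner name is a hypothetical placeholder:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: scratch-storage-class           # the class referenced in the example above
provisioner: example.csi.example.org    # hypothetical CSI provisioner
# Recommended for generic ephemeral volumes: the scheduler first picks a
# suitable node, then the volume is provisioned for it.
volumeBindingMode: WaitForFirstConsumer
# Delete is the default; Retain would make the storage outlive the Pod,
# in which case you must clean up the volume separately.
reclaimPolicy: Delete
```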

While these PVCs exist, they can be used like any other PVC. In particular, they can be referenced as data source in volume cloning or snapshotting. The PVC object also holds the current status of the volume.

### PersistentVolumeClaim naming

Naming of the automatically created PVCs is deterministic: the name is a combination of the Pod name and the volume name, with a hyphen (`-`) in the middle. In the example above, the PVC name will be `my-app-scratch-volume`. This deterministic naming makes it easier to interact with the PVC because one does not have to search for it once the Pod name and volume name are known.

The deterministic naming also introduces a potential conflict between different Pods (a Pod "pod-a" with volume "scratch" and another Pod with name "pod" and volume "a-scratch" both end up with the same PVC name "pod-a-scratch") and between Pods and manually created PVCs.

Such conflicts are detected: a PVC is only used for an ephemeral volume if it was created for the Pod. This check is based on the ownership relation. An existing PVC is not overwritten or modified. But this does not resolve the conflict because, without the right PVC, the Pod cannot start.

{{< caution >}}
Take care when naming Pods and volumes inside the same namespace so that these conflicts can't occur.
{{< /caution >}}

### Security

Using generic ephemeral volumes allows users to create PVCs indirectly if they can create Pods, even if they do not have permission to create PVCs directly. Cluster administrators must be aware of this. If this does not fit their security model, they should use an [admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/) that rejects objects like Pods that have a generic ephemeral volume.

The normal [namespace quota for PVCs](/docs/concepts/policy/resource-quotas/#storage-resource-quota) still applies, so even if users are allowed to use this new mechanism, they cannot use it to circumvent other policies.
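
For illustration, a quota along these lines would also cap PVCs created indirectly through generic ephemeral volumes; the name, namespace, and limits below are hypothetical:

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: storage-quota       # hypothetical name
  namespace: team-a         # hypothetical namespace
spec:
  hard:
    # Total number of PersistentVolumeClaims allowed in the namespace,
    # including PVCs created for generic ephemeral volumes.
    persistentvolumeclaims: "10"
    # Total storage that all PVCs in the namespace may request.
    requests.storage: 50Gi
```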

## {{% heading "whatsnext" %}}

### Ephemeral volumes managed by kubelet

See [local ephemeral storage](/docs/concepts/configuration/manage-resources-containers/#local-ephemeral-storage).

### CSI ephemeral volumes

- For more information on the design, see the
  [Ephemeral Inline CSI volumes KEP](https://github.com/kubernetes/enhancements/blob/ad6021b3d61a49040a3f835e12c8bb5424db2bbb/keps/sig-storage/20190122-csi-inline-volumes.md).
- For more information on further development of this feature, see the
  [enhancement tracking issue #596](https://github.com/kubernetes/enhancements/issues/596).

### Generic ephemeral volumes

- For more information on the design, see the
  [Generic ephemeral inline volumes KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/1698-generic-ephemeral-volumes/README.md).

@@ -4,6 +4,7 @@ abstract: "Automated container deployment, scaling, and management"
cid: home
---

{{< site-searchbar >}}

{{< blocks/section id="oceanNodes" >}}
{{% blocks/feature image="flower" %}}

@@ -4,6 +4,8 @@ abstract: Automated deployment, scaling, and management of containers
cid: home
---

{{< site-searchbar >}}

{{< blocks/section id="oceanNodes" >}}
{{% blocks/feature image="flower" %}}
### [Kubernetes (K8s)]({{< relref "/docs/concepts/overview/what-is-kubernetes" >}}) is open-source software for automating deployment, scaling, and management of containerized applications.

@@ -10,7 +10,7 @@ weight: 20
<!-- overview -->
This page shows how to use Cilium for NetworkPolicy.

For background information on Cilium, read the [Introduction to Cilium](https://docs.cilium.io/en/stable/intro).
For background information on Cilium, read the [Introduction to Cilium](https://docs.cilium.io/en/stable/overview/intro).

## {{% heading "prerequisites" %}}

@@ -2,259 +2,241 @@
title: Community
layout: basic
cid: community
community_styles_migrated: true
---

<img
  id="banner"
  srcset="/images/community/kubernetes-community-final-02.jpg 1500w, /images/community/kubernetes-community-02-mobile.jpg 900w"
  sizes="(max-width: 900px) 900px, (max-width: 1920px) 1500px"
  src="/images/community/kubernetes-community-final-02.jpg"
  alt="A gallery of Kubernetes conferences">

<div class="newcommunitywrapper">
<div class="banner1">
<img src="/images/community/kubernetes-community-final-02.jpg" alt="A gallery of Kubernetes conferences" style="width:100%;padding-left:0px" class="desktop">
<img src="/images/community/kubernetes-community-02-mobile.jpg" alt="A gallery of Kubernetes conferences" style="width:100%;padding-left:0px" class="mobile">
<div class="community-section" id="introduction">
<p>The Kubernetes community -- users, contributors, and the culture we've
built together -- is one of the biggest reasons for the meteoric rise of
this open source project. Our culture and values continue to grow and change
as the project itself grows and changes. We all work together toward constant
improvement of the project and the ways we work on it.</p>
<p>We are the people who file issues and pull requests, attend SIG meetings,
Kubernetes meetups, and KubeCon, advocate for its adoption and innovation,
run <code>kubectl get pods</code>, and contribute in a thousand other vital
ways. Read on to learn how you can get involved and become part of this amazing
community.</p>
</div>

<div class="intro">
<br class="mobile">
<p>The Kubernetes community -- users, contributors, and the culture we've built together -- is one of the biggest reasons for the meteoric rise of this open source project. Our culture and values continue to grow and change as the project itself grows and changes. We all work together toward constant improvement of the project and the ways we work on it.
<br><br>We are the people who file issues and pull requests, attend SIG meetings, Kubernetes meetups, and KubeCon, advocate for its adoption and innovation, run <code>kubectl get pods</code>, and contribute in a thousand other vital ways. Read on to learn how you can get involved and become part of this amazing community.</p>
</div>
<div class="community__navbar">

<a href="https://www.kubernetes.dev/">Contributor Community</a>
<a href="#values">Community Values</a>
<a href="#conduct">Code of conduct</a>
<a href="#videos">Videos</a>
<a href="#discuss">Discussions</a>
<a href="#events">Events and meetups</a>
<a href="#news">News</a>
<a href="/releases">Releases</a>

</div>
<br class="mobile"><br class="mobile">
<div class="imagecols">
<br class="mobile">
<div class="imagecol">
<img src="/images/community/kubernetes-community-final-03.jpg" alt="A gallery of Kubernetes conferences" style="width:100%" class="desktop">
<div id="navigation-items">
<div class="community-nav-item external-link">
<a href="https://www.kubernetes.dev/">Contributor community</a>
</div>

<div class="imagecol">
<img src="/images/community/kubernetes-community-final-04.jpg" alt="A gallery of Kubernetes conferences" style="width:100%" class="desktop">
<div class="community-nav-item">
<a href="#values">Community values</a>
</div>

<div class="imagecol" style="margin-right:0% important">
<img src="/images/community/kubernetes-community-final-05.jpg" alt="A gallery of Kubernetes conferences" style="width:100%;margin-right:0% important" class="desktop">
<div class="community-nav-item">
<a href="#conduct">Code of conduct</a>
</div>
<div class="community-nav-item">
<a href="#videos">Videos</a>
</div>
<div class="community-nav-item">
<a href="#discuss">Discussions</a>
</div>
<div class="community-nav-item">
<a href="#meetups">Meetups</a>
</div>
<div class="community-nav-item">
<a href="#news">News</a>
</div>
<div class="community-nav-item">
<a href="/releases">Releases</a>
</div>
<img src="/images/community/kubernetes-community-04-mobile.jpg" alt="A gallery of Kubernetes conferences" style="width:100%;margin-bottom:3%" class="mobile">
<a name="values"></a>
</div>

<div><a name="values"></a></div>
<div class="conduct">
<div class="conducttext">
<br class="mobile"><br class="mobile">
<br class="tablet"><br class="tablet">
<div class="conducttextnobutton" style="margin-bottom:2%"><h1>Community Values</h1>
The Kubernetes Community values are the keystone to the ongoing success of the project.<br>
These principles guide every aspect of the Kubernetes project.
<br>
<a href="/community/values/">
<br class="mobile"><br class="mobile">
<span class="fullbutton">
READ MORE
</span>
<div class="community-section" id="gallery">
<img src="/images/community/kubernetes-community-final-03.jpg" alt="A gallery of Kubernetes conferences" class="community-gallery-desktop">
<img src="/images/community/kubernetes-community-final-04.jpg" alt="A gallery of Kubernetes conferences" class="community-gallery-desktop">
<img src="/images/community/kubernetes-community-final-05.jpg" alt="A gallery of Kubernetes conferences" class="community-gallery-desktop">
<img src="/images/community/kubernetes-community-04-mobile.jpg" alt="A gallery of Kubernetes conferences" class="community-gallery-mobile">
</div>

<div class="community-section" id="values">
<h2>Community Values</h2>
<p>
The Kubernetes Community values are the keystone to the ongoing success of the project.<br class="optional"/>
These principles guide every aspect of the Kubernetes project.
</p>
<a href="https://www.kubernetes.dev/community/values/" class="community-cta-button">
<span class="community-cta">Read more</span>
</a>
</div><a name="conduct"></a>
</div>
<div class="community-section" id="conduct">
<h2>Code of Conduct</h2>
<p>The Kubernetes community values respect and inclusiveness, and enforces a Code of Conduct in all interactions.</p>
<p>
If you notice a violation of the Code of Conduct at an event or meeting, in <a href="#slack">Slack</a>, or in another communication mechanism,
reach out to the Kubernetes Code of Conduct Committee at <a href="mailto:conduct@kubernetes.io" style="color:#0662EE;font-weight:300">conduct@kubernetes.io</a>.
All reports are kept confidential. You can read
<a href="https://github.com/kubernetes/community/tree/master/committee-code-of-conduct">about the committee</a>
in the Kubernetes community repository on GitHub.
</p>
<a href="/community/code-of-conduct/" class="community-cta-button">
<span class="community-cta">Read more</span>
</a>
</div>
<div id="videos" class="community-section">
<h2>Videos</h2>

<p class="community-simple">Kubernetes is on YouTube, a lot. Subscribe for a wide range of topics.</p>

<div class="container">
<div class="video youtube">
<iframe src="https://www.youtube.com/embed/videoseries?list=PL69nYSiGNLP3azFUvYJjGn45YbF6C-uIg" title="Monthly office hours" allow="camera 'none'; microphone 'none'; geolocation 'none'; fullscreen https://www.youtube.com/" ></iframe>
<a href="https://www.youtube.com/playlist?list=PL69nYSiGNLP3azFUvYJjGn45YbF6C-uIg">
<span class="videocta">Watch monthly office hours ▶</span>
</a>
</div>

<div class="video youtube">
<iframe src="https://www.youtube.com/embed/videoseries?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ" title="Weekly community meetings" allow="camera 'none'; microphone 'none'; geolocation 'none'; fullscreen https://www.youtube.com/"></iframe>
<a href="https://www.youtube.com/playlist?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ">
<span class="videocta">Watch weekly community meetings ▶</span>
</a>
</div>

<div class="video youtube" id="discuss">
<iframe src="https://www.youtube.com/embed/videoseries?list=PL69nYSiGNLP3QpQrhZq_sLYo77BVKv09F" title="Talks from community members" allow="camera 'none'; microphone 'none'; geolocation 'none'; fullscreen https://www.youtube.com/"></iframe>
<a href="https://www.youtube.com/playlist?list=PL69nYSiGNLP3QpQrhZq_sLYo77BVKv09F">
<span class="videocta">Watch a talk from a community member ▶</span>
</a>
</div>
</div>
</div>
<div id="resources" class="community-section">
<h2>Discussions</h2>

<p class="community-simple">We talk a lot. Find us and join the conversation on any of these platforms.</p>

<div class="container">
<div class="community-resource">
<a href="https://discuss.kubernetes.io/">
<img src="/images/community/discuss.png" alt="Forum">
</a>
<a href="https://discuss.kubernetes.io/">Community forums ▶</a>
<p>Topic-based technical discussions that bridge docs,
troubleshooting, and so much more.</p>
</div>

<div id="twitter" class="community-resource">
<a href="https://twitter.com/kubernetesio">
<img src="/images/community/twitter.png" alt="Twitter">
</a>
<a href="https://twitter.com/kubernetesio">Twitter ▶</a>
<p><em>#kubernetesio</em></p>
<p>Real-time announcements of blog posts, events, news, ideas.</p>
</div>

<div id="github" class="community-resource">
<a href="https://github.com/kubernetes/kubernetes">
<img src="/images/community/github.png" alt="GitHub">
</a>
<a href="https://github.com/kubernetes/kubernetes">GitHub ▶</a>
<p>All the project and issue tracking, plus of course code.</p>
</div>

<div id="server-fault" class="community-resource">
<a href="https://serverfault.com/questions/tagged/kubernetes">
<img src="/images/community/serverfault.png" alt="Server Fault">
</a>
<a href="https://serverfault.com/questions/tagged/kubernetes">Server Fault ▶</a>
<p>Kubernetes-related discussion on Server Fault. Ask a question, or answer one.</p>
</div>

<div id="slack" class="community-resource">
<a href="https://kubernetes.slack.com/">
<img src="/images/community/slack.png" alt="Slack">
</a>
<a href="https://kubernetes.slack.com/">Slack ▶</a>
<p>With 170+ channels, you'll find one that fits your needs.</p>
<details>
<summary><em>Need an invitation?</em></summary>
Visit <a href="https://slack.k8s.io/">https://slack.k8s.io/</a>
for an invitation.
</details>
</div>
</div>

<div class="conduct">
<div class="conducttext">
<br class="mobile"><br class="mobile">
<br class="tablet"><br class="tablet">
<div class="conducttextnobutton" style="margin-bottom:2%"><h1>Code of Conduct</h1>
The Kubernetes community values respect and inclusiveness, and enforces a Code of Conduct in all interactions. If you notice a violation of the Code of Conduct at an event or meeting, in Slack, or in another communication mechanism, reach out to the Kubernetes Code of Conduct Committee at <a href="mailto:conduct@kubernetes.io" style="color:#0662EE;font-weight:300">conduct@kubernetes.io</a>. All reports are kept confidential. You can read about the committee <a href="https://github.com/kubernetes/community/tree/master/committee-code-of-conduct" style="color:#0662EE;font-weight:300">here</a>.
<br>
<a href="https://kubernetes.io/community/code-of-conduct/">
<br class="mobile"><br class="mobile">

<span class="fullbutton">
READ MORE
</span>
</a>
</div><a name="videos"></a>
</div>
</div>
<div class="videos">
<br class="mobile"><br class="mobile">
<br class="tablet"><br class="tablet">
<h1 style="margin-top:0px">Videos</h1>

<div style="margin-bottom:4%;font-weight:300;text-align:center;padding-left:10%;padding-right:10%">We're on YouTube,
a lot. Subscribe for a wide range of topics.</div>

<div class="videocontainer">

<div class="video">

<iframe width="100%" height="250"
src="https://www.youtube.com/embed/videoseries?list=PL69nYSiGNLP3azFUvYJjGn45YbF6C-uIg" title="Monthly office hours" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>

<a href="https://www.youtube.com/playlist?list=PL69nYSiGNLP3azFUvYJjGn45YbF6C-uIg">
<div class="videocta">
Watch monthly office hours ▶</div>
</a>
<div class="community-section" id="events">
<div class="container">
<h2>Upcoming Events</h2>
{{< upcoming-events >}}
</div>
</div>

<div class="video">
<iframe width="100%" height="250"
src="https://www.youtube.com/embed/videoseries?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ" title="Weekly community meetings" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
<a href="https://www.youtube.com/playlist?list=PL69nYSiGNLP1pkHsbPjzAewvMgGUpkCnJ">
<div class="videocta">
Watch weekly community meetings ▶
</div>
</a>
</div>

<div class="video">

<iframe width="100%" height="250"
src="https://www.youtube.com/embed/videoseries?list=PL69nYSiGNLP3QpQrhZq_sLYo77BVKv09F" title="Talks from community members" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>

<a href="https://www.youtube.com/playlist?list=PL69nYSiGNLP3QpQrhZq_sLYo77BVKv09F">
<div class="videocta">
Watch a talk from a community member ▶
</div>

</a>
<a name="discuss"></a>
</div>
</div>
</div>
<div class="resources">
<br class="mobile"><br class="mobile">
<br class="tablet"><br class="tablet">
<h1 style="padding-top:1%">Discussions</h1>

<div style="font-weight:300;text-align:center">We talk a lot. Find us and join the conversation on any of
these platforms.</div>

<div class="resourcecontainer">

<div class="resourcebox">
<img src="/images/community/discuss.png" alt="Forum" style="width:80%;padding-bottom:2%">
<a href="https://discuss.kubernetes.io/" style="color:#0662EE;display:block;margin-top:1%">
Forum ▶
</a>
<div class="resourceboxtext" style="font-size:12px;text-transform:none !important;font-weight:300;line-height:1.4em;color:#333333;margin-top:4%">
Topic-based technical discussions that bridge docs, StackOverflow, and so much more
</div>
</div>

<div class="resourcebox">
<img src="/images/community/twitter.png" alt="Twitter" style="width:80%;padding-bottom:2%">
<a href="https://twitter.com/kubernetesio" style="color:#0662EE;display:block;margin-top:1%">
Twitter ▶
</a>
<div class="resourceboxtext" style="font-size:12px;text-transform:none
!important;font-weight:300;line-height:1.4em;color:#333333;margin-top:4%">Real-time announcements of blog posts,
events, news, ideas</div>
</div>

<div class="resourcebox">
<img src="/images/community/github.png" alt="GitHub" style="width:80%;padding-bottom:2%">
<a href="https://github.com/kubernetes/kubernetes" style="color:#0662EE;display:block;margin-top:1%">
github ▶
</a>
<div class="resourceboxtext" style="font-size:12px;text-transform:none !important;font-weight:300;line-height:1.4em;color:#333333;margin-top:4%">
All the project and issue tracking, plus of course code
</div>
</div>

<div class="resourcebox">
<img src="/images/community/stack.png" alt="Stack Overflow" style="width:80%;padding-bottom:2%">
<a href="https://stackoverflow.com/search?q=kubernetes" style="color:#0662EE;display:block;margin-top:1%">
StackOverflow ▶
</a>
<div class="resourceboxtext" style="font-size:12px;text-transform:none !important;font-weight:300;line-height:1.4em;color:#333333;margin-top:4%">
Technical troubleshooting for any use case
<a name="events"></a>
</div>
</div>

</div>
</div>
<div class="events">
<br class="mobile"><br class="mobile">
<br class="tablet"><br class="tablet">
<div class="eventcontainer">
<h1 style="color:white !important">Upcoming Events</h1>
{{< upcoming-events >}}
</div>
</div>

<div class="meetups">
<div class="meetupcol">
<div class="meetuptext">
<h1 style="text-align:left">Global Community</h1>
With over 150 meetups in the world and growing, go find your local kube people. If one isn't near, take charge and create your own.
</div>
<a href="https://www.meetup.com/topics/kubernetes/">
<div class="button">
FIND A MEETUP
</div>
</a>
<a name="news"></a>
</div>
</div>

<div class="news">
<br class="mobile"><br class="mobile">
<br class="tablet"><br class="tablet">
<h1 style="margin-bottom:2%">Recent News</h1>

<br>
<div class="twittercol1">
<a class="twitter-timeline" data-tweet-limit="1" href="https://twitter.com/kubernetesio?ref_src=twsrc%5Etfw">Tweets by kubernetesio</a> <script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
</div>

<br>
<br><br><br><br>
<div class="community-section" id="meetups">
<h2>Global community</h2>
<p>
With over 150 meetups in the world and growing, go find your local kube people. If one isn't near, take charge and create your own.
</p>
<a href="https://www.meetup.com/topics/kubernetes/" class="community-cta-button">
<span class="community-cta">Find a meetup</span>
</a>
</div>

<div class="community-section community-frame" id="news">
<h2>Recent News</h2>
<div class="twittercol1">
<a class="twitter-timeline" data-tweet-limit="1" href="https://twitter.com/kubernetesio?ref_src=twsrc%5Etfw">Tweets by kubernetesio</a>
</div>
</div>

@@ -864,6 +864,11 @@ poorly-behaved workloads that may be harming system health.
Monitoring these metrics helps you determine whether your configuration is unduly limiting important traffic, or to discover poorly-behaved workloads that may be harming system health.

#### Maturity level BETA

* `apiserver_flowcontrol_rejected_requests_total` is a counter vector
  (cumulative since server start) of requests that were rejected,
@@ -874,8 +879,8 @@ poorly-behaved workloads that may be harming system health.
  broken down by the labels `flow_schema` (indicating the FlowSchema that
  matched the request), `priority_level` (indicating the one to which the
  request was assigned), and `reason`. The `reason` label will be one of the
  following values:

@@ -902,8 +907,55 @@ poorly-behaved workloads that may be harming system health.

* `apiserver_flowcontrol_dispatched_requests_total` is a counter vector
  (cumulative since server start) of requests that began executing,
  broken down by `flow_schema` and `priority_level`.

* `apiserver_flowcontrol_current_inqueue_requests` is a gauge vector
  holding the instantaneous number of queued (not executing) requests,
  broken down by `priority_level` and `flow_schema`.

* `apiserver_flowcontrol_current_executing_requests` is a gauge vector
  holding the instantaneous number of executing (not waiting in a
  queue) requests, broken down by `priority_level` and `flow_schema`.

* `apiserver_flowcontrol_current_executing_seats` is a gauge vector
  holding the instantaneous number of occupied seats, broken down by
  `priority_level` and `flow_schema`.

* `apiserver_flowcontrol_request_wait_duration_seconds` is a histogram
  vector of how long requests spent queued, broken down by the labels
  `flow_schema`, `priority_level`, and `execute`. The `execute` label
  indicates whether the request has started executing.

{{< note >}}
Since each FlowSchema always assigns requests to a single
PriorityLevelConfiguration, you can add the histograms for all the
FlowSchemas for one priority level to get the effective histogram for
requests assigned to that priority level.
{{< /note >}}

#### Maturity level ALPHA

* `apiserver_current_inqueue_requests` is a gauge vector of recent
@@ -915,13 +967,22 @@ poorly-behaved workloads that may be harming system health.
  high water marks of the number of queued requests, grouped by a label
  named `request_kind` whose value is `mutating` or `readOnly`. These
  high water marks describe the largest number seen in the most recently
  completed one-second window. They complement the older gauge vector
  `apiserver_current_inflight_requests`, which holds the last window's
  high water mark of the number of requests actively being served.

* `apiserver_current_inqueue_seats` is a gauge vector of the sum over
  queued requests of the largest number of seats each will occupy,
  grouped by labels named `flow_schema` and `priority_level`.

* `apiserver_flowcontrol_read_vs_write_current_requests` is a
  histogram vector of observations, made at the end of every
@@ -934,36 +995,18 @@ poorly-behaved workloads that may be harming system health.
  nanosecond, of the number of requests broken down by the labels
  `phase` (which takes the values `waiting` and `executing`) and
  `request_kind` (which takes the values `mutating` and `readOnly`).
  Each observed value is a ratio, between 0 and 1, of the number of
  requests divided by the corresponding limit on the number of requests
  (the queue length limit for waiting and the concurrency limit for
  executing).

* `apiserver_flowcontrol_current_inqueue_requests` is a gauge vector
  holding the instantaneous number of queued (not executing) requests,
  broken down by `priority_level` and `flow_schema`.

* `apiserver_flowcontrol_current_executing_requests` is a gauge vector
  holding the instantaneous number of executing (not waiting in a
  queue) requests, broken down by `priority_level` and `flow_schema`.

* `apiserver_flowcontrol_request_concurrency_in_use` is a gauge vector
  holding the instantaneous number of occupied seats, broken down by
  `priority_level` and `flow_schema`.

* `apiserver_flowcontrol_priority_level_request_utilization` is a
@@ -977,7 +1020,7 @@ poorly-behaved workloads that may be harming system health.
  histogram vector of observations, made at the end of every nanosecond,
  of the number of requests broken down by the labels `phase` (which
  takes the values `waiting` and `executing`) and `priority_level`.
  Each observed value is a ratio, between 0 and 1, of the number of
  requests divided by the corresponding limit on the number of requests
  (the queue length limit for waiting and the concurrency limit for
  executing).

@@ -996,7 +1039,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_priority_level_seat_utilization` is a
  histogram vector of observations, made at the end of every nanosecond,
  of a priority level's utilization of its concurrency limit, broken
  down by `priority_level`. This utilization is the fraction
  (number of seats occupied) / (concurrency limit). This metric considers
  all stages of execution (both normal and the extra delay at the end of
  a write to cover for the corresponding notification work) of all
  requests except WATCHes; for those it considers only the initial stage
  that delivers notifications of pre-existing objects.

@@ -1011,7 +1054,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_request_queue_length_after_enqueue` is a
  histogram vector of queue lengths, broken down by `priority_level`
  and `flow_schema`. Each request that gets queued contributes one
  sample to its histogram, reporting the length of the queue immediately
  after the request was added. Note that this produces different
  statistics than an unbiased survey would.

@@ -1042,26 +1085,17 @@ poorly-behaved workloads that may be harming system health.
  This is always equal to `apiserver_flowcontrol_current_limit_seats`
  (which did not exist as a distinct metric in the past).

* `apiserver_flowcontrol_nominal_limit_seats` is a gauge vector holding
  each priority level's nominal concurrency limit, computed from the
  API server's total concurrency limit and the priority level's
  configured nominal concurrency shares.

* `apiserver_flowcontrol_lower_limit_seats` is a gauge vector holding
  the lower bound on each priority level's dynamic concurrency limit.

* `apiserver_flowcontrol_upper_limit_seats` is a gauge vector holding
  the upper bound on each priority level's dynamic concurrency limit.

* `apiserver_flowcontrol_demand_seats` is a histogram vector counting
@@ -1081,7 +1115,7 @@ poorly-behaved workloads that may be harming system health.

* `apiserver_flowcontrol_demand_seats_high_watermark` is a gauge vector
  holding, for each priority level, the maximum seat demand seen during
  the last concurrency borrowing adjustment period.

@@ -1089,7 +1123,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_demand_seats_average` is a gauge vector
  holding, for each priority level, the time-weighted average seat
  demand seen during the last concurrency borrowing adjustment period.

@@ -1098,7 +1132,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_demand_seats_stdev` is a gauge vector holding,
  for each priority level, the time-weighted population standard
  deviation of seat demand seen during the last concurrency borrowing
  adjustment period.

@@ -1106,7 +1140,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_demand_seats_smoothed` is a gauge vector
  holding, for each priority level, the smoothed enveloped seat demand
  determined at the last concurrency adjustment.

@@ -1114,14 +1148,14 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_target_seats` is a gauge vector holding, for
  each priority level, the concurrency target going into the borrowing
  allocation problem.

* `apiserver_flowcontrol_seat_fair_frac` is a gauge holding the fair
  allocation fraction determined in the last borrowing adjustment.

@@ -1129,31 +1163,9 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_current_limit_seats` is a gauge vector
  holding, for each priority level, the dynamic concurrency limit
  derived in the last adjustment.

* `apiserver_flowcontrol_request_wait_duration_seconds` is a histogram
  vector of how long requests spent queued, broken down by the labels
  `flow_schema`, `priority_level`, and `execute`. The `execute` label
  indicates whether the request has started executing.

{{< note >}}
Since each FlowSchema always assigns requests to a single
PriorityLevelConfiguration, you can add the histograms for all the
FlowSchemas for one priority level to get the effective histogram for
requests assigned to that priority level.
{{< /note >}}

@@ -1161,7 +1173,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_request_execution_seconds` is a histogram
  vector of how long requests took to actually execute, broken down by
  `flow_schema` and `priority_level`.

@@ -1170,7 +1182,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_watch_count_samples` is a histogram vector of
  the number of active WATCH requests relevant to a given write, broken
  down by `flow_schema` and `priority_level`.

@@ -1180,7 +1192,7 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_work_estimated_seats` is a histogram vector of
  the number of estimated seats (maximum of the initial and final stage
  of execution) associated with requests, broken down by `flow_schema`
  and `priority_level`.

@@ -1191,7 +1203,17 @@ poorly-behaved workloads that may be harming system health.
* `apiserver_flowcontrol_request_dispatch_no_accommodation_total` is a
  counter of the number of events that in principle could have led to a
  request being dispatched but did not, due to lack of available
  concurrency, broken down by `flow_schema` and `priority_level`.

* `apiserver_flowcontrol_epoch_advance_total` is a counter vector of the
  number of attempts to jump a priority level's progress meter backward
  to avoid numeric overflow, grouped by `priority_level` and `success`.

<!--
## Good practices for using API Priority and Fairness

@@ -28,6 +28,17 @@ scheduler decisions).

<!-- body -->

{{< warning >}}
In contrast to the command line flags described here, the *log
output* itself does *not* fall under the Kubernetes API stability guarantees:
individual log entries and their formatting may change from one release
to the next!
{{< /warning >}}

## Klog

<!--

@@ -35,7 +35,7 @@ Kubernetes components emit metrics in [Prometheus format](https://prometheus.io/docs/instrumentin
## Metrics in Kubernetes

In most cases metrics are available on the `/metrics` endpoint of the HTTP server. For components that
doesn't expose endpoint by default it can be enabled using `--bind-address` flag.
don't expose endpoint by default, it can be enabled using `--bind-address` flag.

Examples of those components:
-->
|
|||
weight: 40
|
||||
description: 打包应用及其运行依赖环境的技术。
|
||||
content_type: concept
|
||||
card:
|
||||
name: concepts
|
||||
weight: 50
|
||||
---
|
||||
<!--
|
||||
title: Containers
|
||||
weight: 40
|
||||
description: Technology for packaging an application along with its runtime dependencies.
|
||||
reviewers:
|
||||
- erictune
|
||||
- thockin
|
||||
content_type: concept
|
||||
card:
|
||||
name: concepts
|
||||
weight: 50
|
||||
-->
|
||||
|
||||
<!-- overview -->
|
||||
<!--
|
||||
|
|
|

@@ -209,7 +209,7 @@ clients that access it.

<!-- image source: https://docs.google.com/drawings/d/1k2YdJgNTtNfW7_A8moIIkij-DmVgEhNrn3y2OODwqQQ/view -->

{{< figure src="/docs/concepts/extend-kubernetes/extension-points.png"
{{< figure src="/docs/concepts/extend-kubernetes/extension-points.svg"
    alt="Symbolic representation of seven numbered Kubernetes extension points"
    class="diagram-large" caption="Kubernetes extension points" >}}

@@ -445,8 +445,8 @@ allows calling out to custom code that makes an authorization decision.
In other words, it does not make different decisions based on specific fields of the object.

If the built-in authorization options don't meet your needs, an
[authorization webhook](/zh-cn/docs/reference/access-authn-authz/webhook/)
allows calling out to user-provided code that makes a custom authorization decision.

<!--
### Dynamic admission control

@@ -645,4 +645,3 @@ not available through the webhook integration.
* Learn more about [Extending the API server](/zh-cn/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)
* Learn more about [Dynamic admission control](/zh-cn/docs/reference/access-authn-authz/extensible-admission-controllers/)
* Learn more about the [Operator pattern](/zh-cn/docs/concepts/extend-kubernetes/operator/)

@@ -417,7 +417,7 @@ Aggregated APIs offer more advanced API features and customization of other feat
<!--
| Feature | Description | CRDs | Aggregated API |
| ------- | ----------- | ---- | -------------- |
| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). Any other validations supported by addition of a [Validating Webhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). | Yes, arbitrary validation checks |
| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). The [CRDValidationRatcheting](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-ratcheting) feature gate allows failing validations specified using OpenAPI to be ignored if the failing part of the resource was unchanged. Any other validations are supported by adding a [Validating Webhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). | Yes, arbitrary validation checks |
| Defaulting | See above | Yes, either via [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#defaulting) `default` keyword (GA in 1.17), or via a [Mutating Webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) (though this will not be run when reading from etcd for old objects). | Yes |
| Multi-versioning | Allows serving the same object through two API versions. Can help ease API changes like renaming fields. Less important if you control your client versions. | [Yes](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning) | Yes |
| Custom Storage | If you need storage with a different performance mode (for example, a time-series database instead of key-value store) or isolation for security (for example, encryption of sensitive information, etc.) | No | Yes |

@@ -431,7 +431,7 @@ Aggregated APIs offer more advanced API features and customization of other feat
-->
| Feature | Description | CRDs | Aggregated API |
| ------- | ----------- | ---- | -------------- |
| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). Any other validations are supported by adding a [Validating Webhook](/zh-cn/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). | Yes, arbitrary validation checks |
| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). The [CRDValidationRatcheting](/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-ratcheting) feature gate allows failing validations specified using OpenAPI to be ignored if the failing part of the resource was unchanged. Any other validations are supported by adding a [Validating Webhook](/zh-cn/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). | Yes, arbitrary validation checks |
| Defaulting | See above | Yes, either via the [OpenAPI v3.0 validation](/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#defaulting) `default` keyword (GA in 1.17), or via a [Mutating Webhook](/zh-cn/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) (though this will not be run when reading from etcd for old objects). | Yes |
| Multi-versioning | Allows serving the same object through two API versions. Can help ease API changes like renaming fields. Less important if you control your client versions. | [Yes](/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning) | Yes |
| Custom Storage | If you need storage with a different performance mode (for example, a time-series database instead of key-value store) or isolation for security (for example, encryption of sensitive information, etc.) | No | Yes |
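
To ground the validation and defaulting rows above, here is a minimal sketch of a CRD version using OpenAPI v3 validation with a `default` value; the group, kind, and field names are hypothetical:

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: widgets.example.com          # hypothetical group/kind
spec:
  group: example.com
  names:
    plural: widgets
    singular: widget
    kind: Widget
  scope: Namespaced
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
              properties:
                replicas:
                  type: integer
                  minimum: 1         # OpenAPI v3 validation
                  default: 1         # defaulting via the `default` keyword
```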
@ -267,3 +267,12 @@ metadata:
|
|||
```
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
<!--
|
||||
- Learn more about [Cluster Networking](/docs/concepts/cluster-administration/networking/)
|
||||
- Learn more about [Network Policies](/docs/concepts/services-networking/network-policies/)
|
||||
- Learn about the [Troubleshooting CNI plugin-related errors](/docs/tasks/administer-cluster/migrating-from-dockershim/troubleshooting-cni-plugin-related-errors/)
|
||||
-->
|
||||
- 进一步了解关于[集群网络](/zh-cn/docs/concepts/cluster-administration/networking/)的信息
|
||||
- 进一步了解关于[网络策略](/zh-cn/docs/concepts/services-networking/network-policies/)的信息
|
||||
- 进一步了解关于[排查 CNI 插件相关错误](/zh-cn/docs/tasks/administer-cluster/migrating-from-dockershim/troubleshooting-cni-plugin-related-errors/)的信息
|
|
@ -242,7 +242,7 @@ operator.
|
|||
|
||||
<!--
|
||||
* Read the {{< glossary_tooltip text="CNCF" term_id="cncf" >}}
|
||||
[Operator White Paper](https://github.com/cncf/tag-app-delivery/blob/eece8f7307f2970f46f100f51932db106db46968/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md).
|
||||
[Operator White Paper](https://github.com/cncf/tag-app-delivery/blob/163962c4b1cd70d085107fc579e3e04c2e14d59c/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md).
|
||||
* Learn more about [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
|
||||
* Find ready-made operators on [OperatorHub.io](https://operatorhub.io/) to suit your use case
|
||||
* [Publish](https://operatorhub.io/) your operator for other people to use
|
||||
|
@ -252,7 +252,7 @@ operator.
|
|||
from Google Cloud about best practices for building operators
|
||||
-->
|
||||
|
||||
* 阅读 {{< glossary_tooltip text="CNCF" term_id="cncf" >}} [Operator 白皮书](https://github.com/cncf/tag-app-delivery/blob/eece8f7307f2970f46f100f51932db106db46968/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md)。
|
||||
* 阅读 {{< glossary_tooltip text="CNCF" term_id="cncf" >}} [Operator 白皮书](https://github.com/cncf/tag-app-delivery/blob/163962c4b1cd70d085107fc579e3e04c2e14d59c/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md)。
|
||||
* 详细了解[定制资源](/zh-cn/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
|
||||
* 在 [OperatorHub.io](https://operatorhub.io/) 上找到现成的、适合你的 Operator
|
||||
* [发布](https://operatorhub.io/)你的 Operator,让别人也可以使用
|
||||
|
|
|
@ -7,6 +7,9 @@ weight: 20
|
|||
card:
|
||||
name: concepts
|
||||
weight: 10
|
||||
anchors:
|
||||
- anchor: "#why-you-need-kubernetes-and-what-can-it-do"
|
||||
title: 为什么选择 Kubernetes?
|
||||
no_list: true
|
||||
---
|
||||
<!--
|
||||
|
@ -21,6 +24,9 @@ weight: 20
|
|||
card:
|
||||
name: concepts
|
||||
weight: 10
|
||||
anchors:
|
||||
- anchor: "#why-you-need-kubernetes-and-what-can-it-do"
|
||||
title: Why Kubernetes?
|
||||
no_list: true
|
||||
-->
|
||||
|
||||
|
@ -271,6 +277,25 @@ Kubernetes 为你提供:
|
|||
Kubernetes 允许你存储和管理敏感信息,例如密码、OAuth 令牌和 SSH 密钥。
|
||||
你可以在不重建容器镜像的情况下部署和更新密钥和应用程序配置,也无需在堆栈配置中暴露密钥。
|
||||
|
||||
<!--
|
||||
* **Batch execution**
|
||||
In addition to services, Kubernetes can manage your batch and CI workloads, replacing containers that fail, if desired.
|
||||
* **Horizontal scaling**
|
||||
Scale your application up and down with a simple command, with a UI, or automatically based on CPU usage.
|
||||
* **IPv4/IPv6 dual-stack**
|
||||
Allocation of IPv4 and IPv6 addresses to Pods and Services
|
||||
* **Designed for extensibility**
|
||||
Add features to your Kubernetes cluster without changing upstream source code.
|
||||
-->
|
||||
* **批处理执行**
|
||||
除了服务外,Kubernetes 还可以管理你的批处理和 CI(持续集成)工作负载,如有需要,可以替换失败的容器。
|
||||
* **水平扩缩**
|
||||
使用简单的命令、用户界面或根据 CPU 使用率自动对你的应用进行扩缩。
|
||||
* **IPv4/IPv6 双栈**
|
||||
为 Pod(容器组)和 Service(服务)分配 IPv4 和 IPv6 地址。
|
||||
* **为可扩展性设计**
|
||||
在不改变上游源代码的情况下为你的 Kubernetes 集群添加功能。
|
||||
|
||||
<!--
|
||||
## What Kubernetes is not
|
||||
-->
|
||||
|
|
|
@ -5,6 +5,7 @@ description: >
|
|||
Kubernetes 集群由控制平面的组件和一组称为节点的机器组成。
|
||||
weight: 30
|
||||
card:
|
||||
title: 集群组件
|
||||
name: concepts
|
||||
weight: 20
|
||||
---
|
||||
|
@ -18,6 +19,7 @@ description: >
|
|||
plane and a set of machines called nodes.
|
||||
weight: 30
|
||||
card:
|
||||
title: Components of a cluster
|
||||
name: concepts
|
||||
weight: 20
|
||||
-->
|
||||
|
|
|
@ -29,7 +29,8 @@ objects. Labels can be used to select objects and to find
|
|||
collections of objects that satisfy certain conditions. In contrast, annotations
|
||||
are not used to identify and select objects. The metadata
|
||||
in an annotation can be small or large, structured or unstructured, and can
|
||||
include characters not permitted by labels.
|
||||
include characters not permitted by labels. It is possible to use labels as
|
||||
well as annotations in the metadata of the same object.
|
||||
|
||||
Annotations, like labels, are key/value maps:
|
||||
-->
|
||||
|
@ -38,6 +39,7 @@ Annotations, like labels, are key/value maps:
|
|||
你可以使用标签或注解将元数据附加到 Kubernetes 对象。
|
||||
标签可以用来选择对象和查找满足某些条件的对象集合。 相反,注解不用于标识和选择对象。
|
||||
注解中的元数据,可以很小,也可以很大,可以是结构化的,也可以是非结构化的,能够包含标签不允许的字符。
|
||||
可以在同一对象的元数据中同时使用标签和注解。
|
||||
|
||||
注解和标签一样,是键/值对:
|
||||
|
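The distinction reads clearer with both kinds of metadata on one object; a minimal sketch follows, in which the object name, label, and annotation values are illustrative:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: annotations-demo        # hypothetical name
  labels:
    app: nginx                  # identifying metadata: usable in selectors
  annotations:
    imageregistry: "https://hub.docker.com/"  # non-identifying metadata
spec:
  containers:
  - name: nginx
    image: nginx:1.25
```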
||||
|
|
|
@ -752,11 +752,11 @@ from getting scheduled in a failure domain.
|
|||
<!--
|
||||
Using this scope, operators can prevent certain namespaces (`foo-ns` in the example below)
|
||||
from having pods that use cross-namespace pod affinity by creating a resource quota object in
|
||||
that namespace with `CrossNamespaceAffinity` scope and hard limit of 0:
|
||||
that namespace with `CrossNamespacePodAffinity` scope and hard limit of 0:
|
||||
-->
|
||||
集群运维人员可以使用此作用域,阻止某些名字空间(例如下面例子中的 `foo-ns`)运行使用跨名字空间
|
||||
Pod 亲和性的 Pod:方法是在该名字空间中创建作用域为
|
||||
`CrossNamespaceAffinity` 的、硬性约束为 0 的资源配额对象。
|
||||
`CrossNamespacePodAffinity` 的、硬性约束为 0 的资源配额对象。
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
|
@ -769,17 +769,18 @@ spec:
|
|||
pods: "0"
|
||||
scopeSelector:
|
||||
matchExpressions:
|
||||
- scopeName: CrossNamespaceAffinity
|
||||
- scopeName: CrossNamespacePodAffinity
|
||||
operator: Exists
|
||||
```
|
||||
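For readers skimming the diff, the complete quota object that this hunk modifies would look roughly like the following; the object name is an assumption:

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: disable-cross-namespace-affinity   # hypothetical name
  namespace: foo-ns
spec:
  hard:
    pods: "0"
  scopeSelector:
    matchExpressions:
    - scopeName: CrossNamespacePodAffinity
      operator: Exists
```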
|
||||
<!--
|
||||
If operators want to disallow using `namespaces` and `namespaceSelector` by default, and
|
||||
only allow it for specific namespaces, they could configure `CrossNamespaceAffinity`
|
||||
only allow it for specific namespaces, they could configure `CrossNamespacePodAffinity`
|
||||
as a limited resource by setting the kube-apiserver flag --admission-control-config-file
|
||||
to the path of the following configuration file:
|
||||
-->
|
||||
如果集群运维人员希望默认禁止使用 `namespaces` 和 `namespaceSelector`,
|
||||
而仅仅允许在特定名字空间中这样做,他们可以将 `CrossNamespaceAffinity`
|
||||
而仅仅允许在特定名字空间中这样做,他们可以将 `CrossNamespacePodAffinity`
|
||||
作为一个被约束的资源。方法是为 `kube-apiserver` 设置标志
|
||||
`--admission-control-config-file`,使之指向如下的配置文件:
|
||||
|
||||
|
@ -794,15 +795,16 @@ plugins:
|
|||
limitedResources:
|
||||
- resource: pods
|
||||
matchScopes:
|
||||
- scopeName: CrossNamespaceAffinity
|
||||
- scopeName: CrossNamespacePodAffinity
|
||||
operator: Exists
|
||||
```
|
||||
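A fuller sketch of that admission control configuration file, including the plugin wrapper that the fragment above omits, could look like this; treat it as an illustrative reconstruction rather than the exact file:

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: "ResourceQuota"
  configuration:
    apiVersion: apiserver.config.k8s.io/v1
    kind: ResourceQuotaConfiguration
    limitedResources:
    - resource: pods
      matchScopes:
      - scopeName: CrossNamespacePodAffinity
        operator: Exists
```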
|
||||
<!--
|
||||
With the above configuration, pods can use `namespaces` and `namespaceSelector` in pod affinity only
|
||||
if the namespace where they are created have a resource quota object with
|
||||
`CrossNamespaceAffinity` scope and a hard limit greater than or equal to the number of pods using those fields.
|
||||
`CrossNamespacePodAffinity` scope and a hard limit greater than or equal to the number of pods using those fields.
|
||||
-->
|
||||
基于上面的配置,只有名字空间中包含作用域为 `CrossNamespaceAffinity`
|
||||
基于上面的配置,只有名字空间中包含作用域为 `CrossNamespacePodAffinity`
|
||||
且硬性约束大于或等于使用 `namespaces` 和 `namespaceSelector` 字段的 Pod
|
||||
个数时,才可以在该名字空间中继续创建在其 Pod 亲和性规则中设置 `namespaces`
|
||||
或 `namespaceSelector` 的新 Pod。
|
||||
|
|
|
@ -196,23 +196,19 @@ kubelet 会自动发现这些文件系统并忽略节点本地的其它文件系
|
|||
<!--
|
||||
Some kubelet garbage collection features are deprecated in favor of eviction:
|
||||
|
||||
| Existing Flag | New Flag | Rationale |
|
||||
| ------------- | -------- | --------- |
|
||||
| `--image-gc-high-threshold` | `--eviction-hard` or `--eviction-soft` | existing eviction signals can trigger image garbage collection |
|
||||
| `--image-gc-low-threshold` | `--eviction-minimum-reclaim` | eviction reclaims achieve the same behavior |
|
||||
| `--maximum-dead-containers` | - | deprecated once old logs are stored outside of container's context |
|
||||
| `--maximum-dead-containers-per-container` | - | deprecated once old logs are stored outside of container's context |
|
||||
| `--minimum-container-ttl-duration` | - | deprecated once old logs are stored outside of container's context |
|
||||
| Existing Flag | Rationale |
|
||||
| ------------- | --------- |
|
||||
| `--maximum-dead-containers` | deprecated once old logs are stored outside of container's context |
|
||||
| `--maximum-dead-containers-per-container` | deprecated once old logs are stored outside of container's context |
|
||||
| `--minimum-container-ttl-duration` | deprecated once old logs are stored outside of container's context |
|
||||
-->
|
||||
一些 kubelet 垃圾收集功能已被弃用,以鼓励使用驱逐机制。
|
||||
|
||||
| 现有标志 | 新的标志 | 原因 |
|
||||
| ------------- | -------- | --------- |
|
||||
| `--image-gc-high-threshold` | `--eviction-hard` 或 `--eviction-soft` | 现有的驱逐信号可以触发镜像垃圾收集 |
|
||||
| `--image-gc-low-threshold` | `--eviction-minimum-reclaim` | 驱逐回收具有相同的行为 |
|
||||
| `--maximum-dead-containers` | - | 一旦旧的日志存储在容器的上下文之外就会被弃用 |
|
||||
| `--maximum-dead-containers-per-container` | - | 一旦旧的日志存储在容器的上下文之外就会被弃用 |
|
||||
| `--minimum-container-ttl-duration` | - | 一旦旧的日志存储在容器的上下文之外就会被弃用 |
|
||||
| 现有标志 | 原因 |
|
||||
| ----------------------------------------- | ----------------------------------- |
|
||||
| `--maximum-dead-containers` | 一旦旧的日志存储在容器的上下文之外就会被弃用 |
|
||||
| `--maximum-dead-containers-per-container` | 一旦旧的日志存储在容器的上下文之外就会被弃用 |
|
||||
| `--minimum-container-ttl-duration` | 一旦旧的日志存储在容器的上下文之外就会被弃用 |
|
||||
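For comparison, the retired image-GC thresholds map loosely onto eviction settings in a KubeletConfiguration; the exact values below are illustrative assumptions, not recommendations from this page:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
evictionHard:
  imagefs.available: "15%"    # loosely replaces --image-gc-high-threshold=85
evictionMinimumReclaim:
  imagefs.available: "2Gi"    # loosely replaces --image-gc-low-threshold
```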
|
||||
<!--
|
||||
### Eviction thresholds
|
||||
|
|
|
@ -246,6 +246,30 @@ current policy level:
|
|||
- 对 `.spec.activeDeadlineSeconds` 的合法更新
|
||||
- 对 `.spec.tolerations` 的合法更新
|
||||
|
||||
<!--
|
||||
## Metrics
|
||||
|
||||
Here are the Prometheus metrics exposed by kube-apiserver:
|
||||
-->
|
||||
## 指标 {#metrics}
|
||||
|
||||
以下是 kube-apiserver 公开的 Prometheus 指标:
|
||||
|
||||
<!--
|
||||
- `pod_security_errors_total`: This metric indicates the number of errors preventing normal evaluation.
|
||||
Non-fatal errors may result in the latest restricted profile being used for enforcement.
|
||||
- `pod_security_evaluations_total`: This metric indicates the number of policy evaluations that have occurred,
|
||||
not counting ignored or exempt requests during exporting.
|
||||
- `pod_security_exemptions_total`: This metric indicates the number of exempt requests, not counting ignored
|
||||
or out of scope requests.
|
||||
-->
|
||||
- `pod_security_errors_total`:此指标表示妨碍正常评估的错误数量。
|
||||
如果错误是非致命的,kube-apiserver 可能会强制实施最新的受限配置。
|
||||
- `pod_security_evaluations_total`:此指标表示已发生的策略评估的数量,
|
||||
不包括导出期间被忽略或豁免的请求。
|
||||
- `pod_security_exemptions_total`:该指标表示豁免请求的数量,
|
||||
不包括被忽略或超出范围的请求。
|
||||
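A quick way to inspect these counters on a live cluster, assuming kubectl access to the API server, is to filter the raw metrics endpoint:

```shell
# List the Pod Security admission metrics described above
kubectl get --raw /metrics | grep '^pod_security_'
```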
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
<!--
|
||||
|
|
|
@ -5,9 +5,9 @@
|
|||
|
||||
- You need to have these tools installed:
|
||||
|
||||
- [Python](https://www.python.org/downloads/) v3.7.x
|
||||
- [Python](https://www.python.org/downloads/) v3.7.x+
|
||||
- [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
|
||||
- [Golang](https://golang.org/doc/install) version 1.13+
|
||||
- [Golang](https://go.dev/dl/) version 1.13+
|
||||
- [Pip](https://pypi.org/project/pip/) used to install PyYAML
|
||||
- [PyYAML](https://pyyaml.org/) v5.1.2
|
||||
- [make](https://www.gnu.org/software/make/)
|
||||
|
@ -21,9 +21,9 @@
|
|||
|
||||
- 你需要安装以下工具:
|
||||
|
||||
- [Python](https://www.python.org/downloads/) v3.7.x
|
||||
- [Python](https://www.python.org/downloads/) v3.7.x+
|
||||
- [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
|
||||
- [Golang](https://golang.org/doc/install) 1.13+ 版本
|
||||
- [Golang](https://go.dev/dl/) 1.13+ 版本
|
||||
- 用来安装 PyYAML 的 [Pip](https://pypi.org/project/pip/)
|
||||
- [PyYAML](https://pyyaml.org/) v5.1.2
|
||||
- [make](https://www.gnu.org/software/make/)
|
||||
|
@ -42,4 +42,3 @@
|
|||
- 你需要知道如何为一个 GitHub 仓库创建拉取请求(PR)。
|
||||
这牵涉到创建仓库的派生(fork)副本。
|
||||
有关信息可进一步查看[基于本地副本开展工作](/zh-cn/docs/contribute/new-content/open-a-pr/#fork-the-repo)。
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ weight: 50
|
|||
card:
|
||||
name: contribute
|
||||
weight: 50
|
||||
title: 翻译文档
|
||||
title: 本地化文档
|
||||
---
|
||||
<!--
|
||||
title: Localizing Kubernetes documentation
|
||||
|
@ -17,7 +17,7 @@ weight: 50
|
|||
card:
|
||||
name: contribute
|
||||
weight: 50
|
||||
title: Translating the docs
|
||||
title: Localizing the docs
|
||||
-->
|
||||
|
||||
<!-- overview -->
|
||||
|
@ -717,7 +717,7 @@ have the exact font used in the original SVG.
|
|||
5. **Reviewing and testing**: After making the necessary translations and
|
||||
converting text to curves, save and review the updated SVG image to ensure
|
||||
the text is properly displayed and aligned. Check
|
||||
[Preview your changes locally](https://kubernetes.io/docs/contribute/new-content/open-a-pr/#preview-locally).
|
||||
[Preview your changes locally](/docs/contribute/new-content/open-a-pr/#preview-locally).
|
||||
-->
|
||||
5. **检查和测试**:完成必要的翻译并将文本转换为曲线后,保存并检查更新后的 SVG 图片,确保文本正确显示和对齐。
|
||||
参考[在本地预览你的变更](/zh-cn/docs/contribute/new-content/open-a-pr/#preview-locally)。
|
||||
|
@ -755,9 +755,9 @@ The `main` branch holds content for the current release `{{< latest-version >}}`
|
|||
The release team creates a `{{< release-branch >}}` branch before the next
|
||||
release: v{{< skew nextMinorVersion >}}.
|
||||
-->
|
||||
目标版本 | 分支
|
||||
-----|-----
|
||||
最新版本 | [`main`](https://github.com/kubernetes/website/tree/main)
|
||||
目标版本 | 分支
|
||||
----------|-----
|
||||
最新版本 | [`main`](https://github.com/kubernetes/website/tree/main)
|
||||
上一个版本 | [`release-{{< skew prevMinorVersion >}}`](https://github.com/kubernetes/website/tree/release-{{< skew prevMinorVersion >}})
|
||||
下一个版本 | [`dev-{{< skew nextMinorVersion >}}`](https://github.com/kubernetes/website/tree/dev-{{< skew nextMinorVersion >}})
|
||||
|
||||
|
@ -980,7 +980,7 @@ permissions are required.
|
|||
For more information about working from forks or directly from the repository,
|
||||
see ["fork and clone the repo"](#fork-and-clone-the-repo).
|
||||
-->
|
||||
有关基于派生或直接从仓库开展工作的更多信息,请参见 ["派生和克隆"](#fork-and-clone-the-repo)。
|
||||
有关基于派生或直接从仓库开展工作的更多信息,请参见["派生和克隆"](#fork-and-clone-the-repo)。
|
||||
|
||||
<!--
|
||||
## Upstream contributions
|
||||
|
|
|
@ -0,0 +1,167 @@
|
|||
---
|
||||
title: Issue 管理者
|
||||
content_type: concept
|
||||
weight: 20
|
||||
---
|
||||
<!--
|
||||
title: Issue Wranglers
|
||||
content_type: concept
|
||||
weight: 20
|
||||
-->
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
<!--
|
||||
Alongside the [PR Wrangler](/docs/contribute/participate/pr-wranglers), formal approvers, and reviewers, members of SIG Docs take week-long shifts [triaging and categorizing issues](/docs/contribute/review/for-approvers.md/#triage-and-categorize-issues) for the repository.
|
||||
-->
|
||||
除了承担 [PR 管理者](/zh-cn/docs/contribute/participate/pr-wranglers)的职责外,
|
||||
SIG Docs 正式的批准人(Approver)、评审人(Reviewer)和成员(Member)
|
||||
按周轮流[归类仓库的 Issue](/zh-cn/docs/contribute/review/for-approvers.md/#triage-and-categorize-issues)。
|
||||
|
||||
<!-- body -->
|
||||
|
||||
<!--
|
||||
## Duties
|
||||
|
||||
Each day in a week-long shift the Issue Wrangler will be responsible for:
|
||||
|
||||
- Triaging and tagging incoming issues daily. See [Triage and categorize issues](https://github.com/kubernetes/website/blob/main/content/en/docs/contribute/review/for-approvers.md/#triage-and-categorize-issues) for guidelines on how SIG Docs uses metadata.
|
||||
- Keeping an eye on stale & rotten issues within the kubernetes/website repository.
|
||||
- Maintenance of the [Issues board](https://github.com/orgs/kubernetes/projects/72/views/1).
|
||||
-->
|
||||
## 职责 {#duties}
|
||||
|
||||
在为期一周的轮值期内,Issue 管理者每天负责:
|
||||
|
||||
- 对收到的 Issue 进行日常分类和标记。有关 SIG Docs 如何使用元数据的指导说明,
|
||||
参阅[归类 Issue](https://github.com/kubernetes/website/blob/main/content/en/docs/contribute/review/for-approvers.md/#triage-and-categorize-issues)。
|
||||
- 密切关注 kubernetes/website 代码仓库中陈旧和过期的 Issue。
|
||||
- 维护 [Issues 看板](https://github.com/orgs/kubernetes/projects/72/views/1)。
|
||||
|
||||
<!--
|
||||
### Requirements
|
||||
|
||||
- Must be an active member of the Kubernetes organization.
|
||||
- A minimum of 15 [non-trivial](https://www.kubernetes.dev/docs/guide/pull-requests/#trivial-edits) contributions to Kubernetes (of which a certain amount should be directed towards kubernetes/website).
|
||||
- Performing the role in an informal capacity already
|
||||
-->
|
||||
### 要求 {#requirements}
|
||||
|
||||
- 必须是 Kubernetes 组织的活跃成员。
|
||||
- 至少为 Kubernetes 做了 15
|
||||
个[非小微](https://www.kubernetes.dev/docs/guide/pull-requests/#trivial-edits)的贡献
|
||||
(其中某些应是直接针对 kubernetes/website 的贡献)。
|
||||
- 已经以非正式身份履行该职责。
|
||||
|
||||
<!--
|
||||
### Helpful [Prow commands](https://prow.k8s.io/command-help) for wranglers
|
||||
-->
|
||||
### 对管理者有帮助的 [Prow 命令](https://prow.k8s.io/command-help)
|
||||
|
||||
<!--
|
||||
```
|
||||
# reopen an issue
|
||||
/reopen
|
||||
|
||||
# transfer issues that don't fit in k/website to another repository
|
||||
/transfer[-issue]
|
||||
|
||||
# change the state of rotten issues
|
||||
/remove-lifecycle rotten
|
||||
|
||||
# change the state of stale issues
|
||||
/remove-lifecycle stale
|
||||
|
||||
# assign sig to an issue
|
||||
/sig <sig_name>
|
||||
|
||||
# add specific area
|
||||
/area <area_name>
|
||||
|
||||
# for beginner-friendly issues
|
||||
/good-first-issue
|
||||
|
||||
# issues that need help
|
||||
/help wanted
|
||||
|
||||
# tagging issue as support specific
|
||||
/kind support
|
||||
|
||||
# to accept triaging for an issue
|
||||
/triage accepted
|
||||
|
||||
# closing an issue we won't be working on and haven't fixed yet
|
||||
/close not-planned
|
||||
```
|
||||
-->
|
||||
```
|
||||
# 重新打开 Issue
|
||||
/reopen
|
||||
|
||||
# 将不切合 k/website 的 Issue 转移到其他代码仓库
|
||||
/transfer[-issue]
|
||||
|
||||
# 更改过期(rotten)Issue 的状态
|
||||
/remove-lifecycle rotten
|
||||
|
||||
# 更改陈旧(stale)Issue 的状态
|
||||
/remove-lifecycle stale
|
||||
|
||||
# 为 Issue 指派 SIG
|
||||
/sig <sig_name>
|
||||
|
||||
# 添加具体领域
|
||||
/area <area_name>
|
||||
|
||||
# 对新手友好的 Issue
|
||||
/good-first-issue
|
||||
|
||||
# 需要帮助的 Issue
|
||||
/help wanted
|
||||
|
||||
# 将 Issue 标记为某种支持
|
||||
/kind support
|
||||
|
||||
# 接受某个 Issue 的归类
|
||||
/triage accepted
|
||||
|
||||
# 关闭还未处理且未修复的 Issue
|
||||
/close not-planned
|
||||
```
|
||||
|
||||
<!--
|
||||
### When to close Issues
|
||||
|
||||
For an open source project to succeed, good issue management is crucial. But it is also critical to resolve issues in order to maintain the repository and communicate clearly with contributors and users.
|
||||
-->
|
||||
### 何时关闭 Issue {#when-to-close-issues}
|
||||
|
||||
一个开源项目想要成功,良好的 Issue 管理非常关键。
|
||||
但解决 Issue 也很重要,这样才能维护代码仓库,并与贡献者和用户进行清晰明确的交流。
|
||||
|
||||
<!--
|
||||
Close issues when:
|
||||
|
||||
- A similar issue is reported more than once. You will first need to tag it as `/triage duplicate`; link it to the main issue and then close it. It is also advisable to direct the users to the original issue.
|
||||
- It is very difficult to understand and address the issue presented by the author with the information provided.
|
||||
However, encourage the user to provide more details or reopen the issue if they can reproduce it later.
|
||||
- The same functionality is implemented elsewhere. One can close this issue and direct user to the appropriate place.
|
||||
- The reported issue is not currently planned or aligned with the project's goals.
|
||||
- If the issue appears to be spam and is clearly unrelated.
|
||||
- If the issue is related to an external limitation or dependency and is beyond the control of the project.
|
||||
-->
|
||||
关闭 Issue 的时机包括:
|
||||
|
||||
- 类似的 Issue 被多次报告。你首先需要将其标记为 /triage duplicate;
|
||||
将其链接到主要 Issue 然后关闭它。还建议将用户引导至最初的 Issue。
|
||||
- 通过所提供的信息很难理解和解决作者提出的 Issue。
|
||||
但要鼓励用户提供更多细节,或者在以后可以重现 Issue 时重新打开此 Issue。
|
||||
- 相同的功能在其他地方已实现。管理者可以关闭此 Issue 并将用户引导至适当的位置。
|
||||
- 报告的 Issue 当前未被计划或不符合项目的目标。
|
||||
- 如果 Issue 看起来是垃圾信息并且明显不相关。
|
||||
- 如果 Issue 与外部限制或依赖项有关并且超出了本项目的控制范围。
|
||||
|
||||
<!--
|
||||
To close an issue, leave a `/close` comment on the issue.
|
||||
-->
|
||||
要关闭 Issue,可以在 Issue 中留下一条 `/close` 的评论。
|
|
@ -946,6 +946,21 @@ structure of content in a code editor better.
|
|||
|
||||
两行的留白有助于在代码编辑器中查看整个内容的结构组织。
|
||||
|
||||
<!--
|
||||
Manually wrap paragraphs in the Markdown source when appropriate. Since the git
|
||||
tool and the GitHub website generate file diffs on a line-by-line basis,
|
||||
manually wrapping long lines helps the reviewers to easily find out the changes
|
||||
made in a PR and provide feedback. It also helps the downstream localization
|
||||
teams where people track the upstream changes on a per-line basis. Line
|
||||
wrapping can happen at the end of a sentence or a punctuation character, for
|
||||
example. One exception to this is that a Markdown link or a shortcode is
|
||||
expected to be in a single line.
|
||||
-->
|
||||
适当时在 Markdown 文档中手动换行。由于 git 工具和 GitHub
|
||||
网站是逐行生成文件差异的,手动换行可以帮助审阅者轻松找到 PR 中所做的更改并提供反馈。
|
||||
它还可以帮助下游本地化团队,使其按行跟踪上游更改。例如,换行可以发生在句子或标点符号的末尾。
|
||||
一个例外是 Markdown 链接或短代码应位于一行中。
|
||||
|
||||
<!--
|
||||
### Headings and titles {#headings}
|
||||
|
||||
|
|
|
@ -568,33 +568,6 @@ Then, delete the Secret you now know the name of:
|
|||
kubectl -n examplens delete secret/example-automated-thing-token-zyxwv
|
||||
```
|
||||
|
||||
<!--
|
||||
The control plane spots that the ServiceAccount is missing its Secret,
|
||||
and creates a replacement:
|
||||
-->
|
||||
控制平面发现 ServiceAccount 缺少其 Secret,并创建一个替代项:
|
||||
|
||||
```shell
|
||||
kubectl -n examplens get serviceaccount/example-automated-thing -o yaml
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
annotations:
|
||||
kubectl.kubernetes.io/last-applied-configuration: |
|
||||
{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"annotations":{},"name":"example-automated-thing","namespace":"examplens"}}
|
||||
creationTimestamp: "2019-07-21T07:07:07Z"
|
||||
name: example-automated-thing
|
||||
namespace: examplens
|
||||
resourceVersion: "1026"
|
||||
selfLink: /api/v1/namespaces/examplens/serviceaccounts/example-automated-thing
|
||||
uid: f23fd170-66f2-4697-b049-e1e266b7f835
|
||||
secrets:
|
||||
- name: example-automated-thing-token-4rdrh
|
||||
```
|
||||
|
||||
<!--
|
||||
## Clean up
|
||||
|
||||
|
|
|
@ -110,16 +110,15 @@ and URL path.
|
|||
<!--
|
||||
Each entry in matchImages is a pattern which can optionally contain a port and a path.
|
||||
Globs can be used in the domain, but not in the port or the path. Globs are supported
|
||||
as subdomains like <code>*.k8s.io</code> or <code>k8s.*.io</code>, and top-level-domains such as <code>k8s.*</code>.
|
||||
Matching partial subdomains like <code>app*.k8s.io</code> is also supported. Each glob can only match
|
||||
a single subdomain segment, so <code>*.io</code> does not match <code>*.k8s.io</code>.
|
||||
as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'.
|
||||
Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match
|
||||
a single subdomain segment, so *.io does not match *.k8s.io.
|
||||
-->
|
||||
<p><code>matchImages</code> 中的每个条目都是一个模式字符串,其中可以包含端口号和路径。
|
||||
域名部分可以包含通配符,但端口或路径部分不可以。
|
||||
<code>*.k8s.io</code> 或 <code>k8s.*.io</code> 等子域名以及
|
||||
<code>k8s.*</code> 这类顶级域名都支持通配符。</p>
|
||||
<p>对于 <code>app*.k8s.io</code> 这类部分子域名的匹配也是支持的。
|
||||
每个通配符只能用来匹配一个子域名段,所以 <code>*.io</code> 不会匹配 <code>*.k8s.io</code>。</p>
|
||||
域名部分可以包含通配符,但端口或路径部分不可以。'*.k8s.io' 或 'k8s.*.io' 等子域名以及
|
||||
'k8s.*' 这类顶级域名都支持通配符。</p>
|
||||
<p>对于 'app*.k8s.io' 这类部分子域名的匹配也是支持的。
|
||||
每个通配符只能用来匹配一个子域名段,所以 *.io 不会匹配 *.k8s.io。</p>
|
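Applying those glob rules, a kubelet CredentialProviderConfig might look like the sketch below; the provider name and binary are hypothetical, and only the matchImages patterns are the point:

```yaml
apiVersion: kubelet.config.k8s.io/v1
kind: CredentialProviderConfig
providers:
- name: example-credential-provider   # hypothetical provider binary
  apiVersion: credentialprovider.kubelet.k8s.io/v1
  defaultCacheDuration: "12h"
  matchImages:
  - "*.k8s.io"                     # one wildcard matches one subdomain segment
  - "k8s.*.io"
  - "app*.k8s.io"                  # partial subdomain match
  - "registry.example.com:5000/project"  # port and path allowed, but no globs there
```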
||||
<!--
|
||||
A match exists between an image and a matchImage when all of the below are true:
|
||||
-->
|
||||
|
|
|
@ -127,16 +127,16 @@ this field to null if no valid credentials can be returned for the requested ima
|
|||
<!--
|
||||
Each key in the map is a pattern which can optionally contain a port and a path.
|
||||
Globs can be used in the domain, but not in the port or the path. Globs are supported
|
||||
as subdomains like <code>*.k8s.io</code> or <code>k8s.*.io</code>, and top-level-domains such as <code>k8s.*</code>.
|
||||
Matching partial subdomains like <code>app*.k8s.io</code> is also supported. Each glob can only match
|
||||
a single subdomain segment, so <code>*.io</code> does not match <code>*.k8s.io</code>.</p>
|
||||
as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'.
|
||||
Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match
|
||||
a single subdomain segment, so *.io does not match *.k8s.io.</p>
|
||||
-->
|
||||
<p>
|
||||
映射中每个键名都是一个模式,其中可以选择包含端口和路径。
|
||||
域名部分可以包含通配符,但在端口或路径中不能使用通配符。
|
||||
支持通配符作为子域,如 <code>*.k8s.io</code> 或 <code>k8s.*.io</code>,以及顶级域,如 <code>k8s.*</code>。
|
||||
还支持匹配部分子域,如 <code>app*.k8s.io</code>。每个通配符只能匹配一个子域段,
|
||||
因此 <code>*.io</code> 不匹配 <code>*.k8s.io</code>。
|
||||
支持通配符作为子域,如 '*.k8s.io' 或 'k8s.*.io',以及顶级域,如 'k8s.*'。
|
||||
还支持匹配部分子域,如 'app*.k8s.io'。每个通配符只能匹配一个子域段,
|
||||
因此 *.io 不匹配 *.k8s.io。
|
||||
</p>
|
||||
<!--
|
||||
<p>The kubelet will match images against the key when all of the below are true:</p>
|
||||
|
|
|
@ -127,16 +127,16 @@ this field to null if no valid credentials can be returned for the requested ima
|
|||
<!--
|
||||
Each key in the map is a pattern which can optionally contain a port and a path.
|
||||
Globs can be used in the domain, but not in the port or the path. Globs are supported
|
||||
as subdomains like <code>*.k8s.io</code> or <code>k8s.*.io</code>, and top-level-domains such as <code>k8s.*</code>.
|
||||
Matching partial subdomains like <code>app*.k8s.io</code> is also supported. Each glob can only match
|
||||
a single subdomain segment, so <code>*.io</code> does not match <code>*.k8s.io</code>.</p>
|
||||
as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'.
|
||||
Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match
|
||||
a single subdomain segment, so *.io does not match *.k8s.io.</p>
|
||||
-->
|
||||
<p>
|
||||
映射中每个键名都是一个模式,其中可以选择包含端口和路径。
|
||||
域名部分可以包含通配符,但在端口或路径中不能使用通配符。
|
||||
支持通配符作为子域,如 <code>*.k8s.io</code> 或 <code>k8s.*.io</code>,以及顶级域,如 <code>k8s.*</code>。
|
||||
还支持匹配部分子域,如 <code>app*.k8s.io</code>。每个通配符只能匹配一个子域段,
|
||||
因此 <code>*.io</code> 不匹配 <code>*.k8s.io</code>。
|
||||
支持通配符作为子域,如 '*.k8s.io' 或 'k8s.*.io',以及顶级域,如 'k8s.*'。
|
||||
还支持匹配部分子域,如 'app*.k8s.io'。每个通配符只能匹配一个子域段,
|
||||
因此 *.io 不匹配 *.k8s.io。
|
||||
</p>
|
||||
<!--
|
||||
<p>The kubelet will match images against the key when all of the below are true:</p>
|
||||
|
|
|
@ -7,9 +7,7 @@ content_type: "api_reference"
|
|||
description: "状态(Status)是不返回其他对象的调用的返回值。"
|
||||
title: "Status"
|
||||
weight: 12
|
||||
auto_generated: true
|
||||
---
|
||||
|
||||
<!--
|
||||
api_metadata:
|
||||
apiVersion: ""
|
||||
|
@ -22,23 +20,11 @@ weight: 12
|
|||
auto_generated: true
|
||||
-->
|
||||
|
||||
<!--
|
||||
The file is auto-generated from the Go source code of the component using a generic
|
||||
[generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how
|
||||
to generate the reference documentation, please read
|
||||
[Contributing to the reference documentation](/docs/contribute/generate-ref-docs/).
|
||||
To update the reference content, please follow the
|
||||
[Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/)
|
||||
guide. You can file document formatting bugs against the
|
||||
[reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project.
|
||||
-->
|
||||
|
||||
|
||||
|
||||
`import "k8s.io/apimachinery/pkg/apis/meta/v1"`
|
||||
|
||||
|
||||
<!-- Status is a return value for calls that don't return other objects. -->
|
||||
<!--
|
||||
Status is a return value for calls that don't return other objects.
|
||||
-->
|
||||
状态(Status)是不返回其他对象的调用的返回值。
|
||||
|
||||
<hr>
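One quick way to see a Status object in practice, assuming a running cluster and `kubectl proxy` on its default port, is to request a resource that does not exist; the server answers with `kind: Status`, `reason: NotFound`, and `code: 404`:

```shell
kubectl proxy --port=8001 &
curl http://localhost:8001/api/v1/namespaces/default/pods/no-such-pod
```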
|
||||
|
@ -49,13 +35,16 @@ guide. You can file document formatting bugs against the
|
|||
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
-->
|
||||
|
||||
APIVersion 定义对象表示的版本化模式。
|
||||
apiVersion 定义对象表示的版本化模式。
|
||||
服务器应将已识别的模式转换为最新的内部值,并可能拒绝无法识别的值。
|
||||
更多信息: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
|
||||
- **code** (int32)
|
||||
|
||||
<!-- Suggested HTTP return code for this status, 0 if not set. -->
|
||||
<!--
|
||||
Suggested HTTP return code for this status, 0 if not set.
|
||||
-->
|
||||
|
||||
此状态的建议 HTTP 返回代码,如果未设置,则为 0。
|
||||
|
||||
- **details** (StatusDetails)
|
||||
|
@ -63,69 +52,75 @@ guide. You can file document formatting bugs against the
|
|||
<!--
|
||||
Extended data associated with the reason. Each reason may define its own extended details.
|
||||
This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.
|
||||
|
||||
<a name="StatusDetails"></a>
|
||||
*StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.*
|
||||
-->
|
||||
|
||||
与原因(Reason)相关的扩展数据。每个原因都可以定义自己的扩展细节。
|
||||
此字段是可选的,并且不保证返回的数据符合任何模式,除非由原因类型定义。
|
||||
|
||||
<a name="StatusDetails"></a>
|
||||
<!--
|
||||
*StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response.
|
||||
The Reason field of a Status object defines what attributes will be set.
|
||||
Clients must ignore fields that do not match the defined type of each attribute,
|
||||
and should assume that any attribute may be empty, invalid, or under defined.*
|
||||
-->
|
||||
*StatusDetails 是一组附加属性,可以由服务器设置以提供有关响应的附加信息。*
|
||||
*状态对象的原因字段定义将设置哪些属性。*
|
||||
*客户端必须忽略与每个属性的定义类型不匹配的字段,并且应该假定任何属性可能为空、无效或未定义。*
|
||||
**StatusDetails 是一组附加属性,可以由服务器设置以提供有关响应的附加信息。
|
||||
状态对象的原因字段定义将设置哪些属性。
|
||||
客户端必须忽略与每个属性的定义类型不匹配的字段,并且应该假定任何属性可能为空、无效或未定义。**
|
||||
|
||||
- **details.causes** ([]StatusCause)
|
||||
|
||||
<!--
|
||||
The Causes array includes more details associated with the StatusReason failure.
|
||||
Not all StatusReasons may provide detailed causes.
|
||||
|
||||
<a name="StatusCause"></a>
|
||||
*StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.*
|
||||
-->
|
||||
Causes 数组包含与 StatusReason 故障相关的更多详细信息。
|
||||
|
||||
causes 数组包含与 StatusReason 故障相关的更多详细信息。
|
||||
并非所有 StatusReasons 都可以提供详细的原因。
|
||||
|
||||
<a name="StatusCause"></a>
|
||||
<!--
|
||||
*StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.*
|
||||
-->
|
||||
*StatusCause 提供有关 api.Status 失败的更多信息,包括遇到多个错误的情况。*
|
||||
**StatusCause 提供有关 api.Status 失败的更多信息,包括遇到多个错误的情况。**
|
||||
|
||||
- **details.causes.field** (string)
|
||||
|
||||
<!--
|
||||
The field of the resource that has caused this error, as named by its JSON serialization.
|
||||
May include dot and postfix notation for nested attributes. Arrays are zero-indexed.
|
||||
Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.
|
||||
-->
|
||||
导致此错误的资源字段,由其 JSON 序列化命名。
|
||||
可能包括嵌套属性的点和后缀表示法。数组是从零开始索引的。
|
||||
由于字段有多个错误,字段可能会在一系列原因中出现多次。可选。
|
||||
|
||||
<!--
|
||||
The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.
|
||||
|
||||
Examples:
|
||||
"name" - the field "name" on the current resource
|
||||
"items[0].name" - the field "name" on the first array entry in "items"
|
||||
-->
|
||||
|
||||
导致此错误的资源字段,由其 JSON 序列化命名。
|
||||
可能包括嵌套属性的点和后缀表示法。数组是从零开始索引的。
|
||||
由于字段有多个错误,字段可能会在一系列原因中出现多次。可选。
|
||||
|
||||
示例:
|
||||
- “name”:当前资源上的字段 “name”
|
||||
- “items[0].name”:“items” 中第一个数组条目上的字段 “name”
|
||||
|
||||
- **details.causes.message** (string)
|
||||
|
||||
<!-- A human-readable description of the cause of the error. This field may be presented as-is to a reader. -->
|
||||
<!--
|
||||
A human-readable description of the cause of the error. This field may be presented as-is to a reader.
|
||||
-->
|
||||
|
||||
对错误原因的可读描述。该字段可以按原样呈现给读者。
|
||||
|
||||
- **details.causes.reason** (string)
|
||||
|
||||
<!-- A machine-readable description of the cause of the error. If this value is empty there is no information available. -->
|
||||
<!--
|
||||
A machine-readable description of the cause of the error. If this value is empty there is no information available.
|
||||
-->
|
||||
|
||||
错误原因的机器可读描述。如果此值为空,则没有可用信息。
|
||||
|
||||
- **details.group** (string)
|
||||
|
||||
<!-- The group attribute of the resource associated with the status StatusReason. -->
|
||||
<!--
|
||||
The group attribute of the resource associated with the status StatusReason.
|
||||
-->
|
||||
|
||||
与状态 StatusReason 关联的资源的组属性。
|
||||
|
||||
- **details.kind** (string)
|
||||
|
@ -135,13 +130,17 @@ guide. You can file document formatting bugs against the
|
|||
On some operations may differ from the requested resource Kind.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
-->
|
||||
与状态 StatusReason 关联的资源的种类属性。
|
||||
在某些操作上可能与请求的资源种类不同。
|
||||
|
||||
与状态 StatusReason 关联的资源的类别属性。
|
||||
在某些操作上可能与请求的资源类别不同。
|
||||
更多信息: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
|
||||
- **details.name** (string)
|
||||
|
||||
<!-- The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described). -->
|
||||
<!--
|
||||
The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).
|
||||
-->
|
||||
|
||||
与状态 StatusReason 关联的资源的名称属性(当有一个可以描述的名称时)。
|
||||
|
||||
- **details.retryAfterSeconds** (int32)
|
||||
|
@ -151,6 +150,7 @@ guide. You can file document formatting bugs against the
|
|||
Some errors may indicate the client must take an alternate action -
|
||||
for those errors this field may indicate how long to wait before taking the alternate action.
|
||||
-->
|
||||
|
||||
如果指定,则应重试操作前的时间(以秒为单位)。
|
||||
一些错误可能表明客户端必须采取替代操作——对于这些错误,此字段可能指示在采取替代操作之前等待多长时间。
|
||||
|
||||
|
@ -160,33 +160,37 @@ guide. You can file document formatting bugs against the
|
|||
UID of the resource. (when there is a single resource which can be described).
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids
|
||||
-->
|
||||
资源的 UID(当有单个可以描述的资源时)。
|
||||
更多信息: https://kubernetes.io/zh-cn/docs/concepts/overview/working-with-objects/names#uids
|
||||
|
||||
资源的 UID(当有单个可以描述的资源时)。更多信息:
|
||||
https://kubernetes.io/zh-cn/docs/concepts/overview/working-with-objects/names#uids
|
||||
|
||||
- **kind** (string)
|
||||
|
||||
<!--
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated. In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
-->
|
||||
Kind 是一个字符串值,表示此对象表示的 REST 资源。
|
||||
|
||||
kind 是一个字符串值,表示此对象表示的 REST 资源。
|
||||
服务器可以从客户端提交请求的端点推断出这一点。
|
||||
无法更新。驼峰式规则。
|
||||
更多信息: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
无法更新。采用驼峰式命名。更多信息:
|
||||
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
|
||||
- **message** (string)
|
||||
|
||||
<!-- A human-readable description of the status of this operation. -->
|
||||
<!--
|
||||
A human-readable description of the status of this operation.
|
||||
-->
|
||||
|
||||
此操作状态的人类可读描述。
|
||||
|
||||
- **metadata** (<a href="{{< ref "../common-definitions/list-meta#ListMeta" >}}">ListMeta</a>)
|
||||
|
||||
<!-- Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds -->
|
||||
标准列表元数据。
|
||||
更多信息: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
<!--
|
||||
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
-->
|
||||
|
||||
标准的列表元数据。更多信息:
|
||||
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
|
||||
- **reason** (string)
|
||||
|
||||
|
@ -195,14 +199,16 @@ guide. You can file document formatting bugs against the
|
|||
If this value is empty there is no information available.
|
||||
A Reason clarifies an HTTP status code but does not override it.
|
||||
-->
|
||||
|
||||
机器可读的说明,说明此操作为何处于“失败”状态。
|
||||
如果此值为空,则没有可用信息。
|
||||
Reason 澄清了 HTTP 状态代码,但不会覆盖它。
|
||||
reason 澄清了 HTTP 状态代码,但不会覆盖它。
|
||||
|
||||
- **status** (string)
|
||||
|
||||
<!--
|
||||
Status of the operation. One of: "Success" or "Failure". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
-->
|
||||
操作状态。“Success”或“Failure” 之一。
|
||||
更多信息: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
|
||||
操作状态。“Success”或“Failure”之一。更多信息:
|
||||
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
|
|
|
@ -7,231 +7,222 @@ content_type: "api_reference"
|
|||
description: ""
|
||||
title: "常用参数"
|
||||
weight: 11
|
||||
auto_generated: true
|
||||
---
|
||||
|
||||
<!--
|
||||
The file is auto-generated from the Go source code of the component using a generic
|
||||
[generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how
|
||||
to generate the reference documentation, please read
|
||||
[Contributing to the reference documentation](/docs/contribute/generate-ref-docs/).
|
||||
To update the reference content, please follow the
|
||||
[Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/)
|
||||
guide. You can file document formatting bugs against the
|
||||
[reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project.
|
||||
api_metadata:
|
||||
apiVersion: ""
|
||||
import: ""
|
||||
kind: "Common Parameters"
|
||||
content_type: "api_reference"
|
||||
description: ""
|
||||
title: "Common Parameters"
|
||||
weight: 11
|
||||
auto_generated: true
|
||||
-->
|
||||
|
||||
## allowWatchBookmarks {#allowWatchBookmarks}
|
||||
|
||||
<!--
|
||||
allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
|
||||
|
||||
<hr>
|
||||
-->
|
||||
allowWatchBookmarks 字段请求类型为 BOOKMARK 的监视事件。
|
||||
没有实现书签的服务器可能会忽略这个标志,并根据服务器的判断发送书签。
|
||||
客户端不应该假设书签会在任何特定的时间间隔返回,也不应该假设服务器会在会话期间发送任何书签事件。
|
||||
如果当前请求不是 watch 请求,则忽略该字段。
|
||||
|
||||
<hr>
|
||||
|
||||
## continue {#continue}
|
||||
|
||||
<!--
|
||||
The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token.
|
||||
The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key".
|
||||
-->
|
||||
当需要从服务器检索更多结果时,应该设置 continue 选项。由于这个值是服务器定义的,
|
||||
客户端只能使用先前查询所返回的 continue 值,且查询参数必须与先前查询相同(continue 值除外),
|
||||
服务器可能拒绝它识别不到的 continue 值。
|
||||
如果指定的 continue 值不再有效,无论是由于过期(通常是 5 到 15 分钟)
|
||||
还是服务器上的配置更改,服务器将响应 "410 ResourceExpired" 错误和一个 continue 令牌。
|
||||
<!--
|
||||
If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the "next key".
|
||||
-->
|
||||
如果客户端需要一个一致的列表,它必须在没有 continue 字段的情况下重新发起 list 请求。
|
||||
否则,客户端可能会发送另一个带有 410 错误令牌的 list 请求,服务器将响应从下一个键开始的列表,
|
||||
但列表数据来自最新的快照,这与之前
|
||||
的列表结果不一致。第一个列表请求之后的对象创建,修改,或删除的对象将被包含在响应中,
|
||||
只要他们的键是在“下一个键”之后。
|
||||
但列表数据来自最新的快照,这与之前的列表结果不一致。
|
||||
第一个列表请求之后被创建、修改或删除的对象将被包含在响应中,只要它们的键是在“下一个键”之后。
|
||||
|
||||
<!--
|
||||
This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
|
||||
-->
|
||||
当 watch 字段为 true 时,不支持此字段。客户端可以从服务器返回的最后一个 resourceVersion
|
||||
值开始监视,就不会错过任何修改。
|
||||
|
||||
<hr>
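A sketch of the resulting pagination flow, assuming `kubectl proxy` is serving the API on localhost:8001; the continue token placeholder stands for whatever the previous response returned in `.metadata.continue`:

```shell
kubectl proxy --port=8001 &

# First page of at most 500 items
curl 'http://localhost:8001/api/v1/pods?limit=500'

# Next page: reuse the identical query plus the returned token
curl 'http://localhost:8001/api/v1/pods?limit=500&continue=<token-from-previous-response>'
```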
|
||||
|
||||
## dryRun {#dryRun}
|
||||
|
||||
<!--
|
||||
When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
|
||||
<hr>
|
||||
-->
|
||||
表示不应该持久化所请求的修改。无效或无法识别的 dryRun 指令将导致错误响应,
|
||||
并且服务器不再对请求进行进一步处理。有效值为:
|
||||
|
||||
- All: 将处理所有的演练阶段
|
||||
|
||||
<hr>
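From kubectl, the same semantics are reachable with server-side dry run; a minimal example, with the manifest path assumed:

```shell
# Runs all admission and validation stages on the server, persists nothing
kubectl apply -f ./deployment.yaml --dry-run=server
```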
|
||||
|
||||
## fieldManager {#fieldManager}
|
||||
|
||||
<!--
|
||||
fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
|
||||
<hr>
|
||||
-->
|
||||
fieldManager 是与进行这些更改的参与者或实体相关联的名称。
|
||||
取值长度必须不超过 128 个字符,且仅包含 https://golang.org/pkg/unicode/#IsPrint 所定义的可打印字符。
|
||||
|
||||
<hr>
|
||||
|
||||
## fieldSelector {#fieldSelector}
|
||||
|
||||
<!--
|
||||
A selector to restrict the list of returned objects by their fields. Defaults to everything.
|
||||
<hr>
|
||||
-->
|
||||
限制所返回对象的字段的选择器。默认为返回所有字段。
|
||||
|
||||
<hr>
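Both this parameter and the labelSelector parameter later on this page surface in kubectl; for example:

```shell
# fieldSelector: only Pods whose status.phase is Running
kubectl get pods --field-selector=status.phase=Running

# labelSelector: only objects carrying the label app=nginx
kubectl get pods --selector=app=nginx
```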
|
||||
|
||||
## fieldValidation {#fieldValidation}
|
||||
|
||||
<!--
|
||||
fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields.
|
||||
fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
-->
|
||||
fieldValidation 指示服务器如何处理请求(POST/PUT/PATCH)中包含未知或重复字段的对象。
|
||||
<!--
|
||||
Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23.
|
||||
-->
|
||||
有效值为:
|
||||
|
||||
- Ignore:这将忽略从对象中默默删除的所有未知字段,并将忽略除解码器遇到的最后一个重复字段之外的所有字段。
|
||||
这是在 v1.23 之前的默认行为。
|
||||
<!--
|
||||
- Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+
|
||||
-->
|
||||
- Warn:这将针对从对象中删除的各个未知字段以及所遇到的各个重复字段,分别通过标准警告响应头发出警告。
|
||||
如果没有其他错误,请求仍然会成功,并且只会保留所有重复字段中的最后一个。
|
||||
这是 v1.23+ 版本中的默认设置。
|
||||
<!--
|
||||
- Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.
|
||||
-->
|
||||
- Strict:如果从对象中删除任何未知字段,或者存在任何重复字段,将使请求失败并返回 BadRequest 错误。
|
||||
|
||||
从服务器返回的错误将包含所有遇到的未知和重复的字段。
|
||||
|
||||
<hr>
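kubectl exposes these modes through its `--validate` flag; a minimal sketch, with the manifest path assumed:

```shell
# Fail the request if the manifest contains unknown or duplicate fields
kubectl apply --validate=strict -f ./app.yaml
```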
|
||||
|
||||
## force {#force}
|
||||
|
||||
<!--
|
||||
Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
|
||||
<hr>
|
||||
-->
|
||||
Force 将“强制”执行 Apply 请求。这意味着用户将重新获得他人所拥有的、存在冲突的字段。
|
||||
对于非 Apply 的补丁请求,必须取消设置 Force 标志。
|
||||
|
||||
<hr>
|
||||
|
||||
## gracePeriodSeconds {#gracePeriodSeconds}
|
||||
|
||||
<!--
|
||||
The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
|
||||
<hr>
|
||||
-->
|
||||
删除对象前的持续时间(秒数)。值必须为非负整数。取值为 0 表示立即删除。
|
||||
如果该值为 nil,将使用指定类型的默认宽限期。如果没有指定,默认为每个对象的设置值。
|
||||
0 表示立即删除。
|
||||
|
||||
<hr>
|
||||
|
||||
## labelSelector {#labelSelector}
|
||||
|
||||
<!--
|
||||
A selector to restrict the list of returned objects by their labels. Defaults to everything.
|
||||
<hr>
|
||||
-->
|
||||
通过标签限制返回对象列表的选择器。默认为返回所有对象。
|
||||
|
||||
<hr>
|
||||
|
||||
## limit {#limit}
|
||||
|
||||
<!--
|
||||
limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results.
|
||||
limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.
|
||||
-->
|
||||
limit 是一个列表调用返回的最大响应数。如果有更多的条目,服务器会将列表元数据上的
|
||||
limit 是一个列表调用返回的最大响应数。如果有更多的条目,服务器会将列表元数据上的
|
||||
'continue' 字段设置为一个值,该值可以用于相同的初始查询来检索下一组结果。
|
||||
<!--
|
||||
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.
|
||||
-->
|
||||
设置 limit 可能会在所有请求的对象被过滤掉的情况下返回少于请求的条目数量(下限为零),
|
||||
并且客户端应该只根据 continue 字段是否存在来确定是否有更多的结果可用。
|
||||
服务器可能选择不支持 limit 参数,并将返回所有可用的结果。
|
||||
如果指定了 limit 并且 continue 字段为空,客户端可能会认为没有更多的结果可用。
|
||||
如果 watch 为 true,则不支持此字段。
|
||||
|
||||
<!--
|
||||
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests.
|
||||
-->
|
||||
服务器保证在使用 continue 时返回的对象将与不带 limit 的列表调用相同,——
|
||||
也就是说,在发出第一个请求后所创建、修改或删除的对象将不包含在任何后续的继续请求中。
|
||||
<!--
|
||||
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
|
||||
<hr>
|
||||
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
|
||||
-->
|
||||
服务器保证在使用 continue 时返回的对象将与不带 limit 的列表调用相同,
|
||||
也就是说,在发出第一个请求后所创建、修改或删除的对象将不包含在任何后续的继续请求中。
|
||||
这有时被称为一致性快照,可以确保使用 limit 分块接收非常大结果的客户端能够看到所有可能的对象。
|
||||
如果对象在分块列表期间被更新,则返回计算第一个列表结果时存在的对象版本。
|
||||
|
||||
<hr>
|
||||
|
||||
## namespace {#namespace}
|
||||
|
||||
<!--
|
||||
object name and auth scope, such as for teams and projects
|
||||
<hr>
|
||||
-->
|
||||
|
||||
对象名称和身份验证范围,例如用于团队和项目。
|
||||
|
||||
<hr>
|
||||
|
||||
## pretty {#pretty}
|
||||
|
||||
<!--
|
||||
If 'true', then the output is pretty printed.
|
||||
<hr>
|
||||
-->
|
||||
|
||||
如果设置为 'true' ,那么输出是规范的打印。
|
||||
如果设置为 'true',则以美观的格式输出。
|
||||
|
||||
<hr>
|
||||
|
||||
## propagationPolicy {#propagationPolicy}
|
||||
|
||||
<!--
|
||||
Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
|
||||
<hr>
|
||||
-->
|
||||
该字段决定是否以及如何执行垃圾收集。可以设置此字段或 OrphanDependents,但不能同时设置。
|
||||
默认策略由 metadata.finalizers 和特定资源的默认策略设置决定。可接受的值是:
|
||||
|
||||
- 'Orphan':孤立依赖项;
|
||||
- 'Background':允许垃圾回收器后台删除依赖;
|
||||
- 'Foreground':一个级联策略,前台删除所有依赖项。
|
||||
|
||||
<hr>
|
||||
|
||||
## resourceVersion {#resourceVersion}
|
||||
|
||||
<!--
|
||||
resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.
|
||||
|
||||
Defaults to unset
|
||||
<hr>
|
||||
-->
|
||||
resourceVersion 对请求所针对的资源版本设置约束。
|
||||
详情请参见 https://kubernetes.io/zh-cn/docs/reference/using-api/api-concepts/#resource-versions。
|
||||
详情请参见 https://kubernetes.io/zh-cn/docs/reference/using-api/api-concepts/#resource-versions
|
||||
|
||||
默认不设置。
|
||||
|
||||
默认不设置
|
||||
<hr>
|
||||
|
||||
## resourceVersionMatch {#resourceVersionMatch}
|
||||
|
||||
<!--
|
||||
resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.
|
||||
|
||||
Defaults to unset
|
||||
<hr>
|
||||
-->
|
||||
resourceVersionMatch 字段决定如何将 resourceVersion 应用于列表调用。
|
||||
强烈建议对设置了 resourceVersion 的列表调用设置 resourceVersion 匹配,
|
||||
具体请参见 https://kubernetes.io/zh-cn/docs/reference/using-api/api-concepts/#resource-versions。
|
||||
具体请参见 https://kubernetes.io/zh-cn/docs/reference/using-api/api-concepts/#resource-versions
|
||||
|
||||
默认不设置
|
||||
默认不设置。
|
||||
|
||||
<hr>
|
||||
|
||||

## sendInitialEvents {#sendInitialEvents}

<!--
`sendInitialEvents=true` may be set together with `watch=true`. In that case,
the watch stream will begin with synthetic events to produce the current state of objects in the collection.
Once all such events have been sent, a synthetic "Bookmark" event will be sent.
The bookmark will report the ResourceVersion (RV) corresponding to the set of objects,
and be marked with `"k8s.io/initial-events-end": "true"` annotation. Afterwards,
the watch stream will proceed as usual, sending watch events corresponding to
changes (subsequent to the RV) to objects watched.
-->
`sendInitialEvents=true` may be set together with `watch=true`. In that case,
the watch stream will begin with synthetic events to produce the current state of objects in the collection.

@ -240,9 +231,7 @@ the ResourceVersion (RV), and be marked with the `"k8s.io/initial-events-end": "true"` annotation
Afterwards, the watch stream will proceed as usual, sending watch events
corresponding to changes (subsequent to the RV) to the objects being watched.

<!--
When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set.
The semantic of the watch request is as following:
- `resourceVersionMatch` = NotOlderThan
  is interpreted as "data at least as new as the provided `resourceVersion`"
  and the bookmark event is sent when the state is synced
  to a `resourceVersion` at least as fresh as the one provided by the ListOptions.

@ -254,11 +243,12 @@ The semantic of the watch request is as following:
-->
When the `sendInitialEvents` option is set, we require the `resourceVersionMatch`
option to also be set. The semantics of the watch request are as follows:

- `resourceVersionMatch` = NotOlderThan
  is interpreted as "data at least as new as the provided `resourceVersion`",
  and the bookmark event is sent when the state is synced to a `resourceVersion`
  at least as fresh as the one provided by the ListOptions. If `resourceVersion`
  is unset, this is interpreted as "consistent read", and the bookmark event is
  sent at latest when the state is synced to the moment the request started being
  processed.
- `resourceVersionMatch` set to any other value or unset: an Invalid error is returned.

<!--
@ -270,17 +260,18 @@ true, otherwise defaults to false.
<hr>
-->

## timeoutSeconds {#timeoutSeconds}

<!--
Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
<hr>
-->
Timeout (in seconds) for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.

<hr>

## watch {#watch}

<!--
Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
<hr>
-->
Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
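
Taken together, these options drive the streaming-list behavior. A minimal sketch of such a request against the Pods collection, assuming the `WatchList` feature gate is enabled on the API server (the namespace and resource here are illustrative):

```shell
# Sketch: stream the current state of a collection, then follow changes.
# Assumes the WatchList feature gate is enabled on the API server.
# The stream starts with synthetic ADDED events, then a BOOKMARK event
# annotated with "k8s.io/initial-events-end": "true", then normal watch events.
kubectl get --raw "/api/v1/namespaces/default/pods?watch=true&sendInitialEvents=true&resourceVersionMatch=NotOlderThan&allowWatchBookmarks=true&timeoutSeconds=60"
```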

@ -30,7 +30,7 @@ After renewal, in order to make changes effective, it is required to restart con
After renewal, in order to make the changes effective, it is required to restart control plane components and eventually redistribute the renewed certificate, in case the file is used elsewhere.

```
kubeadm alpha renew controller-manager.conf [flags]
kubeadm certs renew controller-manager.conf [flags]
```
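
As a sketch of the renamed command in use (`check-expiration` is the companion subcommand in the same command group):

```shell
# Renew the embedded certificate, then inspect expiry; restart the
# control plane components afterwards so the change takes effect.
kubeadm certs renew controller-manager.conf
kubeadm certs check-expiration
```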

@ -12,7 +12,7 @@ Output a kubeconfig file for an additional user.
Output a kubeconfig file for an additional user.

```
kubeadm alpha kubeconfig user [flags]
kubeadm kubeconfig user [flags]
```
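
A sketch of the renamed command in use; the client name and validity period below are illustrative placeholders, not values from this page:

```shell
# Generate a kubeconfig for an additional (hypothetical) user "johndoe",
# valid for 24 hours, and save it to a file.
kubeadm kubeconfig user --client-name=johndoe --validity-period=24h > johndoe.conf
```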

@ -20,7 +20,6 @@ The "reset" command executes the following phases:

```
preflight              Run reset pre-flight checks
update-cluster-status  Remove this node from the ClusterStatus object.
remove-etcd-member     Remove a local etcd member.
cleanup-node           Run cleanup node.
```

@ -43,10 +43,10 @@ daemons installed:
To run the node conformance test, a node must satisfy the same prerequisites as a standard Kubernetes node. At a minimum, the node should have the following daemons installed:

<!--
* Container Runtime (Docker)
* CRI-compatible container runtimes such as Docker, Containerd and CRI-O
* Kubelet
-->
* Container runtime (Docker)
* CRI-compatible container runtimes such as Docker, Containerd and CRI-O
* Kubelet

<!--
@ -74,7 +74,6 @@ To run the node conformance test, perform the following steps:
-->
Some other kubelet command-line parameters may be needed:

* `--cloud-provider`: if you are using `--cloud-provider=gce`, you should remove this flag to run the test.

<!--
2. Run the node conformance test with command:
-->
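
For reference, the upstream run command looks roughly like this sketch; `$CONFIG_DIR`, `$LOG_DIR`, and the image tag are placeholders to fill in:

```shell
# Run the node conformance test in a container; $CONFIG_DIR is the kubelet's
# pod manifest path and $LOG_DIR is where the test writes its results.
sudo docker run -it --rm --privileged --net=host \
  -v /:/rootfs -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
  registry.k8s.io/node-test:0.2
```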

@ -190,4 +189,4 @@ test**, because it requires much more complex configuration to run non-conforman
-->
* The test leaves some Docker images on the node, including the node conformance test image itself and images of containers used in the functionality tests.
* The test leaves dead containers on the node. These containers are created during the functionality tests.

@ -174,6 +174,10 @@ If you haven't already set up a cluster locally, run `minikube start` to create
http://172.17.0.15:31637
```

```shell
curl http://172.17.0.15:31637
```

<!--
The output is similar to:
-->

@ -495,12 +495,12 @@ etcd supports built-in snapshots. A snapshot may be taken using the `etcdctl snapshot save` command
<!--
Below is an example for taking a snapshot of the keyspace served by
`$ENDPOINT` to the file `snapshotdb`:
`$ENDPOINT` to the file `snapshot.db`:
-->
Below is an example for taking a snapshot of the keyspace served by `$ENDPOINT` to the file `snapshot.db`:

```shell
ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT snapshot save snapshotdb
ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT snapshot save snapshot.db
```

<!--
@ -509,7 +509,7 @@ Verify the snapshot:
-->
Verify the snapshot:

```shell
ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshotdb
ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshot.db
```
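
When etcd serves with client TLS (as kubeadm clusters do), the same save command needs the trust material. A sketch; the certificate paths below are typical for kubeadm clusters but are placeholders here:

```shell
# Sketch: snapshot an etcd that requires client TLS.
ETCDCTL_API=3 etcdctl --endpoints https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  snapshot save snapshot.db
```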

@ -616,26 +616,30 @@ Here is an example:
Here is an example:

```shell
ETCDCTL_API=3 etcdctl --endpoints 10.2.0.9:2379 snapshot restore snapshotdb
ETCDCTL_API=3 etcdctl --endpoints 10.2.0.9:2379 snapshot restore snapshot.db
```

<!--
Another example for restoring using `etcdctl` options:
-->
Another example of restoring using `etcdctl` options:

```shell
ETCDCTL_API=3 etcdctl snapshot restore --data-dir <data-dir-location> snapshotdb
ETCDCTL_API=3 etcdctl snapshot restore --data-dir <data-dir-location> snapshot.db
```

<!--
where `<data-dir-location>` is a directory that will be created during the restore process.

Yet another example would be to first export the `ETCDCTL_API` environment variable
-->
where `<data-dir-location>` is a directory that will be created during the restore process.

Yet another example would be to first export the `ETCDCTL_API` environment variable:

```shell
export ETCDCTL_API=3
etcdctl snapshot restore --data-dir <data-dir-location> snapshotdb
etcdctl snapshot restore --data-dir <data-dir-location> snapshot.db
```

<!--
@ -736,3 +740,12 @@ you perform defragmentation, you use a tool such as [etcd-defrag](https://github
-->
The Kubernetes project recommends that when you perform defragmentation,
you use a tool such as [etcd-defrag](https://github.com/ahrtr/etcd-defrag).
{{< /note >}}

<!--
You can also run the defragmentation tool as a Kubernetes CronJob, to make sure that
defragmentation happens regularly. See [`etcd-defrag-cronjob.yaml`](https://github.com/ahrtr/etcd-defrag/blob/main/doc/etcd-defrag-cronjob.yaml)
for details.
-->
You can also run the defragmentation tool as a Kubernetes CronJob, to make sure that
defragmentation happens regularly. See
[`etcd-defrag-cronjob.yaml`](https://github.com/ahrtr/etcd-defrag/blob/main/doc/etcd-defrag-cronjob.yaml)
for details.
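
As a sketch, that manifest can be applied straight from the repository; the raw URL below is derived from the blob link cited above, so verify the contents before applying it to a production cluster:

```shell
# Sketch: install the etcd-defrag CronJob from the manifest referenced above.
kubectl apply -f https://raw.githubusercontent.com/ahrtr/etcd-defrag/main/doc/etcd-defrag-cronjob.yaml
```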

@ -420,6 +420,24 @@ control plane automatically cleans up the long-lived token from that Secret.
When you delete a ServiceAccount that has an associated Secret, the Kubernetes
control plane automatically cleans up the long-lived token from that Secret.

{{< note >}}
<!--
If you view the ServiceAccount using:

`kubectl get serviceaccount build-robot -o yaml`

You can't see the `build-robot-secret` Secret in the ServiceAccount API objects
[`.secrets`](/docs/reference/kubernetes-api/authentication-resources/service-account-v1/) field
because that field is only populated with auto-generated Secrets.
-->
If you view the ServiceAccount using:

`kubectl get serviceaccount build-robot -o yaml`

you can't see the `build-robot-secret` Secret in the ServiceAccount API object's
[`.secrets`](/zh-cn/docs/reference/kubernetes-api/authentication-resources/service-account-v1/) field,
because that field is only populated with auto-generated Secrets.
{{< /note >}}

<!--
## Add ImagePullSecrets to a service account
-->

@ -66,7 +66,8 @@ docker login
When prompted, enter your Docker ID, and then the credential you want to use (access token,
or the password for your Docker ID).

The login process creates or updates a `config.json` file that holds an authorization token.
Review [how Kubernetes interprets this file](/docs/concepts/containers/images#config-json).

View the `config.json` file:
-->

@ -99,7 +100,9 @@ The output contains a section similar to this:
{{< note >}}
<!--
If you use a Docker credentials store, you won't see that `auth` entry but a `credsStore` entry with the name of the store as value.
In that case, you can create a secret directly.
See [Create a Secret by providing credentials on the command line](#create-a-secret-by-providing-credentials-on-the-command-line).
-->
If you use a Docker credentials store, you won't see an `auth` entry but a `credsStore` entry with the name of the store as its value.
In that case, you can create a Secret directly.

@ -353,9 +356,20 @@ kubectl get pod private-reg

{{< note >}}
<!--
To use image pull secrets for a Pod (or a Deployment, or other object that
has a pod template that you are using), you need to make sure that the appropriate
Secret does exist in the right namespace. The namespace to use is the same
namespace where you defined the Pod.
-->
To use image pull secrets for a Pod (or a Deployment, or other object that
has a Pod template that you are using), you need to make sure that the appropriate
Secret exists in the right namespace. The namespace to use is the same
namespace where you defined the Pod.
{{< /note >}}
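
A sketch of creating the pull Secret in the namespace the Pod uses; `my-app`, `regcred`, and the registry details are placeholders:

```shell
# Create the image pull Secret in the same namespace as the Pod.
kubectl create secret docker-registry regcred \
  --namespace=my-app \
  --docker-server=<your-registry-server> \
  --docker-username=<your-name> \
  --docker-password=<your-password> \
  --docker-email=<your-email>
```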

<!--
Also, in case the Pod fails to start with the status `ImagePullBackOff`, view the Pod events:
-->
Also, in case the Pod fails to start with the status `ImagePullBackOff`, view the Pod events:

```shell
kubectl describe pod private-reg
```

@ -383,8 +397,6 @@ Events:
```
... FailedToRetrieveImagePullSecret ... Unable to retrieve some image pull secrets (<regcred>); attempting to pull the image may not succeed.
```

{{< /note >}}

## {{% heading "whatsnext" %}}

@ -25,6 +25,13 @@ You may also visit the [troubleshooting overview document](/docs/tasks/debug/) f
For debugging your application, refer to the [application troubleshooting guide](/zh-cn/docs/tasks/debug/debug-application/).
You may also visit the [troubleshooting overview document](/zh-cn/docs/tasks/debug/) for more information.

<!--
For troubleshooting {{<glossary_tooltip text="kubectl" term_id="kubectl">}}, refer to
[Troubleshooting kubectl](/docs/tasks/debug/debug-cluster/troubleshoot-kubectl/).
-->
For troubleshooting {{<glossary_tooltip text="kubectl" term_id="kubectl">}}, refer to
[Troubleshooting kubectl](/zh-cn/docs/tasks/debug/debug-cluster/troubleshoot-kubectl/).

<!-- body -->

@ -469,4 +476,4 @@ This is an incomplete list of things that could go wrong, and how to adjust your
* Use `kubectl debug node` to [debug Kubernetes nodes](/zh-cn/docs/tasks/debug/debug-cluster/kubectl-node-debug)
* Use `crictl` to [debug Kubernetes nodes](/zh-cn/docs/tasks/debug/debug-cluster/crictl/)
* Learn more about [Kubernetes auditing](/zh-cn/docs/tasks/debug/debug-cluster/audit/)
* Use `telepresence` to [develop and debug services locally](/zh-cn/docs/tasks/debug/debug-cluster/local-debugging/)

@ -61,7 +61,7 @@ file for the Pod defines a command and two arguments:
-->
In this example, you create a Pod that runs a single container. The Pod's
configuration file defines a command and two arguments:

{{< codenew file="pods/commands.yaml" >}}
{{% code_sample file="pods/commands.yaml" %}}

<!--
1. Create a Pod based on the YAML configuration file:
-->

@ -80,7 +80,7 @@ The following methods exist for installing kubectl on Windows:
Download the `kubectl` checksum file:

```powershell
curl.exe -LO "https://dl.k8s.io/v{{< skew currentPatchVersion >}}/bin/windows/amd64/kubectl-convert.exe.sha256"
curl.exe -LO "https://dl.k8s.io/v{{< skew currentPatchVersion >}}/bin/windows/amd64/kubectl.exe.sha256"
```

@ -467,7 +467,7 @@ source for this page).
The following example shows how to do this (view the Markdown source for this page):

```none
{{</* codenew file="pods/storage/gce-volume.yaml" */>}}
{{</* alert color="warning" >}}This is a warning.{{< /alert */>}}
```

@ -104,14 +104,14 @@ This section of the Kubernetes documentation contains tutorials.
* [Apply Pod Security Standards at Cluster level](/docs/tutorials/security/cluster-level-pss/)
* [Apply Pod Security Standards at Namespace level](/docs/tutorials/security/ns-level-pss/)
* [AppArmor](/zh-cn/docs/tutorials/security/apparmor/)
* [seccomp](/zh-cn/docs/tutorials/security/seccomp/)
* [Seccomp](/zh-cn/docs/tutorials/security/seccomp/)
-->
## Security {#security}

* [Apply Pod Security Standards at Cluster level](/zh-cn/docs/tutorials/security/cluster-level-pss/)
* [Apply Pod Security Standards at Namespace level](/zh-cn/docs/tutorials/security/ns-level-pss/)
* [AppArmor](/zh-cn/docs/tutorials/security/apparmor/)
* [seccomp](/zh-cn/docs/tutorials/security/seccomp/)
* [Seccomp](/zh-cn/docs/tutorials/security/seccomp/)

## {{% heading "whatsnext" %}}

@ -13,33 +13,25 @@ content_type: tutorial
<!-- overview -->

<!--
This page provides a real world example of how to configure Redis using a ConfigMap and
builds upon the [Configure a Pod to Use a ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/) task.
-->
This page provides a real world example of how to configure Redis using a ConfigMap, and
builds upon the [Configure a Pod to Use a ConfigMap](/zh-cn/docs/tasks/configure-pod-container/configure-pod-configmap/) task.

## {{% heading "objectives" %}}

<!--
* Create a ConfigMap with Redis configuration values
* Create a Redis Pod that mounts and uses the created ConfigMap
* Verify that the configuration was correctly applied.
-->
* Create a ConfigMap with Redis configuration values
* Create a Redis Pod that mounts and uses the created ConfigMap
* Verify that the configuration was correctly applied

## {{% heading "prerequisites" %}}

{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}

<!--
@ -49,11 +41,8 @@ This page provides a real world example of how to configure Redis using a Config
-->
* The example shown on this page works with `kubectl` 1.14 and above.
* Understand [Configure a Pod to Use a ConfigMap](/zh-cn/docs/tasks/configure-pod-container/configure-pod-configmap/).

<!-- lessoncontent -->

<!--
## Real World Example: Configuring Redis using a ConfigMap
-->

@ -81,7 +70,7 @@ EOF
<!--
Apply the ConfigMap created above, along with a Redis pod manifest:
-->
Apply the ConfigMap created above, along with a Redis Pod manifest:

```shell
kubectl apply -f example-redis-config.yaml
```

@ -109,7 +98,7 @@ ConfigMap above as `/redis-master/redis.conf` inside the Pod.
The net effect is to expose the data in `data.redis-config` from the `example-redis-config`
ConfigMap above as `/redis-master/redis.conf` inside the Pod.

{{< codenew file="pods/config/redis-pod.yaml" >}}
{{% code_sample file="pods/config/redis-pod.yaml" %}}

<!--
Examine the created objects:
-->

@ -210,12 +199,12 @@ Now let's add some configuration values to the `example-redis-config` ConfigMap:
-->
Now let's add some configuration values to the `example-redis-config` ConfigMap:

{{< codenew file="pods/config/example-redis-config.yaml" >}}
{{% code_sample file="pods/config/example-redis-config.yaml" %}}

<!--
Apply the updated ConfigMap:
-->
Apply the updated ConfigMap:

```shell
kubectl apply -f example-redis-config.yaml
```
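
To confirm that the new values reached Redis, you can check the live configuration from inside the Pod, as the tutorial does; a sketch, assuming the Pod is named `redis` as in this example:

```shell
# Check the applied value from inside the Pod. Note that the Pod must be
# recreated for it to pick up the updated ConfigMap contents.
kubectl exec -it redis -- redis-cli CONFIG GET maxmemory
```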

@ -366,11 +355,7 @@ kubectl delete pod/redis configmap/example-redis-config

## {{% heading "whatsnext" %}}

<!--
* Learn more about [ConfigMaps](/docs/tasks/configure-pod-container/configure-pod-configmap/).
-->
* Learn more about [ConfigMaps](/zh-cn/docs/tasks/configure-pod-container/configure-pod-configmap/).

@ -88,6 +88,7 @@ Open the Kubernetes dashboard. You can do this two different ways:
Open a **new** terminal, and run:
-->
Open a **new** terminal, and run:

```shell
# Start a new terminal, and leave this running.
minikube dashboard
```
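
If you prefer not to have a browser window opened for you, minikube can print just the URL:

```shell
# Print the dashboard URL instead of opening a browser.
minikube dashboard --url
```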

@ -241,6 +242,25 @@ A Deployment is the recommended way to manage the creation and scaling of Pods.
kubectl config view
```

<!--
1. View application logs for a container in a pod.
-->
1. View application logs for a container in a pod.

```shell
kubectl logs hello-node-5f76cf6ccf-br9b5
```

<!--
The output is similar to:
-->
The output is similar to:

```
I0911 09:19:26.677397       1 log.go:195] Started HTTP server on port 8080
I0911 09:19:26.677586       1 log.go:195] Started UDP server on port 8081
```

{{< note >}}
<!--
For more information about `kubectl` commands, see the [kubectl overview](/docs/reference/kubectl/).
-->

@ -332,7 +352,8 @@ Kubernetes [*Service*](/docs/concepts/services-networking/service/).
<!--
## Enable addons

The minikube tool includes a set of built-in {{< glossary_tooltip text="addons" term_id="addons" >}}
that can be enabled, disabled and opened in the local Kubernetes environment.

1. List the currently supported addons:
-->
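
For example (a sketch; the available addon set varies across minikube versions):

```shell
# List the available addons, then enable one of them.
minikube addons list
minikube addons enable metrics-server
```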

@ -487,11 +508,12 @@ If you want to use minikube again to learn more about Kubernetes, you don't need
## {{% heading "whatsnext" %}}

<!--
* Tutorial to _[deploy your first app on Kubernetes with kubectl](/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro/)_.
* Learn more about [Deployment objects](/docs/concepts/workloads/controllers/deployment/).
* Learn more about [Deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/).
* Learn more about [Service objects](/docs/concepts/services-networking/service/).
-->
* Tutorial to **[deploy your first app on Kubernetes with kubectl](/zh-cn/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro/)**.
* Learn more about [Deployment objects](/zh-cn/docs/concepts/workloads/controllers/deployment/).
* Learn more about [Deploying applications](/zh-cn/docs/tasks/run-application/run-stateless-application-deployment/).
* Learn more about [Service objects](/zh-cn/docs/concepts/services-networking/service/).

@ -203,7 +203,7 @@ AppArmor is currently in beta, so options are specified as annotations.
AppArmor profiles are specified *per-container*. To specify the AppArmor profile to run a Pod
container with, add an annotation to the Pod's metadata:
-->
AppArmor profiles are specified **per-container**. To specify the AppArmor profile
to run a Pod container with, add an annotation to the Pod's metadata:

@ -329,7 +329,7 @@ Next, we'll run a simple "Hello AppArmor" pod with the deny-write profile:
-->
Next, we'll run a simple "Hello AppArmor" Pod with the deny-write profile:

{{< codenew file="pods/security/hello-apparmor.yaml" >}}
{{% code_sample file="pods/security/hello-apparmor.yaml" %}}

```shell
kubectl create -f ./hello-apparmor.yaml
```

@ -45,12 +45,12 @@ Pod Security is an admission controller that, when new Pods are created, applies checks based on the K
<!--
Install the following on your workstation:

- [KinD](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [kubectl](/docs/tasks/tools/)
-->
Install the following on your workstation:

- [KinD](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
- [kubectl](/zh-cn/docs/tasks/tools/)

@ -368,11 +368,11 @@ following:

{{<note>}}
<!--
If you use Docker Desktop with *kind* on macOS, you can
add `/tmp` as a Shared Directory under the menu item
**Preferences > Resources > File Sharing**.
-->
If you use Docker Desktop with *kind* on macOS, you can
add `/tmp` as a Shared Directory under the menu item
**Preferences > Resources > File Sharing**.
{{</note>}}

@ -411,7 +411,7 @@ following:
<!--
1. Point kubectl to the cluster:
-->
6. Point kubectl to the cluster:

```shell
kubectl cluster-info --context kind-psa-with-cluster-pss
```

@ -123,13 +123,13 @@ into the cluster.

{{< tabs name="tab_with_code" >}}
{{< tab name="audit.json" >}}
{{< codenew file="pods/security/seccomp/profiles/audit.json" >}}
{{% code_sample file="pods/security/seccomp/profiles/audit.json" %}}
{{< /tab >}}
{{< tab name="violation.json" >}}
{{< codenew file="pods/security/seccomp/profiles/violation.json" >}}
{{% code_sample file="pods/security/seccomp/profiles/violation.json" %}}
{{< /tab >}}
{{< tab name="fine-grained.json" >}}
{{< codenew file="pods/security/seccomp/profiles/fine-grained.json" >}}
{{% code_sample file="pods/security/seccomp/profiles/fine-grained.json" %}}
{{< /tab >}}
{{< /tabs >}}
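
As a sketch, the profiles can be fetched into a local `profiles/` directory; the `k8s.io/examples` URL prefix mirrors the file paths the shortcodes above reference:

```shell
# Download the example seccomp profiles referenced above.
mkdir -p profiles
curl -L -o profiles/audit.json https://k8s.io/examples/pods/security/seccomp/profiles/audit.json
curl -L -o profiles/violation.json https://k8s.io/examples/pods/security/seccomp/profiles/violation.json
curl -L -o profiles/fine-grained.json https://k8s.io/examples/pods/security/seccomp/profiles/fine-grained.json
```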

@ -170,7 +170,7 @@ onto a node.
kind runs Kubernetes in Docker, so each node of the cluster is a container.
This allows files to be mounted into the filesystem of each container, similar to loading files onto a node.

{{< codenew file="pods/security/seccomp/kind.yaml" >}}
{{% code_sample file="pods/security/seccomp/kind.yaml" %}}

<!--
Download that example kind configuration, and save it to a file named `kind.yaml`:
-->

@ -298,7 +298,7 @@ for all its containers:
-->
Here's a manifest for a Pod that requires the `RuntimeDefault` seccomp profile for all its containers:

{{< codenew file="pods/security/seccomp/ga/default-pod.yaml" >}}
{{% code_sample file="pods/security/seccomp/ga/default-pod.yaml" %}}

<!--
Create that Pod:
-->
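
A sketch of creating it from the hosted copy of that manifest; the `k8s.io/examples` prefix mirrors the shortcode path, and the Pod name is assumed from the file name:

```shell
# Create the Pod and check that it is Running.
kubectl apply -f https://k8s.io/examples/pods/security/seccomp/ga/default-pod.yaml
kubectl get pod default-pod
```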

@ -346,7 +346,7 @@ Here's a manifest for that Pod:
Here's a manifest for that Pod:

{{< codenew file="pods/security/seccomp/ga/audit-pod.yaml" >}}
{{% code_sample file="pods/security/seccomp/ga/audit-pod.yaml" %}}

{{< note >}}

@ -515,7 +515,7 @@ The manifest for this demonstration is:
The manifest for this demonstration is:

{{< codenew file="pods/security/seccomp/ga/violation-pod.yaml" >}}
{{% code_sample file="pods/security/seccomp/ga/violation-pod.yaml" %}}

<!--
Attempt to create the Pod in the cluster:
-->

@ -585,7 +585,7 @@ The manifest for this example is:
The manifest for this example is:

{{< codenew file="pods/security/seccomp/ga/fine-pod.yaml" >}}
{{% code_sample file="pods/security/seccomp/ga/fine-pod.yaml" %}}

<!--
Create the Pod in your cluster:
-->

@ -61,9 +61,9 @@ Let's say you have a Deployment containing a single `nginx` replica
Let's say you have a Deployment containing a single `nginx` replica (just for demonstration purposes) and a Service:

{{% code file="service/pod-with-graceful-termination.yaml" %}}
{{% code_sample file="service/pod-with-graceful-termination.yaml" %}}

{{% code file="service/explore-graceful-termination-nginx.yaml" %}}
{{% code_sample file="service/explore-graceful-termination-nginx.yaml" %}}

<!--
Now create the Deployment Pod and Service using the above files:
-->

@ -57,7 +57,7 @@ external IP address.
-->
1. Run a Hello World application in your cluster:

{{< codenew file="service/load-balancer-example.yaml" >}}
{{% code_sample file="service/load-balancer-example.yaml" %}}

```shell
kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml
```

@ -13,7 +13,7 @@ spec:
    readinessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 5
      initialDelaySeconds: 15
      periodSeconds: 10
    livenessProbe:
      tcpSocket:

@ -1,4 +1,7 @@
branches:
  - release: "1.25"
    finalPatchRelease: "1.25.15"
    endOfLifeDate: 2023-10-28
  - release: "1.24"
    finalPatchRelease: "1.24.17"
    endOfLifeDate: 2023-07-28

@ -103,66 +103,3 @@ schedules:
  - release: 1.26.0
    cherryPickDeadline: ""
    targetDate: 2022-12-09
  - release: 1.25
    releaseDate: 2022-08-23
    maintenanceModeStartDate: 2023-08-28
    endOfLifeDate: 2023-10-28
    next:
      release: 1.25.15
      cherryPickDeadline: 2023-10-13
      targetDate: 2023-10-18
    previousPatches:
      - release: 1.25.15
        cherryPickDeadline: 2023-10-13
        targetDate: 2023-10-18
      - release: 1.25.14
        cherryPickDeadline: 2023-09-08
        targetDate: 2023-09-13
      - release: 1.25.13
        cherryPickDeadline: 2023-08-04
        targetDate: 2023-08-23
      - release: 1.25.12
        cherryPickDeadline: 2023-07-14
        targetDate: 2023-07-19
      - release: 1.25.11
        cherryPickDeadline: 2023-06-09
        targetDate: 2023-06-14
      - release: 1.25.10
        cherryPickDeadline: 2023-05-12
        targetDate: 2023-05-17
      - release: 1.25.9
        cherryPickDeadline: 2023-04-07
        targetDate: 2023-04-12
      - release: 1.25.8
        cherryPickDeadline: 2023-03-10
        targetDate: 2023-03-15
      - release: 1.25.7
        cherryPickDeadline: 2023-02-10
        targetDate: 2023-02-15
        note: >-
          [Some container images might be **unsigned** due to a temporary issue with the promotion process](https://groups.google.com/a/kubernetes.io/g/dev/c/MwSx761slM0/m/4ajkeUl0AQAJ)
      - release: 1.25.6
        cherryPickDeadline: 2023-01-13
        targetDate: 2023-01-18
      - release: 1.25.5
        cherryPickDeadline: 2022-12-02
        targetDate: 2022-12-08
      - release: 1.25.4
        cherryPickDeadline: 2022-11-04
        targetDate: 2022-11-09
      - release: 1.25.3
        cherryPickDeadline: 2022-10-07
        targetDate: 2022-10-12
      - release: 1.25.2
        cherryPickDeadline: 2022-09-20
        targetDate: 2022-09-21
        note: >-
          [Out-of-Band release to fix the regression introduced in 1.25.1](https://groups.google.com/a/kubernetes.io/g/dev/c/tA6LNOQTR4Q/m/zL73maPTAQAJ)
      - release: 1.25.1
        cherryPickDeadline: 2022-09-09
        targetDate: 2022-09-14
        note: >-
          [Regression](https://groups.google.com/a/kubernetes.io/g/dev/c/tA6LNOQTR4Q/m/zL73maPTAQAJ)
      - release: 1.25.0
        cherryPickDeadline: ""
        targetDate: 2022-08-23

@ -24,7 +24,7 @@
    </form>
  </div>
  <div class="col-12 col-md-8 offset-md-2">
    <h2 class="ml-4">{{ .Title }}</h2>
    <h2 class="search-title ml-3">{{ .Title }}</h2>
    {{ if .Site.Params.gcs_engine_id }}
    <script>
      (function() {

@ -1,5 +1,17 @@
document.querySelector('html').classList.add('search');

document.addEventListener('DOMContentLoaded', function() {
  let searchTerm = new URLSearchParams(window.location.search).get('q');
  let fetchingElem = document.getElementById('bing-results-container');
  let searchTitle = document.querySelector('.search-title');

  if (!searchTerm) {
    if (fetchingElem) fetchingElem.style.display = 'none';
    if (searchTitle) searchTitle.style.display = 'none';
  }
});

window.renderGoogleSearchResults = () => {
  var cx = '013288817511911618469:elfqqbqldzg';
  var gcse = document.createElement('script');

@ -33,30 +45,36 @@
  }

  window.renderBingSearchResults = () => {
    var searchTerm = window.location.search.split("=")[1].split("&")[0].replace(/%20/g,' '),
        page = window.location.search.split("=")[2],
        q = "site:kubernetes.io " + searchTerm;
    let urlParams = new URLSearchParams(window.location.search);
    let searchTerm = urlParams.get("q") || "";
    let page = urlParams.get("page") || 1;
    let q = searchTerm;
    let results = '';
    let offset = (page - 1) * 10;
    let ajaxConf = {};

    page = (!page) ? 1 : page.split("&")[0];
    if (!searchTerm) return;

    var results = '', pagination = '', offset = (page - 1) * 10, ajaxConf = {};
    ajaxConf.url = 'https://kubernetes-io-search.azurewebsites.net/api/bingsearchproxy';
    ajaxConf.data = { q: q, offset: offset };
    ajaxConf.type = "GET";

    ajaxConf.url = 'https://api.cognitive.microsoft.com/bingcustomsearch/v7.0/search';
    ajaxConf.data = { q: q, offset: offset, customConfig: '320659264' };
    ajaxConf.type = "GET";
    ajaxConf.beforeSend = function(xhr){ xhr.setRequestHeader('Ocp-Apim-Subscription-Key', '51efd23677624e04b4abe921225ea7ec'); };
    $.ajax(ajaxConf).done(function(res) {
      if (res.status === 500) {
        console.log('Server Error');
        return;
      }

      if (res.webPages == null) return; // If no result, 'webPages' is 'undefined'
      var paginationAnchors = window.getPaginationAnchors(Math.ceil(res.webPages.totalEstimatedMatches / 10));
      res.webPages.value.map(ob => { results += window.getResultMarkupString(ob); })

      if($('#bing-results-container').length > 0) $('#bing-results-container').html(results);
      if($('#bing-pagination-container').length > 0) $('#bing-pagination-container').html(paginationAnchors);
    });
  }

  //China Verification
  // China Verification.
  var path = "path=/;"
  d = new Date()
  d.setTime(d.getTime() + (7 * 24 * 60 * 60 * 1000))