Merge branch 'kubernetes:main' into master
|
@ -11,7 +11,7 @@
|
|||
For overall help on editing and submitting pull requests, visit:
|
||||
https://kubernetes.io/docs/contribute/start/#improve-existing-content
|
||||
|
||||
Use the default base branch, “master”, if you're documenting existing
|
||||
Use the default base branch, “main”, if you're documenting existing
|
||||
features in the English localization.
|
||||
|
||||
If you're working on a different localization (not English), see
|
||||
|
|
9
Makefile
|
@ -6,8 +6,9 @@ NETLIFY_FUNC = $(NODE_BIN)/netlify-lambda
|
|||
# but this can be overridden when calling make, e.g.
|
||||
# CONTAINER_ENGINE=podman make container-image
|
||||
CONTAINER_ENGINE ?= docker
|
||||
IMAGE_REGISTRY ?= gcr.io/k8s-staging-sig-docs
|
||||
IMAGE_VERSION=$(shell scripts/hash-files.sh Dockerfile Makefile | cut -c 1-12)
|
||||
CONTAINER_IMAGE = kubernetes-hugo:v$(HUGO_VERSION)-$(IMAGE_VERSION)
|
||||
CONTAINER_IMAGE = $(IMAGE_REGISTRY)/k8s-website-hugo:v$(HUGO_VERSION)-$(IMAGE_VERSION)
|
||||
CONTAINER_RUN = $(CONTAINER_ENGINE) run --rm --interactive --tty --volume $(CURDIR):/src
|
||||
|
||||
CCRED=\033[0;31m
|
||||
|
@ -19,7 +20,11 @@ help: ## Show this help.
|
|||
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
|
||||
|
||||
module-check:
|
||||
@git submodule status --recursive | awk '/^[+-]/ {printf "\033[31mWARNING\033[0m Submodule not initialized: \033[34m%s\033[0m\n",$$2}' 1>&2
|
||||
@git submodule status --recursive | awk '/^[+-]/ {err = 1; printf "\033[31mWARNING\033[0m Submodule not initialized: \033[34m%s\033[0m\n",$$2} END { if (err != 0) print "You need to run \033[32mmake module-init\033[0m to initialize missing modules first"; exit err }' 1>&2
|
||||
|
||||
module-init:
|
||||
@echo "Initializing submodules..." 1>&2
|
||||
@git submodule update --init --recursive --depth 1
|
||||
|
||||
all: build ## Build site with production settings and put deliverables in ./public
|
||||
|
||||
|
|
|
@ -1,11 +1,9 @@
|
|||
aliases:
|
||||
sig-docs-blog-owners: # Approvers for blog content
|
||||
- castrojo
|
||||
- kbarnard10
|
||||
- onlydole
|
||||
- mrbobbytables
|
||||
sig-docs-blog-reviewers: # Reviewers for blog content
|
||||
- castrojo
|
||||
- kbarnard10
|
||||
- mrbobbytables
|
||||
- onlydole
|
||||
|
@ -31,9 +29,7 @@ aliases:
|
|||
- reylejano
|
||||
- savitharaghunathan
|
||||
- sftim
|
||||
- steveperry-53
|
||||
- tengqm
|
||||
- zparnold
|
||||
sig-docs-en-reviews: # PR reviews for English content
|
||||
- bradtopol
|
||||
- celestehorgan
|
||||
|
@ -44,9 +40,7 @@ aliases:
|
|||
- onlydole
|
||||
- rajeshdeshpande02
|
||||
- sftim
|
||||
- steveperry-53
|
||||
- tengqm
|
||||
- zparnold
|
||||
sig-docs-es-owners: # Admins for Spanish content
|
||||
- raelga
|
||||
- electrocucaracha
|
||||
|
@ -138,10 +132,11 @@ aliases:
|
|||
- ClaudiaJKang
|
||||
- gochist
|
||||
- ianychoi
|
||||
- seokho-son
|
||||
- ysyukr
|
||||
- jihoon-seo
|
||||
- pjhwa
|
||||
- seokho-son
|
||||
- yoonian
|
||||
- ysyukr
|
||||
sig-docs-leads: # Website chairs and tech leads
|
||||
- irvifa
|
||||
- jimangel
|
||||
|
@ -163,6 +158,7 @@ aliases:
|
|||
# zhangxiaoyu-zidif
|
||||
sig-docs-zh-reviews: # PR reviews for Chinese content
|
||||
- chenrui333
|
||||
- chenxuc
|
||||
- howieyuen
|
||||
- idealhack
|
||||
- pigletfly
|
||||
|
@ -235,10 +231,12 @@ aliases:
|
|||
- parispittman
|
||||
# authoritative source: https://git.k8s.io/sig-release/OWNERS_ALIASES
|
||||
sig-release-leads:
|
||||
- cpanato # SIG Technical Lead
|
||||
- hasheddan # SIG Technical Lead
|
||||
- jeremyrickard # SIG Technical Lead
|
||||
- justaugustus # SIG Chair
|
||||
- LappleApple # SIG Program Manager
|
||||
- puerco # SIG Technical Lead
|
||||
- saschagrunert # SIG Chair
|
||||
release-engineering-approvers:
|
||||
- cpanato # Release Manager
|
||||
|
@ -250,10 +248,11 @@ aliases:
|
|||
release-engineering-reviewers:
|
||||
- ameukam # Release Manager Associate
|
||||
- jimangel # Release Manager Associate
|
||||
- markyjackson-taulia # Release Manager Associate
|
||||
- mkorbi # Release Manager Associate
|
||||
- palnabarun # Release Manager Associate
|
||||
- onlydole # Release Manager Associate
|
||||
- sethmccombs # Release Manager Associate
|
||||
- thejoycekung # Release Manager Associate
|
||||
- verolop # Release Manager Associate
|
||||
- wilsonehusin # Release Manager Associate
|
||||
- wilsonehusin # Release Manager Associate
|
|
@ -1,6 +1,6 @@
|
|||
# Kubernetesのドキュメント
|
||||
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
|
||||
このリポジトリには、[KubernetesのWebサイトとドキュメント](https://kubernetes.io/)をビルドするために必要な全アセットが格納されています。貢献に興味を持っていただきありがとうございます!
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# 쿠버네티스 문서화
|
||||
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
|
||||
이 저장소에는 [쿠버네티스 웹사이트 및 문서](https://kubernetes.io/)를 빌드하는 데 필요한 자산이 포함되어 있습니다. 기여해주셔서 감사합니다!
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# Dokumentacja projektu Kubernetes
|
||||
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
|
||||
W tym repozytorium znajdziesz wszystko, czego potrzebujesz do zbudowania [strony internetowej Kubernetesa wraz z dokumentacją](https://kubernetes.io/). Bardzo nam miło, że chcesz wziąć udział w jej współtworzeniu!
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# A documentação do Kubernetes
|
||||
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
|
||||
Bem-vindos! Este repositório contém todos os recursos necessários para criar o [website e documentação do Kubernetes](https://kubernetes.io/). Estamos muito satisfeitos por você querer contribuir!
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
<!-- # The Kubernetes documentation -->
|
||||
# Документація Kubernetes
|
||||
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
|
||||
<!-- This repository contains the assets required to build the [Kubernetes website and documentation](https://kubernetes.io/). We're glad that you want to contribute! -->
|
||||
Вітаємо! В цьому репозиторії міститься все необхідне для роботи над [сайтом і документацією Kubernetes](https://kubernetes.io/). Ми щасливі, що ви хочете зробити свій внесок!
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
# The Kubernetes documentation
|
||||
-->
|
||||
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
|
||||
<!--
|
||||
This repository contains the assets required to build the [Kubernetes website and documentation](https://kubernetes.io/). We're glad that you want to contribute!
|
||||
|
|
108
README.md
|
@ -1,13 +1,13 @@
|
|||
# The Kubernetes documentation
|
||||
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)
|
||||
|
||||
This repository contains the assets required to build the [Kubernetes website and documentation](https://kubernetes.io/). We're glad that you want to contribute!
|
||||
|
||||
+ [Contributing to the docs](#contributing-to-the-docs)
|
||||
+ [Localization ReadMes](#localization-readmemds)
|
||||
- [Contributing to the docs](#contributing-to-the-docs)
|
||||
- [Localization ReadMes](#localization-readmemds)
|
||||
|
||||
# Using this repository
|
||||
## Using this repository
|
||||
|
||||
You can run the website locally using Hugo (Extended version), or you can run it in a container runtime. We strongly recommend using the container runtime, as it gives deployment consistency with the live website.
|
||||
|
||||
|
@ -22,14 +22,14 @@ To use this repository, you need the following installed locally:
|
|||
|
||||
Before you start, install the dependencies. Clone the repository and navigate to the directory:
|
||||
|
||||
```
|
||||
```bash
|
||||
git clone https://github.com/kubernetes/website.git
|
||||
cd website
|
||||
```
|
||||
|
||||
The Kubernetes website uses the [Docsy Hugo theme](https://github.com/google/docsy#readme). Even if you plan to run the website in a container, we strongly recommend pulling in the submodule and other development dependencies by running the following:
|
||||
|
||||
```
|
||||
```bash
|
||||
# pull in the Docsy submodule
|
||||
git submodule update --init --recursive --depth 1
|
||||
```
|
||||
|
@ -38,14 +38,14 @@ git submodule update --init --recursive --depth 1
|
|||
|
||||
To build the site in a container, run the following to build the container image and run it:
|
||||
|
||||
```
|
||||
```bash
|
||||
make container-image
|
||||
make container-serve
|
||||
```
|
||||
|
||||
If you see errors, it probably means that the Hugo container did not have enough computing resources available. To resolve this, increase the amount of CPU and memory allocated to Docker on your machine ([macOS](https://docs.docker.com/docker-for-mac/#resources) and [Windows](https://docs.docker.com/docker-for-windows/#resources)).
|
||||
|
||||
Open up your browser to http://localhost:1313 to view the website. As you make changes to the source files, Hugo updates the website and forces a browser refresh.
|
||||
Open up your browser to <http://localhost:1313> to view the website. As you make changes to the source files, Hugo updates the website and forces a browser refresh.
|
||||
|
||||
## Running the website locally using Hugo
|
||||
|
||||
|
@ -59,54 +59,55 @@ npm ci
|
|||
make serve
|
||||
```
|
||||
|
||||
This will start the local Hugo server on port 1313. Open up your browser to http://localhost:1313 to view the website. As you make changes to the source files, Hugo updates the website and forces a browser refresh.
|
||||
This will start the local Hugo server on port 1313. Open up your browser to <http://localhost:1313> to view the website. As you make changes to the source files, Hugo updates the website and forces a browser refresh.
|
||||
|
||||
## Building the API reference pages
|
||||
|
||||
The API reference pages located in `content/en/docs/reference/kubernetes-api` are built from the Swagger specification, using https://github.com/kubernetes-sigs/reference-docs/tree/master/gen-resourcesdocs.
|
||||
The API reference pages located in `content/en/docs/reference/kubernetes-api` are built from the Swagger specification, using <https://github.com/kubernetes-sigs/reference-docs/tree/master/gen-resourcesdocs>.
|
||||
|
||||
To update the reference pages for a new Kubernetes release (replace v1.20 in the following examples with the release to update to):
|
||||
|
||||
1. Pull the `kubernetes-resources-reference` submodule:
|
||||
|
||||
```
|
||||
git submodule update --init --recursive --depth 1
|
||||
```
|
||||
```bash
|
||||
git submodule update --init --recursive --depth 1
|
||||
```
|
||||
|
||||
2. Create a new API revision into the submodule, and add the Swagger specification:
|
||||
|
||||
```
|
||||
mkdir api-ref-generator/gen-resourcesdocs/api/v1.20
|
||||
curl 'https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/openapi-spec/swagger.json' > api-ref-generator/gen-resourcesdocs/api/v1.20/swagger.json
|
||||
```
|
||||
```bash
|
||||
mkdir api-ref-generator/gen-resourcesdocs/api/v1.20
|
||||
curl 'https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/openapi-spec/swagger.json' > api-ref-generator/gen-resourcesdocs/api/v1.20/swagger.json
|
||||
```
|
||||
|
||||
3. Copy the table of contents and fields configuration for the new release from a previous one:
|
||||
|
||||
```
|
||||
mkdir api-ref-generator/gen-resourcesdocs/api/v1.20
|
||||
cp api-ref-generator/gen-resourcesdocs/api/v1.19/* api-ref-generator/gen-resourcesdocs/api/v1.20/
|
||||
```
|
||||
```bash
|
||||
mkdir api-ref-generator/gen-resourcesdocs/api/v1.20
|
||||
cp api-ref-generator/gen-resourcesdocs/api/v1.19/* api-ref-generator/gen-resourcesdocs/api/v1.20/
|
||||
```
|
||||
|
||||
4. Adapt the files `toc.yaml` and `fields.yaml` to reflect the changes between the two releases
|
||||
|
||||
5. Next, build the pages:
|
||||
|
||||
```
|
||||
make api-reference
|
||||
```
|
||||
```bash
|
||||
make api-reference
|
||||
```
|
||||
|
||||
You can test the results locally by making and serving the site from a container image:
|
||||
You can test the results locally by making and serving the site from a container image:
|
||||
|
||||
```
|
||||
make container-image
|
||||
make container-serve
|
||||
```
|
||||
```bash
|
||||
make container-image
|
||||
make container-serve
|
||||
```
|
||||
|
||||
In a web browser, go to http://localhost:1313/docs/reference/kubernetes-api/ to view the API reference.
|
||||
In a web browser, go to <http://localhost:1313/docs/reference/kubernetes-api/> to view the API reference.
|
||||
|
||||
6. When all changes of the new contract are reflected into the configuration files `toc.yaml` and `fields.yaml`, create a Pull Request with the newly generated API reference pages.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version
|
||||
|
||||
Hugo ships in two sets of binaries for technical reasons. The current website runs on the **Hugo Extended** version only. On the [release page](https://github.com/gohugoio/hugo/releases), look for archives with `extended` in the name. To confirm, run `hugo version` and look for the word `extended`.
|
||||
|
@ -115,7 +116,7 @@ Hugo is shipped in two set of binaries for technical reasons. The current websit
|
|||
|
||||
If you run `make serve` on macOS and receive the following error:
|
||||
|
||||
```
|
||||
```bash
|
||||
ERROR 2020/08/01 19:09:18 Error: listen tcp 127.0.0.1:1313: socket: too many open files
|
||||
make: *** [serve] Error 1
|
||||
```
|
||||
|
@ -124,7 +125,7 @@ Try checking the current limit for open files:
|
|||
|
||||
`launchctl limit maxfiles`
|
||||
|
||||
Then run the following commands (adapted from https://gist.github.com/tombigel/d503800a282fcadbee14b537735d202c):
|
||||
Then run the following commands (adapted from <https://gist.github.com/tombigel/d503800a282fcadbee14b537735d202c>):
|
||||
|
||||
```shell
|
||||
#!/bin/sh
|
||||
|
@ -147,8 +148,7 @@ sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist
|
|||
|
||||
This works for macOS Catalina as well as Mojave.
|
||||
|
||||
|
||||
# Get involved with SIG Docs
|
||||
## Get involved with SIG Docs
|
||||
|
||||
Learn more about SIG Docs Kubernetes community and meetings on the [community page](https://github.com/kubernetes/community/tree/master/sig-docs#meetings).
|
||||
|
||||
|
@ -157,39 +157,39 @@ You can also reach the maintainers of this project at:
|
|||
- [Slack](https://kubernetes.slack.com/messages/sig-docs) [Get an invite for this Slack](https://slack.k8s.io/)
|
||||
- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)
|
||||
|
||||
# Contributing to the docs
|
||||
## Contributing to the docs
|
||||
|
||||
You can click the **Fork** button in the upper-right area of the screen to create a copy of this repository in your GitHub account. This copy is called a *fork*. Make any changes you want in your fork, and when you are ready to send those changes to us, go to your fork and create a new pull request to let us know about it.
|
||||
You can click the **Fork** button in the upper-right area of the screen to create a copy of this repository in your GitHub account. This copy is called a _fork_. Make any changes you want in your fork, and when you are ready to send those changes to us, go to your fork and create a new pull request to let us know about it.
|
||||
|
||||
Once your pull request is created, a Kubernetes reviewer will take responsibility for providing clear, actionable feedback. As the owner of the pull request, **it is your responsibility to modify your pull request to address the feedback that has been provided to you by the Kubernetes reviewer.**
|
||||
Once your pull request is created, a Kubernetes reviewer will take responsibility for providing clear, actionable feedback. As the owner of the pull request, **it is your responsibility to modify your pull request to address the feedback that has been provided to you by the Kubernetes reviewer.**
|
||||
|
||||
Also, note that you may end up having more than one Kubernetes reviewer provide you feedback or you may end up getting feedback from a Kubernetes reviewer that is different than the one initially assigned to provide you feedback.
|
||||
|
||||
Furthermore, in some cases, one of your reviewers might ask for a technical review from a Kubernetes tech reviewer when needed. Reviewers will do their best to provide feedback in a timely fashion but response time can vary based on circumstances.
|
||||
Furthermore, in some cases, one of your reviewers might ask for a technical review from a Kubernetes tech reviewer when needed. Reviewers will do their best to provide feedback in a timely fashion but response time can vary based on circumstances.
|
||||
|
||||
For more information about contributing to the Kubernetes documentation, see:
|
||||
|
||||
* [Contribute to Kubernetes docs](https://kubernetes.io/docs/contribute/)
|
||||
* [Page Content Types](https://kubernetes.io/docs/contribute/style/page-content-types/)
|
||||
* [Documentation Style Guide](https://kubernetes.io/docs/contribute/style/style-guide/)
|
||||
* [Localizing Kubernetes Documentation](https://kubernetes.io/docs/contribute/localization/)
|
||||
- [Contribute to Kubernetes docs](https://kubernetes.io/docs/contribute/)
|
||||
- [Page Content Types](https://kubernetes.io/docs/contribute/style/page-content-types/)
|
||||
- [Documentation Style Guide](https://kubernetes.io/docs/contribute/style/style-guide/)
|
||||
- [Localizing Kubernetes Documentation](https://kubernetes.io/docs/contribute/localization/)
|
||||
|
||||
# Localization `README.md`'s
|
||||
## Localization `README.md`'s
|
||||
|
||||
| Language | Language |
|
||||
|---|---|
|
||||
|[Chinese](README-zh.md)|[Korean](README-ko.md)|
|
||||
|[French](README-fr.md)|[Polish](README-pl.md)|
|
||||
|[German](README-de.md)|[Portuguese](README-pt.md)|
|
||||
|[Hindi](README-hi.md)|[Russian](README-ru.md)|
|
||||
|[Indonesian](README-id.md)|[Spanish](README-es.md)|
|
||||
|[Italian](README-it.md)|[Ukrainian](README-uk.md)|
|
||||
|[Japanese](README-ja.md)|[Vietnamese](README-vi.md)|
|
||||
| Language | Language |
|
||||
| -------------------------- | -------------------------- |
|
||||
| [Chinese](README-zh.md) | [Korean](README-ko.md) |
|
||||
| [French](README-fr.md) | [Polish](README-pl.md) |
|
||||
| [German](README-de.md) | [Portuguese](README-pt.md) |
|
||||
| [Hindi](README-hi.md) | [Russian](README-ru.md) |
|
||||
| [Indonesian](README-id.md) | [Spanish](README-es.md) |
|
||||
| [Italian](README-it.md) | [Ukrainian](README-uk.md) |
|
||||
| [Japanese](README-ja.md) | [Vietnamese](README-vi.md) |
|
||||
|
||||
# Code of conduct
|
||||
## Code of conduct
|
||||
|
||||
Participation in the Kubernetes community is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
||||
|
||||
# Thank you!
|
||||
## Thank you
|
||||
|
||||
Kubernetes thrives on community participation, and we appreciate your contributions to our website and our documentation!
|
||||
|
|
|
@ -0,0 +1,61 @@
|
|||
---
|
||||
layout: blog
|
||||
title: "{{ replace .Name "-" " " | title }}"
|
||||
date: {{ .Date }}
|
||||
draft: true
|
||||
slug: <seo-friendly-version-of-title-separated-by-dashes>
|
||||
---
|
||||
|
||||
**Author:** <your name> (<your organization name>), <another author's name> (<their organization>)
|
||||
|
||||
<!--
|
||||
Instructions:
|
||||
- Replace these instructions and the following text with your content.
|
||||
- Replace `<angle bracket placeholders>` with actual values. For example, you would update `date: <yyyy>-<mm>-<dd>` to look something like `date: 2021-10-21`.
|
||||
- For convenience, use third-party tools to author and collaborate on your content.
|
||||
- To save time and effort in reviews, check your content's spelling, grammar, and style before contributing.
|
||||
- Feel free to ask for assistance in the Kubernetes Slack channel, [#sig-docs-blog](https://kubernetes.slack.com/archives/CJDHVD54J).
|
||||
-->
|
||||
|
||||
Replace this first line of your content with one to three sentences that summarize the blog post.
|
||||
|
||||
## This is a section heading
|
||||
|
||||
To help the reader, organize your content into sections that contain about three to six paragraphs.
|
||||
|
||||
If you're documenting commands, separate the commands from the outputs, like this:
|
||||
|
||||
1. Verify that the Secret exists by running the following command:
|
||||
|
||||
```shell
|
||||
kubectl get secrets
|
||||
```
|
||||
|
||||
The response should be like this:
|
||||
|
||||
```shell
|
||||
NAME TYPE DATA AGE
|
||||
mysql-pass-c57bb4t7mf Opaque 1 9s
|
||||
```
|
||||
|
||||
You're free to create any sections you like. Below are a few common patterns we see at the end of blog posts.
|
||||
|
||||
## What’s next?
|
||||
|
||||
This optional section describes the future of the thing you've just described in the post.
|
||||
|
||||
## How can I learn more?
|
||||
|
||||
This optional section provides links to more information. Please avoid promoting or over-representing your organization.
|
||||
|
||||
## How do I get involved?
|
||||
|
||||
This optional section links to resources that readers can use to get involved, and includes acknowledgments of individual contributors, such as:
|
||||
|
||||
* [The name of a channel on Slack, #a-channel](https://<a-workspace>.slack.com/messages/<a-channel>)
|
||||
|
||||
* [A link to a "contribute" page with more information](<https://github.com/kubernetes/community/blob/master/sig-storage/README.md#contact>).
|
||||
|
||||
* Acknowledgements and thanks to the contributors, such as <person's name> ([<github id>](https://github.com/<github id>)), who did X, Y, and Z.
|
||||
|
||||
* If you're interested in getting involved with the design and development of <project>, join the [<name of the SIG>](https://github.com/project/community/tree/master/<sig-group>). We’re rapidly growing and always welcome new contributors.
|
|
@ -0,0 +1,25 @@
|
|||
# See https://cloud.google.com/cloud-build/docs/build-config
|
||||
|
||||
# this must be specified in seconds. If omitted, defaults to 600s (10 mins)
|
||||
timeout: 1200s
|
||||
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
|
||||
# or any new substitutions added in the future.
|
||||
options:
|
||||
substitution_option: ALLOW_LOOSE
|
||||
steps:
|
||||
# It's fine to bump the tag to a recent version, as needed
|
||||
- name: "gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4"
|
||||
entrypoint: make
|
||||
env:
|
||||
- DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
- TAG=$_GIT_TAG
|
||||
- BASE_REF=$_PULL_BASE_REF
|
||||
args:
|
||||
- container-image
|
||||
substitutions:
|
||||
# _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
|
||||
# can be used as a substitution
|
||||
_GIT_TAG: "12345"
|
||||
# _PULL_BASE_REF will contain the ref that was pushed to in order to trigger this build -
|
||||
# a branch like 'master' or 'release-0.2', or a tag like 'v0.2'.
|
||||
_PULL_BASE_REF: "master"
|
|
@ -26,7 +26,7 @@ Die Add-Ons in den einzelnen Kategorien sind alphabetisch sortiert - Die Reihenf
|
|||
* [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) ermöglicht das nahtlose Verbinden von Kubernetes mit einer Reihe an CNI-Plugins wie z.B. Calico, Canal, Flannel, Romana, oder Weave.
|
||||
* [Contiv](http://contiv.github.io) bietet konfigurierbares Networking (Native L3 auf BGP, Overlay mit vxlan, Klassisches L2, Cisco-SDN/ACI) für verschiedene Anwendungszwecke und auch umfangreiches Policy-Framework. Das Contiv-Projekt ist vollständig [Open Source](http://github.com/contiv). Der [installer](http://github.com/contiv/install) bietet sowohl kubeadm als auch nicht-kubeadm basierte Installationen.
|
||||
* [Contrail](http://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/), basierend auf [Tungsten Fabric](https://tungsten.io), ist eine Open Source, multi-Cloud Netzwerkvirtualisierungs- und Policy-Management Plattform. Contrail und Tungsten Fabric sind mit Orchestratoren wie z.B. Kubernetes, OpenShift, OpenStack und Mesos integriert und bieten Isolationsmodi für Virtuelle Maschinen, Container (bzw. Pods) und Bare Metal workloads.
|
||||
* [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kubernetes.md) ist ein Overlay-Network-Provider der mit Kubernetes genutzt werden kann.
|
||||
* [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) ist ein Overlay-Network-Provider der mit Kubernetes genutzt werden kann.
|
||||
* [Knitter](https://github.com/ZTE/Knitter/) ist eine Network-Lösung die Mehrfach-Network in Kubernetes ermöglicht.
|
||||
* [Multus](https://github.com/Intel-Corp/multus-cni) ist ein Multi-Plugin für Mehrfachnetzwerk-Unterstützung um alle CNI-Plugins (z.B. Calico, Cilium, Contiv, Flannel), zusätzlich zu SRIOV-, DPDK-, OVS-DPDK- und VPP-Basierten Workloads in Kubernetes zu unterstützen.
|
||||
* [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) Container Plug-in (NCP) bietet eine Integration zwischen VMware NSX-T und einem Orchestrator wie z.B. Kubernetes. Außerdem bietet es eine Integration zwischen NSX-T und Containerbasierten CaaS/PaaS-Plattformen wie z.B. Pivotal Container Service (PKS) und OpenShift.
|
||||
|
|
|
@ -43,12 +43,12 @@ Kubernetes is open source giving you the freedom to take advantage of on-premise
|
|||
<button id="desktopShowVideoButton" onclick="kub.showVideo()">Watch Video</button>
|
||||
<br>
|
||||
<br>
|
||||
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccncna20" button id="desktopKCButton">Attend KubeCon NA virtually on November 17-20, 2020</a>
|
||||
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccncna21" button id="desktopKCButton">Attend KubeCon North America on October 11-15, 2021</a>
|
||||
<br>
|
||||
<br>
|
||||
<br>
|
||||
<br>
|
||||
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccnceu21" button id="desktopKCButton">Attend KubeCon EU virtually on May 4 – 7, 2021</a>
|
||||
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe-2022/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccnceu22" button id="desktopKCButton">Attend KubeCon Europe on May 17-20, 2022</a>
|
||||
</div>
|
||||
<div id="videoPlayer">
|
||||
<iframe data-url="https://www.youtube.com/embed/H06qrNmGqyE?autoplay=1" frameborder="0" allowfullscreen></iframe>
|
||||
|
@ -58,4 +58,4 @@ Kubernetes is open source giving you the freedom to take advantage of on-premise
|
|||
|
||||
{{< blocks/kubernetes-features >}}
|
||||
|
||||
{{< blocks/case-studies >}}
|
||||
{{< blocks/case-studies >}}
|
||||
|
|
|
@ -140,7 +140,7 @@ The local persistent volume beta feature is not complete by far. Some notable en
|
|||
|
||||
## Complementary features
|
||||
|
||||
[Pod priority and preemption](/docs/concepts/configuration/pod-priority-preemption/) is another Kubernetes feature that is complementary to local persistent volumes. When your application uses local storage, it must be scheduled to the specific node where the local volume resides. You can give your local storage workload high priority so if that node ran out of room to run your workload, Kubernetes can preempt lower priority workloads to make room for it.
|
||||
[Pod priority and preemption](/docs/concepts/scheduling-eviction/pod-priority-preemption/) is another Kubernetes feature that is complementary to local persistent volumes. When your application uses local storage, it must be scheduled to the specific node where the local volume resides. You can give your local storage workload high priority so if that node ran out of room to run your workload, Kubernetes can preempt lower priority workloads to make room for it.
|
||||
|
||||
[Pod disruption budget](/docs/concepts/workloads/pods/disruptions/) is also very important for those workloads that must maintain quorum. Setting a disruption budget for your workload ensures that it does not drop below quorum due to voluntary disruption events, such as node drains during upgrade.
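As a quick illustration (the names and numbers below are placeholders, not taken from any particular workload), a disruption budget for a three-member quorum-based application might look like the following; on current clusters the API version is `policy/v1`, while older clusters use `policy/v1beta1`:

```yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: my-quorum-app-pdb        # placeholder name
spec:
  minAvailable: 2                # never drop below quorum of a 3-member ensemble
  selector:
    matchLabels:
      app: my-quorum-app         # placeholder label selecting the workload's pods
```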
|
||||
|
||||
|
|
|
@ -94,7 +94,7 @@ JOSH BERKUS: That goes into release notes. I mean, keep in mind that one of the
|
|||
|
||||
However, stuff happens, and we do occasionally have to do those. And so far, our main way to identify that to people actually is in the release notes. If you look at [the current release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#no-really-you-must-do-this-before-you-upgrade), there are actually two things in there right now that are sort of breaking changes.
|
||||
|
||||
One of them is the bit with [priority and preemption](/docs/concepts/configuration/pod-priority-preemption/) in that preemption being on by default now allows badly behaved users of the system to cause trouble in new ways. I'd actually have to look at the release notes to see what the second one was...
|
||||
One of them is the bit with [priority and preemption](/docs/concepts/scheduling-eviction/pod-priority-preemption/) in that preemption being on by default now allows badly behaved users of the system to cause trouble in new ways. I'd actually have to look at the release notes to see what the second one was...
|
||||
|
||||
TIM PEPPER: The [JSON capitalization case sensitivity](https://github.com/kubernetes/kubernetes/issues/64612).
|
||||
|
||||
|
|
|
@ -104,7 +104,7 @@ Master and Worker nodes should be protected from overload and resource exhaustio
|
|||
|
||||
Resource consumption by the control plane will correlate with the number of pods and the pod churn rate. Very large and very small clusters will benefit from non-default [settings](/docs/reference/command-line-tools-reference/kube-apiserver/) of kube-apiserver request throttling and memory. Setting these too high can lead to "request limit exceeded" and out-of-memory errors.
|
||||
|
||||
On worker nodes, [Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/) should be configured based on a reasonable supportable workload density at each node. Namespaces can be created to subdivide the worker node cluster into multiple virtual clusters with resource CPU and memory [quotas](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/). Kubelet handling of [out of resource](/docs/tasks/administer-cluster/out-of-resource/) conditions can be configured.
|
||||
On worker nodes, [Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/) should be configured based on a reasonable supportable workload density at each node. Namespaces can be created to subdivide the worker node cluster into multiple virtual clusters with resource CPU and memory [quotas](/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/). Kubelet handling of [out of resource](/docs/concepts/scheduling-eviction/node-pressure-eviction/) conditions can be configured.
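For example, a per-namespace quota (all names and values below are illustrative) can cap the CPU and memory that one team's workloads may request:

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: team-a-quota        # illustrative name
  namespace: team-a         # illustrative namespace
spec:
  hard:
    requests.cpu: "10"      # total CPU requests allowed in this namespace
    requests.memory: 20Gi   # total memory requests allowed in this namespace
    limits.cpu: "20"        # total CPU limits allowed in this namespace
    limits.memory: 40Gi     # total memory limits allowed in this namespace
```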
|
||||
|
||||
## Security
|
||||
|
||||
|
@ -166,7 +166,7 @@ Some critical state is held outside etcd. Certificates, container images, and ot
|
|||
* Cloud provider specific account and configuration data
|
||||
|
||||
## Considerations for your production workloads
|
||||
Anti-affinity specifications can be used to split clustered services across backing hosts, but at this time the settings are used only when the pod is scheduled. This means that Kubernetes can restart a failed node of your clustered application, but does not have a native mechanism to rebalance after a fail back. This is a topic worthy of a separate blog, but supplemental logic might be useful to achieve optimal workload placements after host or worker node recoveries or expansions. The [Pod Priority and Preemption feature](/docs/concepts/configuration/pod-priority-preemption/) can be used to specify a preferred triage in the event of resource shortages caused by failures or bursting workloads.
|
||||
Anti-affinity specifications can be used to split clustered services across backing hosts, but at this time the settings are used only when the pod is scheduled. This means that Kubernetes can restart a failed node of your clustered application, but does not have a native mechanism to rebalance after a fail back. This is a topic worthy of a separate blog, but supplemental logic might be useful to achieve optimal workload placements after host or worker node recoveries or expansions. The [Pod Priority and Preemption feature](/docs/concepts/scheduling-eviction/pod-priority-preemption/) can be used to specify a preferred triage in the event of resource shortages caused by failures or bursting workloads.
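For example, a preferred anti-affinity rule in a Pod template (the label below is a placeholder) asks the scheduler to spread replicas across hosts at scheduling time:

```yaml
# Fragment of a Pod template spec; the app label is a placeholder.
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchLabels:
              app: my-clustered-service
          topologyKey: kubernetes.io/hostname   # spread across distinct nodes
```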
|
||||
|
||||
For stateful services, external attached volume mounts are the standard Kubernetes recommendation for a non-clustered service (e.g., a typical SQL database). At this time Kubernetes managed snapshots of these external volumes is in the category of a [roadmap feature request](https://docs.google.com/presentation/d/1dgxfnroRAu0aF67s-_bmeWpkM1h2LCxe6lB1l1oS0EQ/edit#slide=id.g3ca07c98c2_0_47), likely to align with the Container Storage Interface (CSI) integration. Thus performing backups of such a service would involve application specific, in-pod activity that is beyond the scope of this document. While awaiting better Kubernetes support for a snapshot and backup workflow, running your database service in a VM rather than a container, and exposing it to your Kubernetes workload may be worth considering.
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ date: 2019-04-16
|
|||
|
||||
Kubernetes is well-known for running scalable workloads. It scales your workloads based on their resource usage. When a workload is scaled up, more instances of the application get created. When the application is critical for your product, you want to make sure that these new instances are scheduled even when your cluster is under resource pressure. One obvious solution to this problem is to over-provision your cluster resources to have some amount of slack resources available for scale-up situations. This approach often works, but costs more as you would have to pay for the resources that are idle most of the time.
|
||||
|
||||
[Pod priority and preemption](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) is a scheduler feature made generally available in Kubernetes 1.14 that allows you to achieve high levels of scheduling confidence for your critical workloads without overprovisioning your clusters. It also provides a way to improve resource utilization in your clusters without sacrificing the reliability of your essential workloads.
|
||||
[Pod priority and preemption](/docs/concepts/scheduling-eviction/pod-priority-preemption/) is a scheduler feature made generally available in Kubernetes 1.14 that allows you to achieve high levels of scheduling confidence for your critical workloads without overprovisioning your clusters. It also provides a way to improve resource utilization in your clusters without sacrificing the reliability of your essential workloads.
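As a minimal sketch (the class name, value, and description are arbitrary), you declare a PriorityClass once and then reference it from a Pod spec through `priorityClassName`:

```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: business-critical       # arbitrary name
value: 1000000                  # higher value means higher scheduling priority
globalDefault: false
description: "For workloads that must be scheduled even under resource pressure."
```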
|
||||
|
||||
## Guaranteed scheduling with controlled cost
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ The team has made progress in the last few months that is well worth celebrating
|
|||
|
||||
- The K8s-Infrastructure Working Group released an automated billing report that they start every meeting off by reviewing as a group.
|
||||
- DNS for k8s.io and kubernetes.io are also fully [community-owned](https://groups.google.com/g/kubernetes-dev/c/LZTYJorGh7c/m/u-ydk-yNEgAJ), with community members able to [file issues](https://github.com/kubernetes/k8s.io/issues/new?assignees=&labels=wg%2Fk8s-infra&template=dns-request.md&title=DNS+REQUEST%3A+%3Cyour-dns-record%3E) to manage records.
|
||||
- The container registry [k8s.gcr.io](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io) is also fully community-owned and available for all Kubernetes subprojects to use.
|
||||
- The container registry [k8s.gcr.io](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io) is also fully community-owned and available for all Kubernetes subprojects to use.
|
||||
- The Kubernetes [publishing-bot](https://github.com/kubernetes/publishing-bot) responsible for keeping k8s.io/kubernetes/staging repositories published to their own top-level repos (For example: [kubernetes/api](https://github.com/kubernetes/api)) runs on a community-owned cluster.
|
||||
- The gcsweb.k8s.io service used to provide anonymous access to GCS buckets for kubernetes artifacts runs on a community-owned cluster.
|
||||
- There is also an automated process of promoting all our container images. This includes a fully documented infrastructure, managed by the Kubernetes community, with automated processes for provisioning permissions.
|
||||
|
|
|
@ -186,7 +186,7 @@ metadata:
|
|||
|
||||
### Role Oriented Design
|
||||
|
||||
When you put it all together, you have a single load balancing infrastructure that can be safely shared by multiple teams. The Gateway API not only a more expressive API for advanced routing, but is also a role-oriented API, designed for multi-tenant infrastructure. Its extensibility ensures that it will evolve for future use-cases while preserving portability. Ultimately these characteristics will allow Gateway API to adapt to different organizational models and implementations well into the future.
|
||||
When you put it all together, you have a single load balancing infrastructure that can be safely shared by multiple teams. The Gateway API is not only a more expressive API for advanced routing, but is also a role-oriented API, designed for multi-tenant infrastructure. Its extensibility ensures that it will evolve for future use-cases while preserving portability. Ultimately these characteristics will allow the Gateway API to adapt to different organizational models and implementations well into the future.
|
||||
|
||||
### Try it out and get involved
|
||||
|
||||
|
@ -194,4 +194,4 @@ There are many resources to check out to learn more.
|
|||
|
||||
* Check out the [user guides](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see what use-cases can be addressed.
|
||||
* Try out one of the [existing Gateway controllers](https://gateway-api.sigs.k8s.io/references/implementations/)
|
||||
* Or [get involved](https://gateway-api.sigs.k8s.io/contributing/community/) and help design and influence the future of Kubernetes service networking!
|
||||
* Or [get involved](https://gateway-api.sigs.k8s.io/contributing/community/) and help design and influence the future of Kubernetes service networking!
|
||||
|
|
|
@ -0,0 +1,467 @@
|
|||
---
|
||||
layout: blog
|
||||
title: "Writing a Controller for Pod Labels"
|
||||
date: 2021-06-21
|
||||
slug: writing-a-controller-for-pod-labels
|
||||
---
|
||||
|
||||
**Authors**: Arthur Busser (Padok)
|
||||
|
||||
[Operators][what-is-an-operator] are proving to be an excellent solution to
|
||||
running stateful distributed applications in Kubernetes. Open source tools like
|
||||
the [Operator SDK][operator-sdk] provide ways to build reliable and maintainable
|
||||
operators, making it easier to extend Kubernetes and implement custom
|
||||
scheduling.
|
||||
|
||||
Kubernetes operators run complex software inside your cluster. The open source
|
||||
community has already built [many operators][operatorhub] for distributed
|
||||
applications like Prometheus, Elasticsearch, or Argo CD. Even outside of
|
||||
open source, operators can help to bring new functionality to your Kubernetes
|
||||
cluster.
|
||||
|
||||
An operator is a set of [custom resources][custom-resource-definitions] and a
|
||||
set of [controllers][controllers]. A controller watches for changes to specific
|
||||
resources in the Kubernetes API and reacts by creating, updating, or deleting
|
||||
resources.
|
||||
|
||||
The Operator SDK is best suited for building fully-featured operators.
|
||||
Nonetheless, you can use it to write a single controller. This post will walk
|
||||
you through writing a Kubernetes controller in Go that will add a `pod-name`
|
||||
label to pods that have a specific annotation.
|
||||
|
||||
## Why do we need a controller for this?
|
||||
|
||||
I recently worked on a project where we needed to create a Service that routed
|
||||
traffic to a specific Pod in a ReplicaSet. The problem is that a Service can
|
||||
only select pods by label, and all pods in a ReplicaSet have the same labels.
|
||||
There are two ways to solve this problem:
|
||||
|
||||
1. Create a Service without a selector and manage the Endpoints or
|
||||
EndpointSlices for that Service directly. We would need to write a custom
|
||||
controller to insert our Pod's IP address into those resources.
|
||||
2. Add a label to the Pod with a unique value. We could then use this label in
|
||||
our Service's selector. Again, we would need to write a custom controller to
|
||||
add this label.
|
||||
|
||||
A controller is a control loop that tracks one or more Kubernetes resource
|
||||
types. The controller from option n°2 above only needs to track pods, which
|
||||
makes it simpler to implement. This is the option we are going to walk through
|
||||
by writing a Kubernetes controller that adds a `pod-name` label to our pods.
|
||||
|
||||
StatefulSets [do this natively][statefulset-pod-name-label] by adding a
|
||||
`pod-name` label to each Pod in the set. But what if we don't want to or can't
|
||||
use StatefulSets?
|
||||
|
||||
We rarely create pods directly; most often, we use a Deployment, ReplicaSet, or
|
||||
another high-level resource. We can specify labels to add to each Pod in the
|
||||
PodSpec, but not with dynamic values, so there is no way to replicate a StatefulSet's
|
||||
`pod-name` label.
|
||||
|
||||
We tried using a [mutating admission webhook][mutating-admission-webhook]. When
|
||||
anyone creates a Pod, the webhook patches the Pod with a label containing the
|
||||
Pod's name. Disappointingly, this does not work: not all pods have a name before
|
||||
being created. For instance, when the ReplicaSet controller creates a Pod, it
|
||||
sends a name prefix (the `generateName` field) to the Kubernetes API server rather than a `name`. The API
|
||||
server generates a unique name before persisting the new Pod to etcd, but only
|
||||
after calling our admission webhook. So in most cases, we can't know a Pod's
|
||||
name with a mutating webhook.
|
||||
|
||||
Once a Pod exists in the Kubernetes API, it is mostly immutable, but we can
|
||||
still add a label. We can even do so from the command line:
|
||||
|
||||
```bash
|
||||
kubectl label pod my-pod my-label-key=my-label-value
|
||||
```
|
||||
|
||||
We need to watch for changes to any pods in the Kubernetes API and add the label
|
||||
we want. Rather than do this manually, we are going to write a controller that
|
||||
does it for us.
|
||||
|
||||
## Bootstrapping a controller with the Operator SDK
|
||||
|
||||
A controller is a reconciliation loop that reads the desired state of a resource
|
||||
from the Kubernetes API and takes action to bring the cluster's actual state
|
||||
closer to the desired state.
|
||||
|
||||
In order to write this controller as quickly as possible, we are going to use
|
||||
the Operator SDK. If you don't have it installed, follow the
|
||||
[official documentation][operator-sdk-installation].
|
||||
|
||||
```terminal
|
||||
$ operator-sdk version
|
||||
operator-sdk version: "v1.4.2", commit: "4b083393be65589358b3e0416573df04f4ae8d9b", kubernetes version: "v1.19.4", go version: "go1.15.8", GOOS: "darwin", GOARCH: "amd64"
|
||||
```
|
||||
|
||||
Let's create a new directory to write our controller in:
|
||||
|
||||
```bash
|
||||
mkdir label-operator && cd label-operator
|
||||
```
|
||||
|
||||
Next, let's initialize a new operator, to which we will add a single controller.
|
||||
To do this, you will need to specify a domain and a repository. The domain
|
||||
serves as a prefix for the group your custom Kubernetes resources will belong
|
||||
to. Because we are not going to be defining custom resources, the domain does
|
||||
not matter. The repository is going to be the name of the Go module we are going
|
||||
to write. By convention, this is the repository where you will be storing your
|
||||
code.
|
||||
|
||||
As an example, here is the command I ran:
|
||||
|
||||
```bash
|
||||
# Feel free to change the domain and repo values.
|
||||
operator-sdk init --domain=padok.fr --repo=github.com/busser/label-operator
|
||||
```
|
||||
|
||||
Next, we need to create a new controller. This controller will handle pods and
|
||||
not a custom resource, so no need to generate the resource code. Let's run this
|
||||
command to scaffold the code we need:
|
||||
|
||||
```bash
|
||||
operator-sdk create api --group=core --version=v1 --kind=Pod --controller=true --resource=false
|
||||
```
|
||||
|
||||
We now have a new file: `controllers/pod_controller.go`. This file contains a
|
||||
`PodReconciler` type with two methods that we need to implement. The first is
|
||||
`Reconcile`, and it looks like this for now:
|
||||
|
||||
```go
|
||||
func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
_ = r.Log.WithValues("pod", req.NamespacedName)
|
||||
|
||||
// your logic here
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
```
|
||||
|
||||
The `Reconcile` method is called whenever a Pod is created, updated, or deleted.
|
||||
The name and namespace of the Pod are in the `ctrl.Request` the method receives
|
||||
as a parameter.
|
||||
|
||||
The second method is `SetupWithManager` and for now it looks like this:
|
||||
|
||||
```go
|
||||
func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
// Uncomment the following line adding a pointer to an instance of the controlled resource as an argument
|
||||
// For().
|
||||
Complete(r)
|
||||
}
|
||||
```
|
||||
|
||||
The `SetupWithManager` method is called when the operator starts. It serves to
|
||||
tell the operator framework what types our `PodReconciler` needs to watch. To
|
||||
use the same `Pod` type used by Kubernetes internally, we need to import some of
|
||||
its code. All of the Kubernetes source code is open source, so you can import
|
||||
any part you like in your own Go code. You can find a complete list of available
|
||||
packages in the Kubernetes source code or [here on pkg.go.dev][pkg-go-dev]. To
|
||||
use pods, we need the `k8s.io/api/core/v1` package.
|
||||
|
||||
```go
|
||||
package controllers
|
||||
|
||||
import (
|
||||
// other imports...
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
// other imports...
|
||||
)
|
||||
```
|
||||
|
||||
Let's use the `Pod` type in `SetupWithManager` to tell the operator framework we
|
||||
want to watch pods:
|
||||
|
||||
```go
|
||||
func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&corev1.Pod{}).
|
||||
Complete(r)
|
||||
}
|
||||
```
|
||||
|
||||
Before moving on, we should set the RBAC permissions our controller needs. Above
|
||||
the `Reconcile` method, we have some default permissions:
|
||||
|
||||
```go
|
||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=pods/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=update
|
||||
```
|
||||
|
||||
We don't need all of those. Our controller will never interact with a Pod's
|
||||
status or its finalizers. It only needs to read and update pods. Let's remove the
|
||||
unnecessary permissions and keep only what we need:
|
||||
|
||||
```go
|
||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch
|
||||
```
|
||||
|
||||
We are now ready to write our controller's reconciliation logic.
|
||||
|
||||
## Implementing reconciliation
|
||||
|
||||
Here is what we want our `Reconcile` method to do:
|
||||
|
||||
1. Use the Pod's name and namespace from the `ctrl.Request` to fetch the Pod
|
||||
from the Kubernetes API.
|
||||
2. If the Pod has an `add-pod-name-label` annotation, add a `pod-name` label to
|
||||
the Pod; if the annotation is missing, don't add the label.
|
||||
3. Update the Pod in the Kubernetes API to persist the changes made.
|
||||
|
||||
Let's define some constants for the annotation and label:
|
||||
|
||||
```go
|
||||
const (
|
||||
addPodNameLabelAnnotation = "padok.fr/add-pod-name-label"
|
||||
podNameLabel = "padok.fr/pod-name"
|
||||
)
|
||||
```
|
||||
|
||||
The first step in our reconciliation function is to fetch the Pod we are working
|
||||
on from the Kubernetes API:
|
||||
|
||||
```go
|
||||
// Reconcile handles a reconciliation request for a Pod.
|
||||
// If the Pod has the addPodNameLabelAnnotation annotation, then Reconcile
|
||||
// will make sure the podNameLabel label is present with the correct value.
|
||||
// If the annotation is absent, then Reconcile will make sure the label is too.
|
||||
func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
log := r.Log.WithValues("pod", req.NamespacedName)
|
||||
|
||||
/*
|
||||
Step 0: Fetch the Pod from the Kubernetes API.
|
||||
*/
|
||||
|
||||
var pod corev1.Pod
|
||||
if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
|
||||
log.Error(err, "unable to fetch Pod")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
```
|
||||
|
||||
Our `Reconcile` method will be called when a Pod is created, updated, or
|
||||
deleted. In the deletion case, our call to `r.Get` will return a specific error.
|
||||
Let's import the package that defines this error:
|
||||
|
||||
```go
|
||||
package controllers
|
||||
|
||||
import (
|
||||
// other imports...
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
// other imports...
|
||||
)
|
||||
```
|
||||
|
||||
We can now handle this specific error and — since our controller does not care
|
||||
about deleted pods — explicitly ignore it:
|
||||
|
||||
```go
|
||||
/*
|
||||
Step 0: Fetch the Pod from the Kubernetes API.
|
||||
*/
|
||||
|
||||
var pod corev1.Pod
|
||||
if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
// we'll ignore not-found errors, since we can get them on deleted requests.
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
log.Error(err, "unable to fetch Pod")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
```
|
||||
|
||||
Next, let's edit our Pod so that our dynamic label is present if and only if our
|
||||
annotation is present:
|
||||
|
||||
```go
|
||||
/*
|
||||
Step 1: Add or remove the label.
|
||||
*/
|
||||
|
||||
labelShouldBePresent := pod.Annotations[addPodNameLabelAnnotation] == "true"
|
||||
labelIsPresent := pod.Labels[podNameLabel] == pod.Name
|
||||
|
||||
if labelShouldBePresent == labelIsPresent {
|
||||
// The desired state and actual state of the Pod are the same.
|
||||
// No further action is required by the operator at this moment.
|
||||
log.Info("no update required")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if labelShouldBePresent {
|
||||
// If the label should be set but is not, set it.
|
||||
if pod.Labels == nil {
|
||||
pod.Labels = make(map[string]string)
|
||||
}
|
||||
pod.Labels[podNameLabel] = pod.Name
|
||||
log.Info("adding label")
|
||||
} else {
|
||||
// If the label should not be set but is, remove it.
|
||||
delete(pod.Labels, podNameLabel)
|
||||
log.Info("removing label")
|
||||
}
|
||||
```
|
||||
|
||||
Finally, let's push our updated Pod to the Kubernetes API:
|
||||
|
||||
```go
|
||||
/*
|
||||
Step 2: Update the Pod in the Kubernetes API.
|
||||
*/
|
||||
|
||||
if err := r.Update(ctx, &pod); err != nil {
|
||||
log.Error(err, "unable to update Pod")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
```
|
||||
|
||||
When writing our updated Pod to the Kubernetes API, there is a risk that the Pod
|
||||
has been updated or deleted since we first read it. When writing a Kubernetes
|
||||
controller, we should keep in mind that we are not the only actors in the
|
||||
cluster. When this happens, the best thing to do is start the reconciliation
|
||||
from scratch by requeuing the event. Let's do exactly that:
|
||||
|
||||
```go
|
||||
/*
|
||||
Step 2: Update the Pod in the Kubernetes API.
|
||||
*/
|
||||
|
||||
if err := r.Update(ctx, &pod); err != nil {
|
||||
if apierrors.IsConflict(err) {
|
||||
// The Pod has been updated since we read it.
|
||||
        // Requeue the Pod to try to reconcile again.
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
if apierrors.IsNotFound(err) {
|
||||
// The Pod has been deleted since we read it.
|
||||
        // Requeue the Pod to try to reconcile again.
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
log.Error(err, "unable to update Pod")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
```
|
||||
|
||||
Let's remember to return successfully at the end of the method:
|
||||
|
||||
```go
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
```
|
||||
|
||||
And that's it! We are now ready to run the controller on our cluster.
|
||||
|
||||
## Run the controller on your cluster
|
||||
|
||||
To run our controller on your cluster, we need to run the operator. For that,
|
||||
all you will need is `kubectl`. If you don't have a Kubernetes cluster at hand,
|
||||
I recommend you start one locally with [KinD (Kubernetes in Docker)][kind].
|
||||
|
||||
All it takes to run the operator from your machine is this command:
|
||||
|
||||
```bash
|
||||
make run
|
||||
```
|
||||
|
||||
After a few seconds, you should see the operator's logs. Notice that our
|
||||
controller's `Reconcile` method was called for all pods already running in the
|
||||
cluster.
|
||||
|
||||
Let's keep the operator running and, in another terminal, create a new Pod:
|
||||
|
||||
```bash
|
||||
kubectl run --image=nginx my-nginx
|
||||
```
|
||||
|
||||
The operator should quickly print some logs, indicating that it reacted to the
|
||||
Pod's creation and subsequent changes in status:
|
||||
|
||||
```text
|
||||
INFO controllers.Pod no update required {"pod": "default/my-nginx"}
|
||||
INFO controllers.Pod no update required {"pod": "default/my-nginx"}
|
||||
INFO controllers.Pod no update required {"pod": "default/my-nginx"}
|
||||
INFO controllers.Pod no update required {"pod": "default/my-nginx"}
|
||||
```
|
||||
|
||||
Let's check the Pod's labels:
|
||||
|
||||
```terminal
|
||||
$ kubectl get pod my-nginx --show-labels
|
||||
NAME READY STATUS RESTARTS AGE LABELS
|
||||
my-nginx 1/1 Running 0 11m run=my-nginx
|
||||
```
|
||||
|
||||
Let's add an annotation to the Pod so that our controller knows to add our
|
||||
dynamic label to it:
|
||||
|
||||
```bash
|
||||
kubectl annotate pod my-nginx padok.fr/add-pod-name-label=true
|
||||
```
|
||||
|
||||
Notice that the controller immediately reacted and produced a new line in its
|
||||
logs:
|
||||
|
||||
```text
|
||||
INFO controllers.Pod adding label {"pod": "default/my-nginx"}
|
||||
```
|
||||
|
||||
```terminal
|
||||
$ kubectl get pod my-nginx --show-labels
|
||||
NAME READY STATUS RESTARTS AGE LABELS
|
||||
my-nginx 1/1 Running 0 13m padok.fr/pod-name=my-nginx,run=my-nginx
|
||||
```
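
If you want to see the reverse behavior as well, remove the annotation and check the labels one more time. This is just a quick sketch; the trailing dash is standard `kubectl` syntax for deleting an annotation:

```bash
# Remove the annotation; the trailing dash tells kubectl to delete it.
kubectl annotate pod my-nginx padok.fr/add-pod-name-label-

# The controller should log "removing label", and the dynamic label should be gone.
kubectl get pod my-nginx --show-labels
```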
|
||||
|
||||
Bravo! You just successfully wrote a Kubernetes controller capable of adding
|
||||
labels with dynamic values to resources in your cluster.
|
||||
|
||||
Controllers and operators, both big and small, can be an important part of your
|
||||
Kubernetes journey. Writing operators is easier now than it has ever been. The
|
||||
possibilities are endless.
|
||||
|
||||
## What next?
|
||||
|
||||
If you want to go further, I recommend starting by deploying your controller or
|
||||
operator inside a cluster. The `Makefile` generated by the Operator SDK will do
|
||||
most of the work.
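
As a rough sketch of what that usually looks like in an Operator SDK scaffolded project (the exact targets and variables can differ between SDK versions, and the image reference below is just a placeholder):

```bash
# Build and push the operator image (replace the placeholder image reference).
make docker-build docker-push IMG=registry.example.com/label-operator:v0.1.0

# Deploy the operator to the cluster that your current kubeconfig context points at.
make deploy IMG=registry.example.com/label-operator:v0.1.0

# Clean up the deployment once you are done experimenting.
make undeploy
```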
|
||||
|
||||
When deploying an operator to production, it is always a good idea to implement
|
||||
robust testing. The first step in that direction is to write unit tests.
|
||||
[This documentation][operator-sdk-testing] will guide you in writing tests for
|
||||
your operator. I wrote tests for the operator we just wrote; you can find all of
|
||||
my code in [this GitHub repository][github-repo].
|
||||
|
||||
## How to learn more?
|
||||
|
||||
The [Operator SDK documentation][operator-sdk-docs] goes into detail on how you
|
||||
can go further and implement more complex operators.
|
||||
|
||||
When modeling a more complex use-case, a single controller acting on built-in
|
||||
Kubernetes types may not be enough. You may need to build a more complex
|
||||
operator with [Custom Resource Definitions (CRDs)][custom-resource-definitions]
|
||||
and multiple controllers. The Operator SDK is a great tool to help you do this.
|
||||
|
||||
If you want to discuss building an operator, join the [#kubernetes-operator][slack-channel]
|
||||
channel in the [Kubernetes Slack workspace][slack-workspace]!
|
||||
|
||||
<!-- Links -->
|
||||
|
||||
[controllers]: https://kubernetes.io/docs/concepts/architecture/controller/
|
||||
[custom-resource-definitions]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/
|
||||
[kind]: https://kind.sigs.k8s.io/docs/user/quick-start/#installation
|
||||
[github-repo]: https://github.com/busser/label-operator
|
||||
[mutating-admission-webhook]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook
|
||||
[operator-sdk]: https://sdk.operatorframework.io/
|
||||
[operator-sdk-docs]: https://sdk.operatorframework.io/docs/
|
||||
[operator-sdk-installation]: https://sdk.operatorframework.io/docs/installation/
|
||||
[operator-sdk-testing]: https://sdk.operatorframework.io/docs/building-operators/golang/testing/
|
||||
[operatorhub]: https://operatorhub.io/
|
||||
[pkg-go-dev]: https://pkg.go.dev/k8s.io/api
|
||||
[slack-channel]: https://kubernetes.slack.com/messages/kubernetes-operators
|
||||
[slack-workspace]: https://slack.k8s.io/
|
||||
[statefulset-pod-name-label]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-name-label
|
||||
[what-is-an-operator]: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/
|
|
@ -0,0 +1,49 @@
|
|||
---
|
||||
layout: blog
|
||||
title: "Announcing Kubernetes Community Group Annual Reports"
|
||||
description: >
|
||||
Introducing brand new Kubernetes Community Group Annual Reports for
|
||||
Special Interest Groups and Working Groups.
|
||||
date: 2021-06-28T10:00:00-08:00
|
||||
slug: Announcing-Kubernetes-Community-Group-Annual-Reports
|
||||
---
|
||||
|
||||
**Author:** Divya Mohan
|
||||
|
||||
{{< figure src="k8s_annual_report_2020.svg" alt="Community annual report 2020" link="https://www.cncf.io/reports/kubernetes-community-annual-report-2020/" >}}
|
||||
|
||||
Given the growth and scale of the Kubernetes project, the existing reporting mechanisms were proving to be inadequate and challenging.
|
||||
Kubernetes is a large open source project. With over 100,000 commits just to the main k/kubernetes repository, hundreds of other code
|
||||
repositories in the project, and thousands of contributors, there's a lot going on. In fact, there are 37 contributor groups at the time of
|
||||
writing. We also value all forms of contribution, not just code changes.
|
||||
|
||||
With that context in mind, the challenge of reporting on all this activity was a call to action for exploring better options. Therefore,
|
||||
inspired by the Apache Software Foundation’s [open guide to PMC Reporting](https://www.apache.org/foundation/board/reporting) and the
|
||||
[CNCF project Annual Reporting](https://www.cncf.io/cncf-annual-report-2020/), the Kubernetes project is proud to announce the
|
||||
**Kubernetes Community Group Annual Reports for Special Interest Groups (SIGs) and Working Groups (WGs)**. In its flagship edition,
|
||||
the [2020 Summary report](https://www.cncf.io/reports/kubernetes-community-annual-report-2020/) focuses on bettering the
|
||||
Kubernetes ecosystem by assessing and promoting the health of the groups within the upstream community.
|
||||
|
||||
Previously, the mechanisms the Kubernetes project used to report on groups and their activities were
|
||||
[devstats](https://k8s.devstats.cncf.io/), GitHub data, and issues, which measured the health of a given UG/WG/SIG/Committee. As a
|
||||
project spanning several diverse communities, it was essential to have something that captured the human side of things. With 50,000+
|
||||
contributors, it’s easy to assume that the project has enough help and this report surfaces more information than /help-wanted and
|
||||
/good-first-issue for end users. This is how we sustain the project. Paraphrasing one of the Steering Committee members,
|
||||
[Paris Pittman](https://github.com/parispittman), “There was a requirement for tighter feedback loops - ones that involved more than just
|
||||
GitHub data and issues. Given that Kubernetes, as a project, has grown in scale and number of contributors over the years, we have
|
||||
outgrown the existing reporting mechanisms."
|
||||
|
||||
The existing communication channels between the Steering Committee members and the folks leading the groups and committees also needed
|
||||
to be made as open and as bi-directional as possible. To achieve this, every group and committee has been assigned a
|
||||
liaison from among the Steering Committee members for kick-off, help, or guidance throughout the process. According to
|
||||
[Davanum Srinivas a.k.a. dims](https://github.com/dims), “... That was one of the main motivations behind this report. People (leading the
|
||||
groups/committees) know that they can reach out to us and there’s a vehicle for them to reach out to us… This is our way of setting up a
|
||||
two-way feedback for them." The progress on these action items will be updated and tracked at the monthly Steering Committee meetings,
|
||||
ensuring that this is not a one-off activity. Quoting [Nikhita Raghunath](https://github.com/nikhita), one of the Steering Committee members,
|
||||
“... Once we have a base, the liaisons will work with these groups to ensure that the problems are resolved. When we have a report next year,
|
||||
we’ll have a look at the progress made and how we could still do better. But the idea is definitely to not stop at the report.”
|
||||
|
||||
With this report, we hope to empower our end user communities with information that they can use to identify ways in which they can support
|
||||
the project as well as a sneak peek into the roadmap for upcoming features. As a community, we thrive on feedback and would love to hear your
|
||||
views about the report. You can get in touch with the [Steering Committee](https://github.com/kubernetes/steering#contact) via
|
||||
[Slack](https://kubernetes.slack.com/messages/steering-committee) or via the [mailing list](mailto:steering@kubernetes.io).
|
|
@ -0,0 +1,275 @@
|
|||
---
|
||||
layout: blog
|
||||
title: "Kubernetes API and Feature Removals In 1.22: Here’s What You Need To Know"
|
||||
date: 2021-07-14
|
||||
slug: upcoming-changes-in-kubernetes-1-22
|
||||
---
|
||||
|
||||
**Authors**: Krishna Kilari (Amazon Web Services), Tim Bannister (The Scale Factory)
|
||||
|
||||
As the Kubernetes API evolves, APIs are periodically reorganized or upgraded.
|
||||
When APIs evolve, the old APIs they replace are deprecated, and eventually removed.
|
||||
See [Kubernetes API removals](#kubernetes-api-removals) to read more about Kubernetes'
|
||||
policy on removing APIs.
|
||||
|
||||
We want to make sure you're aware of some upcoming removals. These are
|
||||
beta APIs that you can use in current, supported Kubernetes versions,
|
||||
and they are already deprecated. The reason for all of these removals
|
||||
is that they have been superseded by a newer, stable (“GA”) API.
|
||||
|
||||
Kubernetes 1.22, due for release in August 2021, will remove a number of deprecated
|
||||
APIs.
|
||||
[Kubernetes 1.22 Release Information](https://www.kubernetes.dev/resources/release/)
|
||||
has details on the schedule for the v1.22 release.
|
||||
|
||||
## API removals for Kubernetes v1.22 {#api-changes}
|
||||
|
||||
The **v1.22** release will stop serving the API versions we've listed immediately below.
|
||||
These are all beta APIs that were previously deprecated in favor of newer and more stable
|
||||
API versions.
|
||||
<!-- sorted by API group -->
|
||||
|
||||
* Beta versions of the `ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration` API (the **admissionregistration.k8s.io/v1beta1** API versions)
|
||||
* The beta `CustomResourceDefinition` API (**apiextensions.k8s.io/v1beta1**)
|
||||
* The beta `APIService` API (**apiregistration.k8s.io/v1beta1**)
|
||||
* The beta `TokenReview` API (**authentication.k8s.io/v1beta1**)
|
||||
* Beta API versions of `SubjectAccessReview`, `LocalSubjectAccessReview`, `SelfSubjectAccessReview` (API versions from **authorization.k8s.io/v1beta1**)
|
||||
* The beta `CertificateSigningRequest` API (**certificates.k8s.io/v1beta1**)
|
||||
* The beta `Lease` API (**coordination.k8s.io/v1beta1**)
|
||||
* All beta `Ingress` APIs (the **extensions/v1beta1** and **networking.k8s.io/v1beta1** API versions)
|
||||
|
||||
The Kubernetes documentation covers these
|
||||
[API removals for v1.22](/docs/reference/using-api/deprecation-guide/#v1-22) and explains
|
||||
how each of those APIs change between beta and stable.
|
||||
|
||||
## What to do
|
||||
|
||||
We're going to run through each of the resources that are affected by these removals
|
||||
and explain the steps you'll need to take.
|
||||
|
||||
`Ingress`
|
||||
: Migrate to use the **networking.k8s.io/v1**
|
||||
[Ingress](/docs/reference/kubernetes-api/service-resources/ingress-v1/) API,
|
||||
[available since v1.19](/blog/2020/08/26/kubernetes-release-1.19-accentuate-the-paw-sitive/#ingress-graduates-to-general-availability).
|
||||
The related API [IngressClass](/docs/reference/kubernetes-api/service-resources/ingress-class-v1/)
|
||||
is designed to complement the [Ingress](/docs/concepts/services-networking/ingress/)
|
||||
concept, allowing you to configure multiple kinds of Ingress within one cluster.
|
||||
If you're currently using the deprecated
|
||||
[`kubernetes.io/ingress.class`](https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetes-io-ingress-class-deprecated)
|
||||
annotation, plan to switch to using the `.spec.ingressClassName` field instead.
|
||||
On any cluster running Kubernetes v1.19 or later, you can use the v1 API to
|
||||
retrieve or update existing Ingress objects, even if they were created using an
|
||||
older API version.
|
||||
|
||||
When you convert an Ingress to the v1 API, you should review each rule in that Ingress.
|
||||
Older Ingresses use the legacy `ImplementationSpecific` path type. Instead of `ImplementationSpecific`, switch [path matching](/docs/concepts/services-networking/ingress/#path-types) to either `Prefix` or `Exact`. One of the benefits of moving to these alternative path types is that it becomes easier to migrate between different Ingress classes.
|
||||
|
||||
**ⓘ** As well as upgrading _your_ own use of the Ingress API as a client, make sure that
|
||||
every ingress controller that you use is compatible with the v1 Ingress API.
|
||||
Read [Ingress Prerequisites](/docs/concepts/services-networking/ingress/#prerequisites)
|
||||
for more context about Ingress and ingress controllers.
|
||||
|
||||
`ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration`
|
||||
: Migrate to use the **admissionregistration.k8s.io/v1** API versions of
|
||||
[ValidatingWebhookConfiguration](/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1/)
|
||||
and [MutatingWebhookConfiguration](/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1/),
|
||||
available since v1.16.
|
||||
You can use the v1 API to retrieve or update existing objects, even if they were created using an older API version.
|
||||
|
||||
`CustomResourceDefinition`
|
||||
: Migrate to use the [CustomResourceDefinition](/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1/)
|
||||
**apiextensions.k8s.io/v1** API, available since v1.16.
|
||||
You can use the v1 API to retrieve or update existing objects, even if they were created
|
||||
using an older API version. If you defined any custom resources in your cluster, those
|
||||
are still served after you upgrade.
|
||||
|
||||
If you're using external CustomResourceDefinitions, you can use
|
||||
[`kubectl convert`](#kubectl-convert) to translate existing manifests to use the newer API.
|
||||
Because there are some functional differences between beta and stable CustomResourceDefinitions,
|
||||
our advice is to test out each one to make sure it works how you expect after the upgrade.
|
||||
|
||||
`APIService`
|
||||
: Migrate to use the **apiregistration.k8s.io/v1** [APIService](/docs/reference/kubernetes-api/cluster-resources/api-service-v1/)
|
||||
API, available since v1.10.
|
||||
You can use the v1 API to retrieve or update existing objects, even if they were created using an older API version.
|
||||
If you already have API aggregation using an APIService object, this aggregation continues
|
||||
to work after you upgrade.
|
||||
|
||||
`TokenReview`
|
||||
: Migrate to use the **authentication.k8s.io/v1** [TokenReview](/docs/reference/kubernetes-api/authentication-resources/token-review-v1/)
|
||||
API, available since v1.10.
|
||||
|
||||
As well as serving this API via HTTP, the Kubernetes API server uses the same format to
|
||||
[send](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)
|
||||
TokenReviews to webhooks. The v1.22 release continues to use the v1beta1 API for TokenReviews
|
||||
sent to webhooks by default. See [Looking ahead](#looking-ahead) for some specific tips about
|
||||
switching to the stable API.
|
||||
|
||||
`SubjectAccessReview`, `SelfSubjectAccessReview` and `LocalSubjectAccessReview`
|
||||
: Migrate to use the **authorization.k8s.io/v1** versions of those
|
||||
[authorization APIs](/docs/reference/kubernetes-api/authorization-resources/), available since v1.6.
|
||||
|
||||
`CertificateSigningRequest`
|
||||
: Migrate to use the **certificates.k8s.io/v1**
|
||||
[CertificateSigningRequest](/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1/)
|
||||
API, available since v1.19.
|
||||
You can use the v1 API to retrieve or update existing objects, even if they were created
|
||||
using an older API version. Existing issued certificates retain their validity when you upgrade.
|
||||
|
||||
`Lease`
|
||||
: Migrate to use the **coordination.k8s.io/v1** [Lease](/docs/reference/kubernetes-api/cluster-resources/lease-v1/)
|
||||
API, available since v1.14.
|
||||
You can use the v1 API to retrieve or update existing objects, even if they were created
|
||||
using an older API version.
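
For each of the resources listed above, you can confirm that the stable API already serves your existing objects before you upgrade. Here is a quick sketch using fully-qualified resource names with `kubectl` (assuming you have such objects in your cluster):

```bash
# Ask explicitly for the v1 version of a resource; objects created via the
# beta API are returned through the stable API.
kubectl get ingresses.v1.networking.k8s.io --all-namespaces

# The same pattern works for the other resources, for example:
kubectl get customresourcedefinitions.v1.apiextensions.k8s.io
kubectl get leases.v1.coordination.k8s.io --all-namespaces
```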
|
||||
|
||||
### `kubectl convert`
|
||||
|
||||
There is a plugin to `kubectl` that provides the `kubectl convert` subcommand.
|
||||
It's an official plugin that you can download as part of Kubernetes.
|
||||
See [Download Kubernetes](/releases/download/) for more details.
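
As a rough sketch of installing the plugin on Linux (the download URL pattern, platform, and install location here are assumptions; adjust them for your OS and architecture):

```bash
# Download the kubectl-convert plugin binary (linux/amd64 assumed).
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert"

# Make it executable and put it somewhere on your PATH so kubectl can discover it.
chmod +x kubectl-convert
sudo mv kubectl-convert /usr/local/bin/kubectl-convert

# Verify that kubectl picks up the plugin.
kubectl convert --help
```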
|
||||
|
||||
You can use `kubectl convert` to update manifest files to use a different API
|
||||
version. For example, if you have a manifest in source control that uses the beta
|
||||
Ingress API, you can check that definition out,
|
||||
and run
|
||||
`kubectl convert -f <manifest> --output-version <group>/<version>`.
|
||||
You can use the `kubectl convert` command to automatically convert an
|
||||
existing manifest.
|
||||
|
||||
For example, to convert an older Ingress definition to
|
||||
`networking.k8s.io/v1`, you can run:
|
||||
```bash
|
||||
kubectl convert -f ./legacy-ingress.yaml --output-version networking.k8s.io/v1
|
||||
```
|
||||
|
||||
The automatic conversion uses a similar technique to how the Kubernetes control plane
|
||||
updates objects that were originally created using an older API version. Because it's
|
||||
a mechanical conversion, you might need to go in and change the manifest to adjust
|
||||
defaults, and so on.
|
||||
|
||||
### Rehearse for the upgrade
|
||||
|
||||
If you manage your cluster's API server component, you can try out these API
|
||||
removals before you upgrade to Kubernetes v1.22.
|
||||
|
||||
To do that, add the following to the kube-apiserver command line arguments:
|
||||
|
||||
`--runtime-config=admissionregistration.k8s.io/v1beta1=false,apiextensions.k8s.io/v1beta1=false,apiregistration.k8s.io/v1beta1=false,authentication.k8s.io/v1beta1=false,authorization.k8s.io/v1beta1=false,certificates.k8s.io/v1beta1=false,coordination.k8s.io/v1beta1=false,extensions/v1beta1/ingresses=false,networking.k8s.io/v1beta1=false`
|
||||
|
||||
(as a side effect, this also turns off v1beta1 of EndpointSlice - watch out for
|
||||
that when you're testing).
|
||||
|
||||
Once you've switched all the kube-apiservers in your cluster to use that setting,
|
||||
those beta APIs are removed. You can test that API clients (`kubectl`, deployment
|
||||
tools, custom controllers, and so on) still work how you expect, and you can revert if
|
||||
you need to without having to plan a more disruptive downgrade.
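
One quick sanity check, assuming you have `kubectl` access to the rehearsal cluster, is to list the API versions the cluster still serves; the groups you just disabled should no longer appear:

```bash
# List served API versions and filter for the beta groups that v1.22 removes.
kubectl api-versions | grep -E \
  '^(admissionregistration|apiextensions|apiregistration|authentication|authorization|certificates|coordination|networking)\.k8s\.io/v1beta1$|^extensions/v1beta1$' \
  || echo "None of the soon-to-be-removed beta APIs are being served."
```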
|
||||
|
||||
|
||||
|
||||
### Advice for software authors
|
||||
|
||||
Maybe you're reading this because you're a developer of an addon or other
|
||||
component that integrates with Kubernetes?
|
||||
|
||||
If you develop an Ingress controller, webhook authenticator, an API aggregation, or
|
||||
any other tool that relies on these deprecated APIs, you should already have started
|
||||
to switch your software over.
|
||||
|
||||
You can use the tips in
|
||||
[Rehearse for the upgrade](#rehearse-for-the-upgrade) to run your own Kubernetes
|
||||
cluster that only uses the new APIs, and make sure that your code works OK.
|
||||
For your documentation, make sure readers are aware of any steps they should take
|
||||
for the Kubernetes v1.22 upgrade.
|
||||
|
||||
Where possible, give your users a hand to adopt the new APIs early - perhaps in a
|
||||
test environment - so they can give you feedback about any problems.
|
||||
|
||||
There are some [more deprecations](#looking-ahead) coming in Kubernetes v1.25,
|
||||
so plan to have those covered too.
|
||||
|
||||
## Kubernetes API removals
|
||||
|
||||
Here's some background about why Kubernetes removes some APIs, and also a promise
|
||||
about _stable_ APIs in Kubernetes.
|
||||
|
||||
Kubernetes follows a defined
|
||||
[deprecation policy](/docs/reference/using-api/deprecation-policy/) for its
|
||||
features, including the Kubernetes API. That policy allows for replacing stable
|
||||
(“GA”) APIs from Kubernetes. Importantly, this policy means that a stable API can only
|
||||
be deprecated when a newer stable version of that same API is available.
|
||||
|
||||
That stability guarantee matters: if you're using a stable Kubernetes API, there
|
||||
won't ever be a new version released that forces you to switch to an alpha or beta
|
||||
feature.
|
||||
|
||||
Earlier stages are different. Alpha features are under test and potentially
|
||||
incomplete. Almost always, alpha features are disabled by default.
|
||||
Kubernetes releases can and do remove alpha features that haven't worked out.
|
||||
|
||||
After alpha comes beta. These features are typically enabled by default; if the
|
||||
testing works out, the feature can graduate to stable. If not, it might need
|
||||
a redesign.
|
||||
|
||||
Last year, Kubernetes officially
|
||||
[adopted](/blog/2020/08/21/moving-forward-from-beta/#avoiding-permanent-beta)
|
||||
a policy for APIs that have reached their beta phase:
|
||||
|
||||
> For Kubernetes REST APIs, when a new feature's API reaches beta, that starts
|
||||
> a countdown. The beta-quality API now has three releases …
|
||||
> to either:
|
||||
>
|
||||
> * reach GA, and deprecate the beta, or
|
||||
> * have a new beta version (and deprecate the previous beta).
|
||||
|
||||
_At the time of that article, three Kubernetes releases equated to roughly nine
|
||||
calendar months. Later that same month, Kubernetes
|
||||
adopted a new
|
||||
release cadence of three releases per calendar year, so the countdown period is
|
||||
now roughly twelve calendar months._
|
||||
|
||||
Whether an API removal is because of a beta feature graduating to stable, or
|
||||
because that API hasn't proved successful, Kubernetes will continue to remove
|
||||
APIs by following its deprecation policy and making sure that migration options
|
||||
are documented.
|
||||
|
||||
### Looking ahead
|
||||
|
||||
There's a setting that's relevant if you use webhook authentication checks.
|
||||
A future Kubernetes release will switch to sending TokenReview objects
|
||||
to webhooks using the `authentication.k8s.io/v1` API by default. At the moment,
|
||||
the default is to send `authentication.k8s.io/v1beta1` TokenReviews to webhooks,
|
||||
and that's still the default for Kubernetes v1.22.
|
||||
However, you can switch over to the stable API right now if you want:
|
||||
add `--authentication-token-webhook-version=v1` to the command line options for
|
||||
the kube-apiserver, and check that webhooks for authentication still work how you
|
||||
expected.
|
||||
|
||||
Once you're happy it works OK, you can leave the `--authentication-token-webhook-version=v1`
|
||||
option set across your control plane.
|
||||
|
||||
The **v1.25** release that's planned for next year will stop serving beta versions of
|
||||
several Kubernetes APIs that are stable right now and have been for some time.
|
||||
The same v1.25 release will **remove** PodSecurityPolicy, which is deprecated and won't
|
||||
graduate to stable. See
|
||||
[PodSecurityPolicy Deprecation: Past, Present, and Future](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/)
|
||||
for more information.
|
||||
|
||||
The official [list of API removals](/docs/reference/using-api/deprecation-guide/#v1-25)
|
||||
planned for Kubernetes 1.25 is:
|
||||
|
||||
* The beta `CronJob` API (**batch/v1beta1**)
|
||||
* The beta `EndpointSlice` API (**discovery.k8s.io/v1beta1**)
|
||||
* The beta `PodDisruptionBudget` API (**policy/v1beta1**)
|
||||
* The beta `PodSecurityPolicy` API (**policy/v1beta1**)
|
||||
|
||||
## Want to know more?
|
||||
|
||||
Deprecations are announced in the Kubernetes release notes. You can see the announcements
|
||||
of pending deprecations in the release notes for
|
||||
[1.19](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md#deprecations),
|
||||
[1.20](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md#deprecation),
|
||||
and [1.21](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.21.md#deprecation).
|
||||
|
||||
For information on the process of deprecation and removal, check out the official Kubernetes
|
||||
[deprecation policy](/docs/reference/using-api/deprecation-policy/#deprecating-parts-of-the-api)
|
||||
document.
|
|
@ -0,0 +1,65 @@
|
|||
---
|
||||
layout: blog
|
||||
title: "Spotlight on SIG Usability"
|
||||
date: 2021-07-15
|
||||
slug: sig-usability-spotlight-2021
|
||||
---
|
||||
|
||||
**Author:** Kunal Kushwaha, Civo
|
||||
|
||||
## Introduction
|
||||
|
||||
Are you interested in learning about what [SIG Usability](https://github.com/kubernetes/community/tree/master/sig-usability) does and how you can get involved? Well, you're at the right place. SIG Usability is all about making Kubernetes more accessible to new folks, and its main activity is conducting user research for the community. In this blog, we have summarized our conversation with [Gaby Moreno](https://twitter.com/morengab), who walks us through the various aspects of being a part of the SIG and shares some insights about how others can get involved.
|
||||
|
||||
Gaby is a co-lead for SIG Usability. She works as a Product Designer at IBM and enjoys working on the user experience of open, hybrid cloud technologies like Kubernetes, OpenShift, Terraform, and Cloud Foundry.
|
||||
|
||||
## A summary of our conversation
|
||||
|
||||
### Q. Could you tell us a little about what SIG Usability does?
|
||||
|
||||
A. SIG Usability, at a high level, started because there was no dedicated user experience team for Kubernetes. The SIG is focused on the end-user ease of use of the Kubernetes project. Its main activity is user research for the community, which includes speaking to Kubernetes users.
|
||||
|
||||
This covers areas like user experience and accessibility. The objectives of the SIG are to ensure that the Kubernetes project is maximally usable by people of a wide range of backgrounds and abilities, for example by incorporating internationalization and ensuring the accessibility of documentation.
|
||||
|
||||
### Q. Why should new and existing contributors consider joining SIG Usability?
|
||||
|
||||
A. There are plenty of areas where new contributors can begin. For example:
|
||||
- User research projects, where people can help understand the usability of the end-user experiences, including error messages, end-to-end tasks, etc.
|
||||
- Accessibility guidelines for Kubernetes community artifacts, examples include: internationalization of documentation, color choices for people with color blindness, ensuring compatibility with screen reader technology, user interface design for core components with user interfaces, and more.
|
||||
|
||||
### Q. What do you do to help new contributors get started?
|
||||
|
||||
A. New contributors can get started by shadowing one of the user interviews, going through user interview transcripts, analyzing them, and designing surveys.
|
||||
|
||||
SIG Usability is also open to new project ideas. If you have an idea, we’ll do what we can to support it. There are regular SIG Meetings where people can ask their questions live. These meetings are also recorded for those who may not be able to attend. As always, you can reach out to us on Slack as well.
|
||||
|
||||
### Q. What does the survey include?
|
||||
|
||||
A. In simple terms, the survey gathers information about how people use Kubernetes, such as trends in learning to deploy a new system, error messages they receive, and workflows.
|
||||
|
||||
One of our goals is to standardize the responses accordingly. The ultimate goal is to analyze survey responses for important user stories whose needs aren't being met.
|
||||
|
||||
### Q. Are there any particular skills you’d like to recruit for? What skills are contributors to SIG Usability likely to learn?
|
||||
|
||||
A. Although contributing to SIG Usability does not have any prerequisites as such, experience with user research, qualitative research, or prior experience conducting interviews would be a great plus. Quantitative research, like survey design and screening, is also helpful and something that we expect contributors to learn.
|
||||
|
||||
### Q. What are you getting positive feedback on, and what’s coming up next for SIG Usability?
|
||||
|
||||
A. We have had new members joining regularly, coming to monthly meetings, and showing interest in becoming contributors and helping the community. We have also had a lot of people reach out to us via Slack to express their interest in the SIG.
|
||||
|
||||
Currently, we are focused on finishing the study mentioned in our [talk](https://www.youtube.com/watch?v=Byn0N_ZstE0), which is also our project for this year. We are always happy to have new contributors join us.
|
||||
|
||||
### Q: Any closing thoughts/resources you’d like to share?
|
||||
|
||||
A. We love meeting new contributors and helping them explore different Kubernetes project spaces. We will team up with other SIGs to facilitate engagement with end users, run studies, and help them integrate accessible design practices into their development practices.
|
||||
|
||||
Here are some resources for you to get started:
|
||||
- [GitHub](https://github.com/kubernetes/community/tree/master/sig-usability)
|
||||
- [Mailing list](https://groups.google.com/g/kubernetes-sig-usability)
|
||||
- [Open Community Issues/PRs](https://github.com/kubernetes/community/labels/sig%2Fusability)
|
||||
- [Slack](https://slack.k8s.io/)
|
||||
- [Slack channel #sig-usability](https://kubernetes.slack.com/archives/CLC5EF63T)
|
||||
|
||||
## Wrap Up
|
||||
|
||||
SIG Usability hosted a [KubeCon talk](https://www.youtube.com/watch?v=Byn0N_ZstE0) about studying Kubernetes users' experiences. The talk focuses on updates to the user study projects, understanding who is using Kubernetes, what they are trying to achieve, how the project is addressing their needs, and where we need to improve the project and the client experience. Join the SIG's update to find out about the most recent research results, what the plans are for the forthcoming year, and how to get involved in the upstream usability team as a contributor!
|
|
@ -0,0 +1,83 @@
|
|||
---
|
||||
layout: blog
|
||||
title: "Kubernetes Release Cadence Change: Here’s What You Need To Know"
|
||||
date: 2021-07-20
|
||||
slug: new-kubernetes-release-cadence
|
||||
---
|
||||
|
||||
**Authors**: Celeste Horgan, Adolfo García Veytia, James Laverack, Jeremy Rickard
|
||||
|
||||
On April 23, 2021, the Release Team merged a Kubernetes Enhancement Proposal (KEP) changing the Kubernetes release cycle from four releases a year (once a quarter) to three releases a year.
|
||||
|
||||
This blog post provides a high level overview about what this means for the Kubernetes community's contributors and maintainers.
|
||||
|
||||
## What's changing and when
|
||||
|
||||
Starting with the [Kubernetes 1.22 release](https://github.com/kubernetes/sig-release/tree/master/releases/release-1.22), a lightweight policy will drive the creation of each release schedule. This policy states:
|
||||
|
||||
* The first Kubernetes release of a calendar year should start at the second or third
|
||||
  week of January to give contributors coming back from the
|
||||
  end-of-year holidays more time.
|
||||
* The last Kubernetes release of a calendar year should be finished by the middle of
|
||||
December.
|
||||
* A Kubernetes release cycle has a length of approximately 15 weeks.
|
||||
* The week of KubeCon + CloudNativeCon is not considered a 'working week' for SIG Release. The Release Team will not hold meetings or make decisions in this period.
|
||||
* An explicit SIG Release break of at least two weeks between each cycle will
|
||||
be enforced.
|
||||
|
||||
As a result, Kubernetes will follow a three releases per year cadence. Kubernetes 1.23 will be the final release of the 2021 calendar year. This new policy results in a very predictable release schedule, allowing us to forecast upcoming release dates:
|
||||
|
||||
|
||||
*Proposed Kubernetes Release Schedule for the remainder of 2021*
|
||||
|
||||
| Week Number in Year | Release Number | Release Week | Note |
|
||||
| -------- | -------- | -------- | -------- |
|
||||
| 35 | 1.23 | 1 (August 23) | |
|
||||
| 50 | 1.23 | 16 (December 07) | KubeCon + CloudNativeCon NA Break (Oct 11-15) |
|
||||
|
||||
*Proposed Kubernetes Release Schedule for 2022*
|
||||
|
||||
| Week Number in Year | Release Number | Release Week | Note |
|
||||
| -------- | -------- | -------- | -------- |
|
||||
| 1 | 1.24 | 1 (January 03) | |
|
||||
| 15 | 1.24 | 15 (April 12) | |
|
||||
| 17 | 1.25 | 1 (April 26) | KubeCon + CloudNativeCon EU likely to occur |
|
||||
| 32 | 1.25 | 15 (August 09) | |
|
||||
| 34 | 1.26 | 1 (August 22) | KubeCon + CloudNativeCon NA likely to occur |
|
||||
| 49 | 1.26 | 14 (December 06) | |
|
||||
|
||||
These proposed dates reflect only the start and end dates, and they are subject to change. The Release Team will select dates for enhancement freeze, code freeze, and other milestones at the start of each release. For more information on these milestones, please refer to the [release phases](https://www.k8s.dev/resources/release/#phases) documentation. Feedback from prior releases will feed into this process.
|
||||
|
||||
## What this means for end users
|
||||
|
||||
The major change end users will experience is a slower release cadence and a slower rate of enhancement graduation. Kubernetes release artifacts, release notes, and all other aspects of any given release will stay the same.
|
||||
|
||||
Prior to this change an enhancement could graduate from alpha to stable in 9 months. With the change in cadence, this will stretch to 12 months. Additionally, graduation of features over the last few releases has in some part been driven by release team activities.
|
||||
|
||||
With fewer releases, users can expect to see the rate of feature graduation slow. Users can also expect releases to contain a larger number of enhancements that they need to be aware of during upgrades. However, with fewer releases to consume per year, it's intended that end user organizations will spend less time on upgrades and gain more time on supporting their Kubernetes clusters. It also means that Kubernetes releases are in support for a slightly longer period of time, so bug fixes and security patches will be available for releases for a longer period of time.
|
||||
|
||||
|
||||
## What this means for Kubernetes contributors
|
||||
|
||||
With a lower release cadence, contributors have more time for project enhancements, feature development, planning, and testing. A slower release cadence also provides more room for maintaining their mental health, preparing for events like KubeCon + CloudNativeCon, or working on downstream integrations.
|
||||
|
||||
|
||||
## Why we decided to change the release cadence
|
||||
|
||||
The Kubernetes 1.19 cycle was far longer than usual. SIG Release extended it to lessen the burden on both Kubernetes contributors and end users due to the COVID-19 pandemic. Following this extended release, the Kubernetes 1.20 release became the third, and final, release for 2020.
|
||||
|
||||
As the Kubernetes project matures, the number of enhancements per cycle grows, along with the burden on contributors and the Release Engineering team. Downstream consumers and integrators also face increased challenges keeping up with [ever more feature-packed releases](https://kubernetes.io/blog/2021/04/08/kubernetes-1-21-release-announcement/). Wider project adoption means the complexity of supporting a rapidly evolving platform affects a bigger downstream chain of consumers.
|
||||
|
||||
Changing the release cadence from four to three releases per year balances a variety of factors for stakeholders: while it's not strictly an LTS policy, consumers and integrators will get longer support terms for each minor version as the extended release cycles lead to the [previous three releases being supported](https://kubernetes.io/blog/2020/08/31/kubernetes-1-19-feature-one-year-support/) for a longer period. Contributors get more time to [mature enhancements](https://www.cncf.io/blog/2021/04/12/enhancing-the-kubernetes-enhancements-process/) and [get them ready for production](https://github.com/kubernetes/community/blob/master/sig-architecture/production-readiness.md).
|
||||
|
||||
Finally, the management overhead for SIG Release and the Release Engineering team diminishes, allowing the team to spend more time on improving the quality of the software releases and the tooling that drives them.
|
||||
|
||||
## How you can help
|
||||
|
||||
Join the [discussion](https://github.com/kubernetes/sig-release/discussions/1566) about communicating future release dates and be sure to be on the lookout for post release surveys.
|
||||
|
||||
## Where you can find out more
|
||||
|
||||
- Read the KEP [here](https://github.com/kubernetes/enhancements/tree/master/keps/sig-release/2572-release-cadence)
|
||||
- Join the [kubernetes-dev](https://groups.google.com/g/kubernetes-dev) mailing list
|
||||
- Join [Kubernetes Slack](https://slack.k8s.io) and follow the #announcements channel
|
|
@ -0,0 +1,71 @@
|
|||
---
|
||||
layout: blog
|
||||
title: 'Updating NGINX-Ingress to use the stable Ingress API'
|
||||
date: 2021-07-26
|
||||
slug: update-with-ingress-nginx
|
||||
---
|
||||
|
||||
**Authors:** James Strong, Ricardo Katz
|
||||
|
||||
With all Kubernetes APIs, there is a process for creating, maintaining, and
|
||||
ultimately deprecating them once they become GA. The networking.k8s.io API group is no
|
||||
different. The upcoming Kubernetes 1.22 release will remove several deprecated APIs
|
||||
that are relevant to networking:
|
||||
|
||||
- the `networking.k8s.io/v1beta1` API version of [IngressClass](/docs/concepts/services-networking/ingress/#ingress-class)
|
||||
- all beta versions of [Ingress](/docs/concepts/services-networking/ingress/): `extensions/v1beta1` and `networking.k8s.io/v1beta1`
|
||||
|
||||
On a v1.22 Kubernetes cluster, you'll be able to access Ingress and IngressClass
|
||||
objects through the stable (v1) APIs, but access via their beta APIs won't be possible.
|
||||
This change has been in
|
||||
discussion since
|
||||
[2017](https://github.com/kubernetes/kubernetes/issues/43214),
|
||||
[2019](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) with
|
||||
1.16 Kubernetes API deprecations, and most recently in
|
||||
KEP-1453:
|
||||
[Graduate Ingress API to GA](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/1453-ingress-api#122).
|
||||
|
||||
During community meetings, the networking Special Interest Group has decided to continue
|
||||
supporting Kubernetes versions older than 1.22 with Ingress-NGINX version 0.47.0.
|
||||
Support for Ingress-NGINX will continue for six months after Kubernetes 1.22
|
||||
is released. Any additional bug fixes and CVEs for Ingress-NGINX will be
|
||||
addressed on an as-needed basis.
|
||||
|
||||
The Ingress-NGINX project will have separate branches and releases to
|
||||
support this model, mirroring the Kubernetes project process. Future
|
||||
releases of the Ingress-NGINX project will track and support the latest
|
||||
versions of Kubernetes.
|
||||
|
||||
{{< table caption="Ingress NGINX supported version with Kubernetes Versions" >}}
|
||||
Kubernetes version | Ingress-NGINX version | Notes
|
||||
:-------------------|:----------------------|:------------
|
||||
v1.22 | v1.0.0-alpha.2 | New features, plus bug fixes.
|
||||
v1.21 | v0.47.x | Bugfixes only, and just for security issues or crashes. No end-of-support date announced.
|
||||
v1.20 | v0.47.x | Bugfixes only, and just for security issues or crashes. No end-of-support date announced.
|
||||
v1.19 | v0.47.x | Bugfixes only, and just for security issues or crashes. Fixes only provided until 6 months after Kubernetes v1.22.0 is released.
|
||||
{{< /table >}}
|
||||
|
||||
Because of the updates in Kubernetes 1.22, **v0.47.0** will not work with
|
||||
Kubernetes 1.22.
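
If you're not sure which version of the controller you're running, you can check the image tag of the deployed controller. This sketch assumes the default `ingress-nginx` namespace and deployment name used by the project's standard manifests; a Helm-based install may use different names:

```bash
# Print the image (and therefore the version tag) of the running controller.
kubectl --namespace ingress-nginx get deployment ingress-nginx-controller \
  --output jsonpath='{.spec.template.spec.containers[0].image}'
```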
|
||||
|
||||
## What you need to do
|
||||
|
||||
The team is currently in the process of upgrading ingress-nginx to support
|
||||
the v1 migration; you can track the progress
|
||||
[here](https://github.com/kubernetes/ingress-nginx/pull/7156).
|
||||
We're not making feature improvements to `ingress-nginx` until after the support for
|
||||
Ingress v1 is complete.
|
||||
|
||||
In the meantime, to ensure there are no compatibility issues:
|
||||
|
||||
* Update to the latest version of Ingress-NGINX; currently
|
||||
[v0.47.0](https://github.com/kubernetes/ingress-nginx/releases/tag/controller-v0.47.0)
|
||||
* After Kubernetes 1.22 is released, ensure you are using the latest version of
|
||||
Ingress-NGINX that supports the stable APIs for Ingress and IngressClass.
|
||||
* Test Ingress-NGINX version v1.0.0-alpha.2 with cluster versions >= 1.19
|
||||
  and report any issues to the project's GitHub page.
|
||||
|
||||
The community’s feedback and support in this effort are welcome. The
|
||||
Ingress-NGINX Sub-project regularly holds community meetings where we discuss
|
||||
this and other issues facing the project. For more information on the sub-project,
|
||||
please see [SIG Network](https://github.com/kubernetes/community/tree/master/sig-network).
|
|
@ -0,0 +1,231 @@
|
|||
---
|
||||
layout: blog
|
||||
title: "Roorkee robots, releases and racing: the Kubernetes 1.21 release interview"
|
||||
date: 2021-07-29
|
||||
---
|
||||
|
||||
**Author**: Craig Box (Google)
|
||||
|
||||
With Kubernetes 1.22 due out next week, now is a great time to look back on 1.21. The release team for that version was led by [Nabarun Pal](https://twitter.com/theonlynabarun) from VMware.
|
||||
|
||||
Back in April I [interviewed Nabarun](https://kubernetespodcast.com/episode/146-kubernetes-1.21/) on the weekly [Kubernetes Podcast from Google](https://kubernetespodcast.com/); the latest in a series of release lead conversations that started back with 1.11, not long after the show started back in 2018.
|
||||
|
||||
In these interviews we learn a little about the release, but also about the process behind it, and the story behind the person chosen to lead it. Getting to know a community member is my favourite part of the show each week, and so I encourage you to [subscribe wherever you get your podcasts](https://kubernetespodcast.com/subscribe/). With a release coming next week, you can probably guess what our next topic will be!
|
||||
|
||||
*This transcript has been edited and condensed for clarity.*
|
||||
|
||||
---
|
||||
|
||||
**CRAIG BOX: You have a Bachelor of Technology in Metallurgical and Materials Engineering. How are we doing at turning lead into gold?**
|
||||
|
||||
NABARUN PAL: Well, last I checked, we have yet to find the philosopher's stone!
|
||||
|
||||
**CRAIG BOX: One of the more important parts of the process?**
|
||||
|
||||
NABARUN PAL: We're not doing that well in terms of getting alchemists up and running. There is some improvement in nuclear technology, where you can turn lead into gold, but I would guess buying gold would be much more efficient.
|
||||
|
||||
**CRAIG BOX: Or Bitcoin? It depends what you want to do with the gold.**
|
||||
|
||||
NABARUN PAL: Yeah, seeing the increasing prices of Bitcoin, you'd probably prefer to bet on that. But, don't take this as a suggestion. I'm not a registered investment advisor, and I don't give investment advice!
|
||||
|
||||
**CRAIG BOX: But you are, of course, a trained materials engineer. How did you get into that line of education?**
|
||||
|
||||
NABARUN PAL: We had a graded and equated exam structure, where you sit a single exam, and then based on your performance in that exam, you can try any of the universities which take those scores into account. I went to the Indian Institute of Technology, Roorkee.
|
||||
|
||||
Materials engineering interested me a lot. I had a passion for computer science since childhood, but I also liked material science, so I wanted to explore that field. I did a lot of exploration around material science and metallurgy in my freshman and sophomore years, but then computing, since it was a passion, crept into the picture.
|
||||
|
||||
**CRAIG BOX: Let's dig in there a little bit. What did computing look like during your childhood?**
|
||||
|
||||
NABARUN PAL: It was a very interesting journey. I started exploring computers back when I was seven or eight. For my first programming language, if you call it a programming language, I explored LOGO.
|
||||
|
||||
You have a turtle on the screen, and you issue commands to it, like move forward or rotate or pen up or pen down. You basically draw geometric figures. I could visually see how I could draw a square and how I could draw a triangle. It was an interesting journey after that. I learned BASIC, then went to some amount of HTML, JavaScript.
|
||||
|
||||
**CRAIG BOX: It's interesting to me because Logo and BASIC were probably my first two programming languages, but I think there was probably quite a gap in terms of when HTML became a thing after those two! Did your love of computing always lead you down the path towards programming, or were you interested as a child in using computers for games or application software? What led you specifically into programming?**
|
||||
|
||||
NABARUN PAL: Programming came in late. Not just in computing, but in life, I'm curious with things. When my parents got me my first computer, I was curious. I was like, "how does this operating system work?" What even is running it? Using a television and using a computer is a different experience, but usability is kind of the same thing. The HCI device for a television is a remote, whereas with a computer, I had a keyboard and a mouse. I used to tinker with the box and reinstall operating systems.
|
||||
|
||||
We used to get magazines back then. They used to bundle OpenSuse or Debian, and I used to install them. It was an interesting experience, 15 years back, how Linux used to be. I have been a tinkerer all around, and that's what eventually led me to programming.
|
||||
|
||||
**CRAIG BOX: With an interest in both the physical and ethereal aspects of technology, you did a lot of robotics challenges during university. That's something that I am not surprised to hear from someone who has a background in Logo, to be honest. There's Mindstorms, and a lot of other technology that is based around robotics that a lot of LOGO people got into. How was that something that came about for you?**
|
||||
|
||||
NABARUN PAL: When I joined my university, apart from studying materials, one of the things they used to really encourage was to get involved in a lot of extracurricular activities. One which interested me was robotics. I joined [my college robotics team](https://github.com/marsiitr) and participated in a lot of challenges.
|
||||
|
||||
Predominantly, we used to participate in this competition called [ABU Robocon](https://en.wikipedia.org/wiki/ABU_Robocon), which is an event conducted by the Asia-Pacific Broadcasting Union. What they used to do was, every year, one of the participating countries in the contest would provide a problem statement. For example, one year, they asked us to build a badminton-playing robot. They asked us to build a rugby playing robot or a Frisbee thrower, and there are some interesting problem statements around the challenge: you can't do this. You can't do that. Weight has to be like this. Dimensions have to be like that.
|
||||
|
||||
I got involved in that, and most of my time at university, I used to spend there. Material science became kind of a backburner for me, and my hobby became my full time thing.
|
||||
|
||||
**CRAIG BOX: And you were not only involved there in terms of the project and contributions to it, but you got involved as a secretary of the team, effectively, doing a lot of the organization, which is a thread that will come up as we speak about Kubernetes.**
|
||||
|
||||
NABARUN PAL: Over the course of time, when I gained more knowledge into how the team works, it became very natural that I graduated up the ladder and then managed juniors. I became the joint secretary of the robotics club in our college. This was more of a broad, engaging role in evangelizing robotics at the university, to promote events, to help students to see the value in learning robotics - what you gain out of that mechanically or electronically, or how do you develop your logic by programming robots.
|
||||
|
||||
**CRAIG BOX: Your first job after graduation was working at a company called Algoshelf, but you were also an intern there while you were at school?**
|
||||
|
||||
NABARUN PAL: Algoshelf was known as Rorodata when I joined them as an intern. This was also an interesting opportunity for me in the sense that I was always interested in writing programs which people would use. One of the things that I did there was build an open source Function as a Service framework, if I may call it that - it was mostly turning Python functions into web servers without even writing any code. The interesting bit there was that it was targeted toward data scientists, and not towards programmers. We had to understand the pain of data scientists, that they had to learn a lot of programming in order to even deploy their machine learning models, and we wanted to solve that problem.
|
||||
|
||||
They offered me a job after my internship, and I kept on working for them after I graduated from university. There, I got introduced to Kubernetes, so we pivoted into a product structure where the very same thing I told you, the Functions as a Service thing, could be deployed in Kubernetes. I was exploring Kubernetes to use it as a scalable platform. Instead of managing pets, we wanted to manage cattle, as in, we wanted to have a very highly distributed architecture.
|
||||
|
||||
**CRAIG BOX: Not actual cattle. I've been to India. There are a lot of cows around.**
|
||||
|
||||
NABARUN PAL: Yeah, not actual cattle. That is a bit tough.
|
||||
|
||||
**CRAIG BOX: When Algoshelf were looking at picking up Kubernetes, what was the evaluation process like? Were you looking at other tools at the time? Or had enough time passed that Kubernetes was clearly the platform that everyone was going to use?**
|
||||
|
||||
NABARUN PAL: Algoshelf was a natural evolution. Before Kubernetes, we used to deploy everything on a single big AWS server, using systemd. Everything was a systemd service, and everything was deployed using Fabric. Fabric is a Python package which essentially is like Ansible, but much leaner, as it does not have all the shims and things that Ansible has.
|
||||
|
||||
Then we asked "what if we need to scale out to different machines?" Kubernetes was in the hype. We hopped onto the hype train to see whether Kubernetes was worth it for us. And that's where my journey started, exploring the ecosystem, exploring the community. How can we improve the community in essence?
|
||||
|
||||
**CRAIG BOX: A couple of times now you've mentioned as you've grown in a role, becoming part of the organization and the arranging of the group. You've talked about working in Python. You had submitted some talks to Pycon India. And I understand you're now a tech lead for that conference. What does the tech community look like in India and how do you describe your involvement in it?**
|
||||
|
||||
NABARUN PAL: My involvement with the community began when I was at university. When I was working as an intern at Algoshelf, I was introduced to this-- I never knew about PyCon India, or tech conferences in general.
|
||||
|
||||
The person that I was working with just asked me, like hey, did you submit a talk to PyCon India? It's very useful, the library that we were making. So I [submitted a talk](https://www.nabarun.in/talk/2017/pyconindia/#1) to PyCon India in 2017. Eventually the talk got selected. That was not my first speaking opportunity, it was my second. I also spoke at PyData Delhi on a similar thing that I worked on in my internship.
|
||||
|
||||
It has been a journey since then. I talked about the same thing at FOSSASIA Summit in Singapore, and got really involved with the Python community because it was what I used to work on back then.
|
||||
|
||||
After giving all those talks at conferences, I got also introduced to this amazing group called [dgplug](https://dgplug.org/), which is an acronym for the Durgapur Linux Users Group. It is a group started in-- I don't remember the exact year, but it was around 12 to 13 years back, by someone called Kushal Das, with the ideology of [training students into being better open source contributors](https://foss.training/).
|
||||
|
||||
I liked the idea and got involved with teaching last year. It is not limited to students. Professionals can also join in. It's about making anyone better at upstream contributions, making things sustainable. I started training people on Vim, on how to use text editors, so they are more efficient and productive. In general life, text editors are a really good tool.
|
||||
|
||||
The other thing was the shell. How do you navigate around the Linux shell and command line? That has been a fun experience.
|
||||
|
||||
**CRAIG BOX: It's very interesting to think about that, because my own involvement with a Linux User Group was probably around the year 2000. And back then we were teaching people how to install things-- Linux on CD was kinda new at that point in time. There was a lot more of, what is this new thing and how do we get involved? When the internet took off around that time, all of that stuff moved online - you no longer needed to go meet a group of people in a room to talk about Linux. And I haven't really given much thought to the concept of a LUG since then, but it's great to see it having turned into something that's now about contributing, rather than just about how you get things going for yourself.**
|
||||
|
||||
NABARUN PAL: Exactly. So as I mentioned earlier, my journey into Linux was installing SUSE from DVDs that came bundled with magazines. Back then it was a pain installing things because you did not get any instructions. There has certainly been a paradigm shift now. People are more open to reading instructions online, downloading ISOs, and then just installing them. So we really don't need to do that as part of LUGs.
|
||||
|
||||
We have shifted more towards enabling people to contribute to whichever project that they use. For example, if you're using Fedora, contribute to Fedora; make things better. It's just about giving back to the community in any way possible.
|
||||
|
||||
**CRAIG BOX: You're also involved in the [Kubernetes Bangalore meetup group](https://www.meetup.com/Bangalore-Kubernetes-Meetup/). Does that group have a similar mentality?**
|
||||
|
||||
NABARUN PAL: The Kubernetes Bangalore meetup group is essentially focused towards spreading the knowledge of Kubernetes and the aligned products in the ecosystem, whatever there is in the Cloud Native Landscape, in various ways. For example, to evangelize about using them in your company or how people use them in existing ways.
|
||||
|
||||
So a few months back in February, we did something like a [Kubernetes contributor workshop](https://www.youtube.com/watch?v=FgsXbHBRYIc). It was one of its kind in India. It was the first one if I recall correctly. We got a lot of traction and community members interested in contributing to Kubernetes and a lot of other projects. And this is becoming a really valuable thing.
|
||||
|
||||
I'm not much involved in the organization of the group. There are really great people already organizing it. I keep on being around and attending the meetups and trying to answer any questions if people have any.
|
||||
|
||||
**CRAIG BOX: One way that it is possible to contribute to the Kubernetes ecosystem is through the release process. You've [written a blog](https://blog.naba.run/posts/release-enhancements-journey/) which talks about your journey through that. It started in Kubernetes 1.17, where you took a shadow role for that release. Tell me about what it was like to first take that plunge.**
|
||||
|
||||
NABARUN PAL: Taking the plunge was a big step, I would say. It should not have been that way. After getting into the team, I saw that it is really encouraged that you should just apply to the team - but then write truthfully about yourself. What do you want? Write your passionate goal, why you want to be in the team.
|
||||
|
||||
So even right now the shadow applications are open for the next release. I wanted to give that a small shoutout. If you want to contribute to the Kubernetes release team, please do apply. The form is pretty simple. You just need to say why you want to contribute to the release team.
|
||||
|
||||
**CRAIG BOX: What was your answer to that question?**
|
||||
|
||||
NABARUN PAL: It was a bit tricky. I have this philosophy of contributing to projects that I use in my day-to-day life. I use a lot of open source projects daily, and I started contributing to Kubernetes primarily because I was using the Kubernetes Python client. That was one of my first contributions.
|
||||
|
||||
When I was contributing to that, I explored the release team and it interested me a lot, particularly how interesting and varied the mechanics of releasing Kubernetes are. For most software projects, it's usually whenever you decide that you have made meaningful progress in terms of features, you release it. But Kubernetes is not like that. We follow a regular release cadence. And all those aspects really interested me. I actually applied for the first time in Kubernetes 1.16, but got rejected.
|
||||
|
||||
But I still applied to Kubernetes 1.17, and I got into the enhancements team. That team was led by [MrBobbyTables, Bob Killen](https://kubernetespodcast.com/episode/126-research-steering-honking/), back then, and [Jeremy Rickard](https://kubernetespodcast.com/episode/131-kubernetes-1.20/) was one of my co-shadows in the team. I shadowed enhancements again. Then I led enhancements in 1.19. I then shadowed the lead in 1.20 and eventually led the 1.21 team. That's what my journey has been.
|
||||
|
||||
My suggestion to people is don't be afraid of failure. Even if you don't get selected, it's perfectly fine. You can still contribute to the release team. Just hop on the release calls, raise your hand, and introduce yourself.
|
||||
|
||||
**CRAIG BOX: Between the 1.20 and 1.21 releases, you moved to work on the upstream contribution team at VMware. I've noticed that VMware is hiring a lot of great upstream contributors at the moment. Is this something that [Stephen Augustus](https://kubernetespodcast.com/episode/130-kubecon-na-2020/) had his fingerprints all over? Is there something in the water?**
|
||||
|
||||
NABARUN PAL: A lot of people have fingerprints on this process. Stephen certainly had his fingerprints on it, I would say. We are expanding the team of upstream contributors primarily because the product that we are working for is based on Kubernetes. It helps us a lot in driving processes upstream and helping out the community as a whole, because everyone then gets enabled and benefits from what we contribute to the community.
|
||||
|
||||
**CRAIG BOX: I understand that the Tanzu team is being built out in India at the moment, but I guess you probably haven't been able to meet them in person yet?**
|
||||
|
||||
NABARUN PAL: Yes and no. I did not meet any of them after joining VMware, but I met a lot of my teammates, before I joined VMware, at KubeCons. For example, I met Nikhita, I met Dims, I met Stephen at KubeCon. I am yet to meet other members of the team and I'm really excited to catch up with them once everything comes out of lockdown and we go back to our normal lives.
|
||||
|
||||
**CRAIG BOX: Yes, everyone that I speak to who has changed jobs in the pandemic says it's a very odd experience, just nothing really being different. And the same perhaps for people who are working on open source moving companies as well. They're doing the same thing, perhaps just for a different employer.**
|
||||
|
||||
NABARUN PAL: As we say in the community, see you in another Slack in some time.
|
||||
|
||||
**CRAIG BOX: We now turn to the recent release of Kubernetes 1.21. First of all, congratulations on that.**
|
||||
|
||||
NABARUN PAL: Thank you.
|
||||
|
||||
**CRAIG BOX: [The announcement](https://kubernetes.io/blog/2021/04/08/kubernetes-1-21-release-announcement/) says the release consists of 51 enhancements, 13 graduating to stable, 16 moving to beta, 20 entering alpha, and then two features that have been deprecated. How would you summarize this release?**
|
||||
|
||||
NABARUN PAL: One of the big points for this release is that it is the largest release of all time.
|
||||
|
||||
**CRAIG BOX: Really?**
|
||||
|
||||
NABARUN PAL: Yep. 1.20 was the largest release back then, but 1.21 got more enhancements, primarily due to a lot of changes that we did to the process.
|
||||
|
||||
In the 1.21 release cycle, we did a few things differently compared to other release cycles-- for example, in the enhancement process. An enhancement, in the Kubernetes context, is basically a feature proposal. You will hear the terminology [Kubernetes Enhancement Proposals](https://github.com/kubernetes/enhancements/blob/master/keps/README.md), or KEP, a lot in the community. An enhancement is a broad thing encapsulated in a specific document.
|
||||
|
||||
**CRAIG BOX: I like to think of it as a thing that's worth having a heading in the release notes.**
|
||||
|
||||
NABARUN PAL: Indeed. Until the 1.20 release cycle, what we used to do was-- the release team has a vertical called enhancements. The enhancements team members used to ping each of the enhancement issues and ask whether they want to be part of the release cycle or not. The authors would decide, or talk to their SIG, and then come back with the answer, as to whether they wanted to be part of the cycle.
|
||||
|
||||
In this release, what we did was we eliminated that process and asked the SIGs proactively to discuss amongst themselves, what they wanted to pitch in for this release cycle. What set of features did they want to graduate this release? They may introduce things in alpha, graduate things to beta or stable, or they may also deprecate features.
|
||||
|
||||
What this did was promote a lot of async processes, and at the same time, give power back to the community. The community decides what they want in the release and then comes back collectively. It also reduces a lot of stress on the release team who previously had to ask people consistently what they wanted to pitch in for the release. You now have a deadline. You discuss amongst your SIG what your roadmap is and what it looks like for the near future. Maybe this release, and the next two. And you put all of those answers into a Google spreadsheet. Spreadsheets are still a thing.
|
||||
|
||||
**CRAIG BOX: The Kubernetes ecosystem runs entirely on Google Spreadsheets.**
|
||||
|
||||
NABARUN PAL: It does, and a lot of Google Docs for meeting notes! We did a lot of process improvements, which essentially led to a better release. This release cycle we had 13 enhancements graduating to stable, 16 which moved to beta, and 20 enhancements which were net new features into the ecosystem, and came in as alpha.
|
||||
|
||||
Along with that are features set for deprecation. One of them was PodSecurityPolicy. That has been a point of discussion in the Kubernetes user base and we also published [a blog post about it](https://kubernetes.io/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/). All credit to SIG Security, who have been on top of things, finding a replacement for PodSecurityPolicy even before this release cycle ended so that they could at least have a proposal of what will happen next.
|
||||
|
||||
**CRAIG BOX: Let's talk about some old things and some new things. You mentioned PodSecurityPolicy there. That's a thing that's been around a long time and is being deprecated. Two things that have been around a long time and that are now being promoted to stable are CronJobs and PodDisruptionBudgets, both of which were introduced in Kubernetes 1.4, which came out in 2016. Why do you think it took so long for them both to go stable?**
|
||||
|
||||
NABARUN PAL: I might not have a definitive answer to your question. One of the things that I feel is they might be already so good that nobody saw that they were beta features, and just kept on using them.
|
||||
|
||||
One of the things that I noticed when reading about the CronJobs graduation from beta to stable was the new controller. Users might not see this, but there has been a drastic change in the CronJob controller v2. What it essentially does is go from a poll-based method of checking what users have defined as CronJobs to a queue architecture, which is the modern method of defining controllers. That has been one of the really good improvements in the case of CronJobs. Instead of the controller working in O(N) time, you now have constant time complexity.
|
||||
|
||||
**CRAIG BOX: A lot of these features that have been in beta for a long time, like you say, people have an expectation that they are complete. With PodSecurityPolicy, it's being deprecated, which is allowed because it's a feature that never made it out of beta. But how do you think people will react to it going away? And does that say something about the need for the process to make sure that features don't just languish in beta forever, which has been introduced recently?**
|
||||
|
||||
NABARUN PAL: That's true. One of the driving factors, when contributors are thinking of graduating beta features has been the ["prevention of perma-beta" KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-architecture/1635-prevent-permabeta/README.md). Back in 1.19 we [introduced this process](https://kubernetes.io/blog/2020/08/21/moving-forward-from-beta/) where each of the beta resources were marked for deprecation and removal in a certain time frame-- three releases for deprecation and another release for removal. That's also a motivating factor for eventually rethinking as to how beta resources work for us in the community. That is also very effective, I would say.
|
||||
|
||||
**CRAIG BOX: Do remember that Gmail was in beta for eight years.**
|
||||
|
||||
NABARUN PAL: I did not know that!
|
||||
|
||||
**CRAIG BOX: Nothing in Kubernetes is quite that old yet, but we'll get there. Of the 20 new enhancements, do you have a favorite or any that you'd like to call out?**
|
||||
|
||||
NABARUN PAL: There are two specific features in 1.21 that I'm really interested in, and are coming as net new features. One of them is the [persistent volume health monitor](https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1432-volume-health-monitor), which gives the users the capability to actually see whether the backing volumes, which power persistent volumes in Kubernetes, are deleted or not. For example, the volumes may get deleted due to an inadvertent event, or they may get corrupted. That information is basically surfaced out as a field so that the user can leverage it in any way.
|
||||
|
||||
The other feature is the proposal for [adding headers with the command name to kubectl requests](https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers). We have always set the user-agent information when doing those kinds of requests, but the proposal is to add what command the user put in so that we can enable more telemetry, and cluster administrators can determine the usage patterns of how people are using the cluster. I'm really excited about these kinds of features coming into play.
|
||||
|
||||
**CRAIG BOX: You're the first release lead from the Asia-Pacific region, or more accurately, outside of the US and Europe. Most meetings in the Kubernetes ecosystem are traditionally in the window of overlap between the US and Europe, in the morning in California and the evening here in the UK. What's it been like to work outside of the time zones that the community had previously been operating in?**
|
||||
|
||||
NABARUN PAL: It has been a fun and a challenging proposition, I would say. In the last two-ish years that I have been contributing to Kubernetes, the community has also transformed from a lot of early morning Pacific calls to more towards async processes. For example, we in the release team have transformed our processes so we don't do updates in the calls anymore. What we do is ask for updates ahead of time, and then in the call, we just discuss things which need to be discussed synchronously in the team.
|
||||
|
||||
We leverage the meetings right now more for discussions. But we also don't come to decisions in those discussions, because if any stakeholder is not present on the call, it puts them at a disadvantage. We are trying to talk more on Slack, publicly, or talk on mailing lists. That's where most of the discussion should happen, and also to gain lazy consensus. What I mean by lazy consensus is come up with a pre-decision kind of thing, but then also invite feedback from the broader community about what people would like them to see about that specific thing being discussed. This is where we as a community are also transforming a lot, but there is a lot more headroom to grow.
|
||||
|
||||
The release team also started to have EU/APAC burndown meetings. In addition to having one meeting focused towards the US and European time zones, we also do a meeting which is more suited towards European and Asia-Pacific time zones. One of the driving factors for those decisions was that the release team is seeing a lot of participation from a variety of time zones. To give you one metric, we had release team members this cycle from UTC+8 all through UTC-8 - 16 hours of span. It's really difficult to accommodate all of those zones in a single meeting. And it's not just those 16 hours of span - what about the other eight hours?
|
||||
|
||||
**CRAIG BOX: Yeah, you're missing New Zealand. You could add another 5 hours of span right there.**
|
||||
|
||||
NABARUN PAL: Exactly. So we will always miss people in meetings, and that's why we should also innovate more, have different kinds of meetings. But that also may not be very sustainable in the future. Will people attend duplicate meetings? Will people follow both of the meetings? More meetings is one of the solutions.
|
||||
|
||||
The other solution is you have threaded discussions on some medium, be it Slack or be it a mailing list. Then, people can just pitch in whenever it is work time for them. Then, at the end of the day, a 24-hour rolling period, you digest it, and then push it out as meeting notes. That's what the Contributor Experience Special Interest Group is doing - shout-out to them for moving to that process. I may be wrong here, but I think once every two weeks, they do async updates on Slack. And that is a really nice thing to have, improving the variety of geographies that people can contribute from.
|
||||
|
||||
**CRAIG BOX: Once you've put everything together that you hope to be in your release, you create a release candidate build. How do you motivate people to test those?**
|
||||
|
||||
NABARUN PAL: That's a very interesting question. It is difficult for us to motivate people into trying out these candidates. It's mostly people who are passionate about Kubernetes who try out the release candidates and see for themselves what the bugs are. I remember [Dims tweeting out a call](https://twitter.com/dims/status/1377272238420934656) that if somebody tries out the release candidate and finds a good bug or caveat, they could get a callout in the KubeCon keynote. That's one of the incentives - if you want to be called out in a KubeCon keynote, please try our release candidates.
|
||||
|
||||
**CRAIG BOX: Or get a new pair of Kubernetes socks?**
|
||||
|
||||
NABARUN PAL: We would love to give out goodies to people who try out our release candidates and find bugs. For example, if you want the brand new release team logo as a sticker, just hit me up. If you find a bug in a 1.22 release candidate, I would love to be able to send you some coupon codes for the store. Don't quote me on this, but do reach out.
|
||||
|
||||
**CRAIG BOX: Now the release is out, is it time for you to put your feet up? What more things do you have to do, and how do you feel about the path ahead for yourself?**
|
||||
|
||||
NABARUN PAL: I was discussing this with the team yesterday. Even after the release, we had kind of a water-cooler conversation. I just pasted in a Zoom link to all the release team members and said, hey, do you want to chat? One of the things that I realized that I'm really missing is the daily burndowns right now. I will be around in the release team and the SIG Release meetings, helping out the new lead in transitioning. And even my job, right now, is not over. I'm working with Taylor, who is the emeritus advisor for 1.21, on figuring out some of the mechanics for the next release cycle. I'm also documenting what all we did as part of the process and as part of the process changes, and making sure the next release cycle is up and running.
|
||||
|
||||
**CRAIG BOX: We've done a lot of these release lead interviews now, and there's a question which we always like to ask, which is, what will you write down in the transition envelope? Savitha Raghunathan is the release lead for 1.22. What is the advice that you will pass on to her?**
|
||||
|
||||
NABARUN PAL: Three words-- **Do, Delegate, and Defer**. Categorize things into those three buckets as to what you should do right away, what you need to defer, and things that you can delegate to your shadows or other release team members. That's one of the mantras that works really well when leading a team. It is not just in the context of the release team, but it's in the context of managing any team.
|
||||
|
||||
The other bit is **over-communicate**. No amount of communication is enough. What I've realized is the community is always willing to help you. One of the big examples that I can give is the day before release was supposed to happen, we were seeing a lot of test failures, and then one of the community members had an idea-- why don't you just send an email? I was like, "that sounds good. We can send an email mentioning all the flakes and call out for help to the broader Kubernetes developer community." And eventually, once we sent out the email, lots of people came in to help us in de-flaking the tests and trying to find out the root cause as to why those tests were failing so often. Big shout out to Antonio and all the SIG Network folks who came to pitch in.
|
||||
|
||||
No matter how many names I mention, it will never be enough. A lot of people, even outside the release team, have helped us a lot with this release. And that's where the release theme comes in - **Power to the Community**. I'm really stoked by how this community behaves and how people are willing to help you all the time. It's not about what they're telling you to do; it's about what they're also interested in and passionate about.
|
||||
|
||||
**CRAIG BOX: One of the things you're passionate about is Formula One. Do you think Lewis Hamilton is going to take it away this year?**
|
||||
|
||||
NABARUN PAL: It's a fair probability that Lewis will win the title this year as well.
|
||||
|
||||
**CRAIG BOX: Which would take him to eight all-time career titles. And thus-- [he's currently tied with Michael Schumacher](https://www.nytimes.com/2020/11/15/sports/autoracing/lewis-hamilton-schumacher-formula-one-record.html)-- would pull him ahead.**
|
||||
|
||||
NABARUN PAL: Yes. Michael Schumacher was my first favorite F1 driver, I would say. It feels a bit heartbreaking to see someone break Michael's record.
|
||||
|
||||
**CRAIG BOX: How do you feel about [Michael Schumacher's son joining the contest?](https://www.formula1.com/en/latest/article.breaking-mick-schumacher-to-race-for-haas-in-2021-as-famous-surname-returns.66XTVfSt80GrZe91lvWVwJ.html)**
|
||||
|
||||
NABARUN PAL: I feel good. Mick Schumacher is in the fray right now. And I wish we could see him, in a few years, in a Ferrari. The Schumacher family back to Ferrari would be really great to see. But then, my fan favorite has always been McLaren, partly because I like the chemistry of Lando and Carlos over the last two years. It was heartbreaking to see Carlos go to Ferrari. But then we have Lando and Daniel Ricciardo in the team. They're also fun people.
|
||||
|
||||
---
|
||||
|
||||
_[Nabarun Pal](https://twitter.com/theonlynabarun) is on the Tanzu team at VMware and served as the Kubernetes 1.21 release team lead._
|
||||
|
||||
_You can find the [Kubernetes Podcast from Google](http://www.kubernetespodcast.com/) at [@KubernetesPod](https://twitter.com/KubernetesPod) on Twitter, and you can [subscribe](https://kubernetespodcast.com/subscribe/) so you never miss an episode._
|
|
@ -13,7 +13,7 @@ cid: community
|
|||
<div class="intro">
|
||||
<br class="mobile">
|
||||
<p>The Kubernetes community -- users, contributors, and the culture we've built together -- is one of the biggest reasons for the meteoric rise of this open source project. Our culture and values continue to grow and change as the project itself grows and changes. We all work together toward constant improvement of the project and the ways we work on it.
|
||||
<br><br>We are the people who file issues and pull requests, attend SIG meetings, Kubernetes meetups, and KubeCon, advocate for it's adoption and innovation, run <code>kubectl get pods</code>, and contribute in a thousand other vital ways. Read on to learn how you can get involved and become part of this amazing community.</p>
|
||||
<br><br>We are the people who file issues and pull requests, attend SIG meetings, Kubernetes meetups, and KubeCon, advocate for its adoption and innovation, run <code>kubectl get pods</code>, and contribute in a thousand other vital ways. Read on to learn how you can get involved and become part of this amazing community.</p>
|
||||
<br class="mobile">
|
||||
</div>
|
||||
|
||||
|
|
|
@ -210,7 +210,7 @@ To upgrade a HA control plane to use the cloud controller manager, see [Migrate
|
|||
|
||||
Want to know how to implement your own cloud controller manager, or extend an existing project?
|
||||
|
||||
The cloud controller manager uses Go interfaces to allow implementations from any cloud to be plugged in. Specifically, it uses the `CloudProvider` interface defined in [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.17/cloud.go#L42-L62) from [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider).
|
||||
The cloud controller manager uses Go interfaces to allow implementations from any cloud to be plugged in. Specifically, it uses the `CloudProvider` interface defined in [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.21/cloud.go#L42-L69) from [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider).
|
||||
|
||||
The implementation of the shared controllers highlighted in this document (Node, Route, and Service), and some scaffolding along with the shared cloudprovider interface, is part of the Kubernetes core. Implementations specific to cloud providers are outside the core of Kubernetes and implement the `CloudProvider` interface.
|
||||
|
||||
|
|
|
@ -159,11 +159,12 @@ You can run your own controller as a set of Pods,
|
|||
or externally to Kubernetes. What fits best will depend on what that particular
|
||||
controller does.
|
||||
|
||||
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Read about the [Kubernetes control plane](/docs/concepts/overview/components/#control-plane-components)
|
||||
* Discover some of the basic [Kubernetes objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/)
|
||||
* Learn more about the [Kubernetes API](/docs/concepts/overview/kubernetes-api/)
|
||||
* If you want to write your own controller, see [Extension Patterns](/docs/concepts/extend-kubernetes/extend-cluster/#extension-patterns) in Extending Kubernetes.
|
||||
* If you want to write your own controller, see
|
||||
[Extension Patterns](/docs/concepts/extend-kubernetes/#extension-patterns)
|
||||
in Extending Kubernetes.
|
||||
|
||||
|
|
|
@ -0,0 +1,164 @@
|
|||
---
|
||||
title: Garbage Collection
|
||||
content_type: concept
|
||||
weight: 50
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
{{<glossary_definition term_id="garbage-collection" length="short">}} This
|
||||
allows the clean up of resources like the following:
|
||||
|
||||
* [Failed pods](/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection)
|
||||
* [Completed Jobs](/docs/concepts/workloads/controllers/ttlafterfinished/)
|
||||
* [Objects without owner references](#owners-dependents)
|
||||
* [Unused containers and container images](#containers-images)
|
||||
* [Dynamically provisioned PersistentVolumes with a StorageClass reclaim policy of Delete](/docs/concepts/storage/persistent-volumes/#delete)
|
||||
* [Stale or expired CertificateSigningRequests (CSRs)](/reference/access-authn-authz/certificate-signing-requests/#request-signing-process)
|
||||
* {{<glossary_tooltip text="Nodes" term_id="node">}} deleted in the following scenarios:
|
||||
* On a cloud when the cluster uses a [cloud controller manager](/docs/concepts/architecture/cloud-controller/)
|
||||
* On-premises when the cluster uses an addon similar to a cloud controller
|
||||
manager
|
||||
* [Node Lease objects](/docs/concepts/architecture/nodes/#heartbeats)
|
||||
|
||||
## Owners and dependents {#owners-dependents}
|
||||
|
||||
Many objects in Kubernetes link to each other through [*owner references*](/docs/concepts/overview/working-with-objects/owners-dependents/).
|
||||
Owner references tell the control plane which objects are dependent on others.
|
||||
Kubernetes uses owner references to give the control plane, and other API
|
||||
clients, the opportunity to clean up related resources before deleting an
|
||||
object. In most cases, Kubernetes manages owner references automatically.
|
||||
|
||||
Ownership is different from the [labels and selectors](/docs/concepts/overview/working-with-objects/labels/)
|
||||
mechanism that some resources also use. For example, consider a
|
||||
{{<glossary_tooltip text="Service" term_id="service">}} that creates
|
||||
`EndpointSlice` objects. The Service uses *labels* to allow the control plane to
|
||||
determine which `EndpointSlice` objects are used for that Service. In addition
|
||||
to the labels, each `EndpointSlice` that is managed on behalf of a Service has
|
||||
an owner reference. Owner references help different parts of Kubernetes avoid
|
||||
interfering with objects they don’t control.
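
To see owner references in practice, you can inspect the `metadata.ownerReferences` field of any managed object. A minimal sketch, assuming a cluster where some Pods are managed by a controller such as a ReplicaSet (the `<pod-name>` below is a placeholder):

```shell
# List Pods together with the kind of controller that owns them, if any.
kubectl get pods -o custom-columns=NAME:.metadata.name,OWNER:.metadata.ownerReferences[0].kind

# Show the full owner reference of a single Pod, including blockOwnerDeletion.
kubectl get pod <pod-name> -o jsonpath='{.metadata.ownerReferences}'
```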
|
||||
|
||||
## Cascading deletion {#cascading-deletion}
|
||||
|
||||
Kubernetes checks for and deletes objects that no longer have owner
|
||||
references, like the pods left behind when you delete a ReplicaSet. When you
|
||||
delete an object, you can control whether Kubernetes deletes the object's
|
||||
dependents automatically, in a process called *cascading deletion*. There are
|
||||
two types of cascading deletion, as follows:
|
||||
|
||||
* Foreground cascading deletion
|
||||
* Background cascading deletion
|
||||
|
||||
You can also control how and when garbage collection deletes resources that have
|
||||
owner references using Kubernetes {{<glossary_tooltip text="finalizers" term_id="finalizer">}}.
|
||||
|
||||
### Foreground cascading deletion {#foreground-deletion}
|
||||
|
||||
In foreground cascading deletion, the owner object you're deleting first enters
|
||||
a *deletion in progress* state. In this state, the following happens to the
|
||||
owner object:
|
||||
|
||||
* The Kubernetes API server sets the object's `metadata.deletionTimestamp`
|
||||
field to the time the object was marked for deletion.
|
||||
* The Kubernetes API server also sets the `metadata.finalizers` field to
|
||||
`foregroundDeletion`.
|
||||
* The object remains visible through the Kubernetes API until the deletion
|
||||
process is complete.
|
||||
|
||||
After the owner object enters the deletion in progress state, the controller
|
||||
deletes the dependents. After deleting all the dependent objects, the controller
|
||||
deletes the owner object. At this point, the object is no longer visible in the
|
||||
Kubernetes API.
|
||||
|
||||
During foreground cascading deletion, the only dependents that block owner
|
||||
deletion are those that have the `ownerReference.blockOwnerDeletion=true` field.
|
||||
See [Use foreground cascading deletion](/docs/tasks/administer-cluster/use-cascading-deletion/#use-foreground-cascading-deletion)
|
||||
to learn more.
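
As a sketch only (`example` stands in for a Deployment that actually exists in your cluster), you can request foreground cascading deletion from `kubectl`:

```shell
# Deletes the dependent ReplicaSets and Pods first, then removes the Deployment itself.
kubectl delete deployment example --cascade=foreground
```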
|
||||
|
||||
### Background cascading deletion {#background-deletion}
|
||||
|
||||
In background cascading deletion, the Kubernetes API server deletes the owner
|
||||
object immediately and the controller cleans up the dependent objects in
|
||||
the background. By default, Kubernetes uses background cascading deletion unless
|
||||
you manually use foreground deletion or choose to orphan the dependent objects.
|
||||
|
||||
See [Use background cascading deletion](/docs/tasks/administer-cluster/use-cascading-deletion/#use-background-cascading-deletion)
|
||||
to learn more.
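
Background cascading deletion is the default, so a plain `kubectl delete` already behaves this way; you can also request it explicitly. A sketch, again with `example` as a placeholder name:

```shell
# The Deployment is removed immediately; its ReplicaSets and Pods are cleaned up afterwards.
kubectl delete deployment example --cascade=background
```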
|
||||
|
||||
### Orphaned dependents
|
||||
|
||||
When Kubernetes deletes an owner object, the dependents left behind are called
|
||||
*orphan* objects. By default, Kubernetes deletes dependent objects. To learn how
|
||||
to override this behavior, see [Delete owner objects and orphan dependents](/docs/tasks/administer-cluster/use-cascading-deletion/#set-orphan-deletion-policy).
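
A sketch of how you would orphan dependents on purpose (the ReplicaSet name `example` is a placeholder):

```shell
# Delete the ReplicaSet but keep its Pods; the Pods become orphans with no owner reference.
kubectl delete replicaset example --cascade=orphan
```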
|
||||
|
||||
## Garbage collection of unused containers and images {#containers-images}
|
||||
|
||||
The {{<glossary_tooltip text="kubelet" term_id="kubelet">}} performs garbage
|
||||
collection on unused images every five minutes and on unused containers every
|
||||
minute. You should avoid using external garbage collection tools, as these can
|
||||
break the kubelet behavior and remove containers that should exist.
|
||||
|
||||
To configure options for unused container and image garbage collection, tune the
|
||||
kubelet using a [configuration file](/docs/tasks/administer-cluster/kubelet-config-file/)
|
||||
and change the parameters related to garbage collection using the
|
||||
[`KubeletConfiguration`](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration)
|
||||
resource type.
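
As a rough sketch of what such a configuration might look like (the file path and the threshold values here are illustrative, not mandated defaults), the image garbage collection thresholds described below map to `KubeletConfiguration` fields:

```shell
# Write a minimal kubelet configuration that tunes image garbage collection;
# point the kubelet's --config flag at this file when it starts.
cat <<EOF > /etc/kubernetes/kubelet-config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
EOF
```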
|
||||
|
||||
### Container image lifecycle
|
||||
|
||||
Kubernetes manages the lifecycle of all images through its *image manager*,
|
||||
which is part of the kubelet, with the cooperation of cadvisor. The kubelet
|
||||
considers the following disk usage limits when making garbage collection
|
||||
decisions:
|
||||
|
||||
* `HighThresholdPercent`
|
||||
* `LowThresholdPercent`
|
||||
|
||||
Disk usage above the configured `HighThresholdPercent` value triggers garbage
|
||||
collection, which deletes images in order based on the last time they were used,
|
||||
starting with the oldest first. The kubelet deletes images
|
||||
until disk usage reaches the `LowThresholdPercent` value.
|
||||
|
||||
### Container garbage collection {#container-image-garbage-collection}
|
||||
|
||||
The kubelet garbage collects unused containers based on the following variables,
|
||||
which you can define:
|
||||
|
||||
* `MinAge`: the minimum age at which the kubelet can garbage collect a
|
||||
container. Disable by setting to `0`.
|
||||
* `MaxPerPodContainer`: the maximum number of dead containers each Pod (UID, container name) pair
|
||||
can have. Disable by setting to less than `0`.
|
||||
* `MaxContainers`: the maximum number of dead containers the cluster can have.
|
||||
Disable by setting to less than `0`.
|
||||
|
||||
In addition to these variables, the kubelet garbage collects unidentified and
|
||||
deleted containers, typically starting with the oldest first.
|
||||
|
||||
`MaxPerPodContainer` and `MaxContainers` may potentially conflict with each other
|
||||
in situations where retaining the maximum number of containers per Pod
|
||||
(`MaxPerPodContainer`) would go outside the allowable total of global dead
|
||||
containers (`MaxContainers`). In this situation, the kubelet adjusts
|
||||
`MaxPerPodContainer` to address the conflict. A worst-case scenario would be to
|
||||
downgrade `MaxPerPodContainer` to `1` and evict the oldest containers.
|
||||
Additionally, containers owned by pods that have been deleted are removed once
|
||||
they are older than `MinAge`.
|
||||
|
||||
{{<note>}}
|
||||
The kubelet only garbage collects the containers it manages.
|
||||
{{</note>}}
|
||||
|
||||
## Configuring garbage collection {#configuring-gc}
|
||||
|
||||
You can tune garbage collection of resources by configuring options specific to
|
||||
the controllers managing those resources. The following pages show you how to
|
||||
configure garbage collection:
|
||||
|
||||
* [Configuring cascading deletion of Kubernetes objects](/docs/tasks/administer-cluster/use-cascading-deletion/)
|
||||
* [Configuring cleanup of finished Jobs](/docs/concepts/workloads/controllers/ttlafterfinished/)
|
||||
|
||||
<!-- * [Configuring unused container and image garbage collection](/docs/tasks/administer-cluster/reconfigure-kubelet/) -->
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Learn more about [ownership of Kubernetes objects](/docs/concepts/overview/working-with-objects/owners-dependents/).
|
||||
* Learn more about Kubernetes [finalizers](/docs/concepts/overview/working-with-objects/finalizers/).
|
||||
* Learn about the [TTL controller](/docs/concepts/workloads/controllers/ttlafterfinished/) (beta) that cleans up finished Jobs.
|
|
@ -14,7 +14,7 @@ A node may be a virtual or physical machine, depending on the cluster. Each node
|
|||
is managed by the
|
||||
{{< glossary_tooltip text="control plane" term_id="control-plane" >}}
|
||||
and contains the services necessary to run
|
||||
{{< glossary_tooltip text="Pods" term_id="pod" >}}
|
||||
{{< glossary_tooltip text="Pods" term_id="pod" >}}.
|
||||
|
||||
Typically you have several nodes in a cluster; in a learning or resource-limited
|
||||
environment, you might have only one node.
|
||||
|
@ -283,7 +283,7 @@ The node eviction behavior changes when a node in a given availability zone
|
|||
becomes unhealthy. The node controller checks what percentage of nodes in the zone
|
||||
are unhealthy (NodeReady condition is ConditionUnknown or ConditionFalse) at
|
||||
the same time:
|
||||
- If the fraction of unhealthy nodes is at least `--unhealthy-zone-threshold`
|
||||
- If the fraction of unhealthy nodes is at least `--unhealthy-zone-threshold`
|
||||
(default 0.55), then the eviction rate is reduced.
|
||||
- If the cluster is small (i.e. has less than or equal to
|
||||
`--large-cluster-size-threshold` nodes - default 50), then evictions are stopped.
|
||||
|
@ -377,6 +377,21 @@ For example, if `ShutdownGracePeriod=30s`, and
|
|||
for gracefully terminating normal pods, and the last 10 seconds would be
|
||||
reserved for terminating [critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical).
|
||||
|
||||
{{< note >}}
|
||||
When pods were evicted during the graceful node shutdown, they are marked as failed.
|
||||
Running `kubectl get pods` shows the status of the evicted pods as `Shutdown`.
|
||||
And `kubectl describe pod` indicates that the pod was evicted because of node shutdown:
|
||||
|
||||
```
|
||||
Status: Failed
|
||||
Reason: Shutdown
|
||||
Message: Node is shutting, evicting pods
|
||||
```
|
||||
|
||||
Failed pod objects will be preserved until explicitly deleted or [cleaned up by the GC](/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection).
|
||||
This is a change of behavior compared to abrupt node termination.
|
||||
{{< /note >}}
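
A quick sketch of how you might find, and eventually remove, those leftover pod objects once you have finished inspecting them:

```shell
# List pods that ended up in the Failed phase, across all namespaces.
kubectl get pods --all-namespaces --field-selector=status.phase=Failed

# Delete them when they are no longer needed for debugging.
kubectl delete pods --all-namespaces --field-selector=status.phase=Failed
```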
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Learn about the [components](/docs/concepts/overview/components/#node-components) that make up a node.
|
||||
|
|
|
@ -33,8 +33,6 @@ the `--max-requests-inflight` flag without the API Priority and
|
|||
Fairness feature enabled.
|
||||
{{< /caution >}}
|
||||
|
||||
|
||||
|
||||
<!-- body -->
|
||||
|
||||
## Enabling/Disabling API Priority and Fairness
|
||||
|
@ -65,6 +63,7 @@ The command-line flag `--enable-priority-and-fairness=false` will disable the
|
|||
API Priority and Fairness feature, even if other flags have enabled it.
|
||||
|
||||
## Concepts
|
||||
|
||||
There are several distinct features involved in the API Priority and Fairness
|
||||
feature. Incoming requests are classified by attributes of the request using
|
||||
_FlowSchemas_, and assigned to priority levels. Priority levels add a degree of
|
||||
|
@ -75,12 +74,13 @@ each other, and allows for requests to be queued to prevent bursty traffic from
|
|||
causing failed requests when the average load is acceptably low.
|
||||
|
||||
### Priority Levels
|
||||
Without APF enabled, overall concurrency in
|
||||
the API server is limited by the `kube-apiserver` flags
|
||||
`--max-requests-inflight` and `--max-mutating-requests-inflight`. With APF
|
||||
enabled, the concurrency limits defined by these flags are summed and then the sum is divided up
|
||||
among a configurable set of _priority levels_. Each incoming request is assigned
|
||||
to a single priority level, and each priority level will only dispatch as many
|
||||
|
||||
Without APF enabled, overall concurrency in the API server is limited by the
|
||||
`kube-apiserver` flags `--max-requests-inflight` and
|
||||
`--max-mutating-requests-inflight`. With APF enabled, the concurrency limits
|
||||
defined by these flags are summed and then the sum is divided up among a
|
||||
configurable set of _priority levels_. Each incoming request is assigned to a
|
||||
single priority level, and each priority level will only dispatch as many
|
||||
concurrent requests as its configuration allows.
|
||||
|
||||
The default configuration, for example, includes separate priority levels for
|
||||
|
@ -90,6 +90,7 @@ requests cannot prevent leader election or actions by the built-in controllers
|
|||
from succeeding.
|
||||
|
||||
### Queuing
|
||||
|
||||
Even within a priority level there may be a large number of distinct sources of
|
||||
traffic. In an overload situation, it is valuable to prevent one stream of
|
||||
requests from starving others (in particular, in the relatively common case of a
|
||||
|
@ -114,15 +115,18 @@ independent flows will all make progress when total traffic exceeds capacity),
|
|||
tolerance for bursty traffic, and the added latency induced by queuing.
|
||||
|
||||
### Exempt requests
|
||||
|
||||
Some requests are considered sufficiently important that they are not subject to
|
||||
any of the limitations imposed by this feature. These exemptions prevent an
|
||||
improperly-configured flow control configuration from totally disabling an API
|
||||
server.
|
||||
|
||||
## Defaults
|
||||
|
||||
The Priority and Fairness feature ships with a suggested configuration that
|
||||
should suffice for experimentation; if your cluster is likely to
|
||||
experience heavy load then you should consider what configuration will work best. The suggested configuration groups requests into five priority
|
||||
experience heavy load then you should consider what configuration will work
|
||||
best. The suggested configuration groups requests into five priority
|
||||
classes:
|
||||
|
||||
* The `system` priority level is for requests from the `system:nodes` group,
|
||||
|
@ -180,19 +184,18 @@ If you add the following additional FlowSchema, this exempts those
|
|||
requests from rate limiting.
|
||||
|
||||
{{< caution >}}
|
||||
|
||||
Making this change also allows any hostile party to then send
|
||||
health-check requests that match this FlowSchema, at any volume they
|
||||
like. If you have a web traffic filter or similar external security
|
||||
mechanism to protect your cluster's API server from general internet
|
||||
traffic, you can configure rules to block any health check requests
|
||||
that originate from outside your cluster.
|
||||
|
||||
{{< /caution >}}
|
||||
|
||||
{{< codenew file="priority-and-fairness/health-for-strangers.yaml" >}}
|
||||
|
||||
## Resources
|
||||
|
||||
The flow control API involves two kinds of resources.
|
||||
[PriorityLevelConfigurations](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#prioritylevelconfiguration-v1beta1-flowcontrol-apiserver-k8s-io)
|
||||
define the available isolation classes, the share of the available concurrency
|
||||
|
@ -204,6 +207,7 @@ of the same API group, and it has the same Kinds with the same syntax and
|
|||
semantics.
|
||||
|
||||
### PriorityLevelConfiguration
|
||||
|
||||
A PriorityLevelConfiguration represents a single isolation class. Each
|
||||
PriorityLevelConfiguration has an independent limit on the number of outstanding
|
||||
requests, and limitations on the number of queued requests.
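
To see how this looks on a live cluster, you can list and inspect these objects directly; a sketch (`workload-low` is one of the suggested default priority levels, as shown in the debug output later on this page):

```shell
# Show all isolation classes and, separately, the FlowSchemas that route requests to them.
kubectl get prioritylevelconfigurations
kubectl get flowschemas

# Inspect a single priority level, including its concurrency shares and queuing configuration.
kubectl get prioritylevelconfiguration workload-low -o yaml
```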
|
||||
|
@ -217,6 +221,7 @@ server by restarting `kube-apiserver` with a different value for
|
|||
`--max-requests-inflight` (or `--max-mutating-requests-inflight`), and all
|
||||
PriorityLevelConfigurations will see their maximum allowed concurrency go up (or
|
||||
down) by the same fraction.
|
||||
|
||||
{{< caution >}}
|
||||
With the Priority and Fairness feature enabled, the total concurrency limit for
|
||||
the server is set to the sum of `--max-requests-inflight` and
|
||||
|
@ -235,8 +240,8 @@ above the threshold will be queued, with the shuffle sharding and fair queuing t
|
|||
to balance progress between request flows.
|
||||
|
||||
The queuing configuration allows tuning the fair queuing algorithm for a
|
||||
priority level. Details of the algorithm can be read in the [enhancement
|
||||
proposal](#whats-next), but in short:
|
||||
priority level. Details of the algorithm can be read in the
|
||||
[enhancement proposal](#whats-next), but in short:
|
||||
|
||||
* Increasing `queues` reduces the rate of collisions between different flows, at
|
||||
the cost of increased memory usage. A value of 1 here effectively disables the
|
||||
|
@ -249,15 +254,15 @@ proposal](#whats-next), but in short:
|
|||
* Changing `handSize` allows you to adjust the probability of collisions between
|
||||
different flows and the overall concurrency available to a single flow in an
|
||||
overload situation.
|
||||
{{< note >}}
|
||||
A larger `handSize` makes it less likely for two individual flows to collide
|
||||
(and therefore for one to be able to starve the other), but more likely that
|
||||
a small number of flows can dominate the apiserver. A larger `handSize` also
|
||||
potentially increases the amount of latency that a single high-traffic flow
|
||||
can cause. The maximum number of queued requests possible from a
|
||||
single flow is `handSize * queueLengthLimit`.
|
||||
{{< /note >}}
|
||||
|
||||
{{< note >}}
|
||||
A larger `handSize` makes it less likely for two individual flows to collide
|
||||
(and therefore for one to be able to starve the other), but more likely that
|
||||
a small number of flows can dominate the apiserver. A larger `handSize` also
|
||||
potentially increases the amount of latency that a single high-traffic flow
|
||||
can cause. The maximum number of queued requests possible from a
|
||||
single flow is `handSize * queueLengthLimit`.
|
||||
{{< /note >}}
|
||||
|
||||
Following is a table showing an interesting collection of shuffle
|
||||
sharding configurations, showing for each the probability that a
|
||||
|
@ -319,6 +324,7 @@ considered part of a single flow. The correct choice for a given FlowSchema
|
|||
depends on the resource and your particular environment.
|
||||
|
||||
## Diagnostics
|
||||
|
||||
Every HTTP response from an API server with the priority and fairness feature
|
||||
enabled has two extra headers: `X-Kubernetes-PF-FlowSchema-UID` and
|
||||
`X-Kubernetes-PF-PriorityLevel-UID`, noting the flow schema that matched the request
|
||||
|
@ -356,13 +362,14 @@ poorly-behaved workloads that may be harming system health.
|
|||
matched the request), `priority_level` (indicating the one to which
|
||||
the request was assigned), and `reason`. The `reason` label will
|
||||
have one of the following values:
|
||||
* `queue-full`, indicating that too many requests were already
|
||||
queued,
|
||||
* `concurrency-limit`, indicating that the
|
||||
PriorityLevelConfiguration is configured to reject rather than
|
||||
queue excess requests, or
|
||||
* `time-out`, indicating that the request was still in the queue
|
||||
when its queuing time limit expired.
|
||||
|
||||
* `queue-full`, indicating that too many requests were already
|
||||
queued,
|
||||
* `concurrency-limit`, indicating that the
|
||||
PriorityLevelConfiguration is configured to reject rather than
|
||||
queue excess requests, or
|
||||
* `time-out`, indicating that the request was still in the queue
|
||||
when its queuing time limit expired.
|
||||
|
||||
* `apiserver_flowcontrol_dispatched_requests_total` is a counter
|
||||
vector (cumulative since server start) of requests that began
|
||||
|
@ -405,6 +412,10 @@ poorly-behaved workloads that may be harming system health.
|
|||
queue) requests, broken down by the labels `priority_level` and
|
||||
`flow_schema`.
|
||||
|
||||
* `apiserver_flowcontrol_request_concurrency_in_use` is a gauge vector
|
||||
holding the instantaneous number of occupied seats, broken down by
|
||||
the labels `priority_level` and `flow_schema`.
|
||||
|
||||
* `apiserver_flowcontrol_priority_level_request_count_samples` is a
|
||||
histogram vector of observations of the then-current number of
|
||||
requests broken down by the labels `phase` (which takes on the
|
||||
|
@ -430,14 +441,15 @@ poorly-behaved workloads that may be harming system health.
|
|||
sample to its histogram, reporting the length of the queue immediately
|
||||
after the request was added. Note that this produces different
|
||||
statistics than an unbiased survey would.
|
||||
{{< note >}}
|
||||
An outlier value in a histogram here means it is likely that a single flow
|
||||
(i.e., requests by one user or for one namespace, depending on
|
||||
configuration) is flooding the API server, and being throttled. By contrast,
|
||||
if one priority level's histogram shows that all queues for that priority
|
||||
level are longer than those for other priority levels, it may be appropriate
|
||||
to increase that PriorityLevelConfiguration's concurrency shares.
|
||||
{{< /note >}}
|
||||
|
||||
{{< note >}}
|
||||
An outlier value in a histogram here means it is likely that a single flow
|
||||
(i.e., requests by one user or for one namespace, depending on
|
||||
configuration) is flooding the API server, and being throttled. By contrast,
|
||||
if one priority level's histogram shows that all queues for that priority
|
||||
level are longer than those for other priority levels, it may be appropriate
|
||||
to increase that PriorityLevelConfiguration's concurrency shares.
|
||||
{{< /note >}}
|
||||
|
||||
* `apiserver_flowcontrol_request_concurrency_limit` is a gauge vector
|
||||
holding the computed concurrency limit (based on the API server's
|
||||
|
@ -450,12 +462,13 @@ poorly-behaved workloads that may be harming system health.
|
|||
`priority_level` (indicating the one to which the request was
|
||||
assigned), and `execute` (indicating whether the request started
|
||||
executing).
|
||||
{{< note >}}
|
||||
Since each FlowSchema always assigns requests to a single
|
||||
PriorityLevelConfiguration, you can add the histograms for all the
|
||||
FlowSchemas for one priority level to get the effective histogram for
|
||||
requests assigned to that priority level.
|
||||
{{< /note >}}
|
||||
|
||||
{{< note >}}
|
||||
Since each FlowSchema always assigns requests to a single
|
||||
PriorityLevelConfiguration, you can add the histograms for all the
|
||||
FlowSchemas for one priority level to get the effective histogram for
|
||||
requests assigned to that priority level.
|
||||
{{< /note >}}
|
||||
|
||||
* `apiserver_flowcontrol_request_execution_seconds` is a histogram
|
||||
vector of how long requests took to actually execute, broken down by
|
||||
|
@ -465,14 +478,19 @@ poorly-behaved workloads that may be harming system health.
|
|||
|
||||
### Debug endpoints
|
||||
|
||||
When you enable the API Priority and Fairness feature, the kube-apiserver serves the following additional paths at its HTTP[S] ports.
|
||||
When you enable the API Priority and Fairness feature, the `kube-apiserver`
|
||||
serves the following additional paths at its HTTP[S] ports.
|
||||
|
||||
- `/debug/api_priority_and_fairness/dump_priority_levels` - a listing of
|
||||
all the priority levels and the current state of each. You can fetch like this:
|
||||
|
||||
- `/debug/api_priority_and_fairness/dump_priority_levels` - a listing of all the priority levels and the current state of each. You can fetch like this:
|
||||
```shell
|
||||
kubectl get --raw /debug/api_priority_and_fairness/dump_priority_levels
|
||||
```
|
||||
|
||||
The output is similar to this:
|
||||
```
|
||||
|
||||
```none
|
||||
PriorityLevelName, ActiveQueues, IsIdle, IsQuiescing, WaitingRequests, ExecutingRequests,
|
||||
workload-low, 0, true, false, 0, 0,
|
||||
global-default, 0, true, false, 0, 0,
|
||||
|
@ -483,12 +501,16 @@ When you enable the API Priority and Fairness feature, the kube-apiserver serves
|
|||
workload-high, 0, true, false, 0, 0,
|
||||
```
|
||||
|
||||
- `/debug/api_priority_and_fairness/dump_queues` - a listing of all the queues and their current state. You can fetch like this:
|
||||
- `/debug/api_priority_and_fairness/dump_queues` - a listing of all the
|
||||
queues and their current state. You can fetch like this:
|
||||
|
||||
```shell
|
||||
kubectl get --raw /debug/api_priority_and_fairness/dump_queues
|
||||
```
|
||||
|
||||
The output is similar to this:
|
||||
```
|
||||
|
||||
```none
|
||||
PriorityLevelName, Index, PendingRequests, ExecutingRequests, VirtualStart,
|
||||
workload-high, 0, 0, 0, 0.0000,
|
||||
workload-high, 1, 0, 0, 0.0000,
|
||||
|
@ -498,25 +520,33 @@ When you enable the API Priority and Fairness feature, the kube-apiserver serves
|
|||
leader-election, 15, 0, 0, 0.0000,
|
||||
```
|
||||
|
||||
- `/debug/api_priority_and_fairness/dump_requests` - a listing of all the requests that are currently waiting in a queue. You can fetch like this:
|
||||
- `/debug/api_priority_and_fairness/dump_requests` - a listing of all the requests
|
||||
that are currently waiting in a queue. You can fetch like this:
|
||||
|
||||
```shell
|
||||
kubectl get --raw /debug/api_priority_and_fairness/dump_requests
|
||||
```
|
||||
|
||||
The output is similar to this:
|
||||
```
|
||||
|
||||
```none
|
||||
PriorityLevelName, FlowSchemaName, QueueIndex, RequestIndexInQueue, FlowDistingsher, ArriveTime,
|
||||
exempt, <none>, <none>, <none>, <none>, <none>,
|
||||
system, system-nodes, 12, 0, system:node:127.0.0.1, 2020-07-23T15:26:57.179170694Z,
|
||||
```
|
||||
|
||||
In addition to the queued requests, the output includes one phantom line for each priority level that is exempt from limitation.
|
||||
In addition to the queued requests, the output includes one phantom line
|
||||
for each priority level that is exempt from limitation.
|
||||
|
||||
You can get a more detailed listing with a command like this:
|
||||
|
||||
```shell
|
||||
kubectl get --raw '/debug/api_priority_and_fairness/dump_requests?includeRequestDetails=1'
|
||||
```
|
||||
|
||||
The output is similar to this:
|
||||
```
|
||||
|
||||
```none
|
||||
PriorityLevelName, FlowSchemaName, QueueIndex, RequestIndexInQueue, FlowDistingsher, ArriveTime, UserName, Verb, APIPath, Namespace, Name, APIVersion, Resource, SubResource,
|
||||
system, system-nodes, 12, 0, system:node:127.0.0.1, 2020-07-23T15:31:03.583823404Z, system:node:127.0.0.1, create, /api/v1/namespaces/scaletest/configmaps,
|
||||
system, system-nodes, 12, 1, system:node:127.0.0.1, 2020-07-23T15:31:03.594555947Z, system:node:127.0.0.1, create, /api/v1/namespaces/scaletest/configmaps,
|
||||
|
@ -528,4 +558,4 @@ When you enable the API Priority and Fairness feature, the kube-apiserver serves
|
|||
For background information on design details for API priority and fairness, see
|
||||
the [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/1040-priority-and-fairness).
|
||||
You can make suggestions and feature requests via [SIG API Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery)
|
||||
or the feature's [slack channel](http://kubernetes.slack.com/messages/api-priority-and-fairness).
|
||||
or the feature's [slack channel](https://kubernetes.slack.com/messages/api-priority-and-fairness).
|
||||
|
|
|
@ -1,86 +0,0 @@
|
|||
---
|
||||
reviewers:
|
||||
title: Garbage collection for container images
|
||||
content_type: concept
|
||||
weight: 70
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
Garbage collection is a helpful function of kubelet that will clean up unused [images](/docs/concepts/containers/#container-images) and unused [containers](/docs/concepts/containers/). Kubelet will perform garbage collection for containers every minute and garbage collection for images every five minutes.
|
||||
|
||||
External garbage collection tools are not recommended as these tools can potentially break the behavior of kubelet by removing containers expected to exist.
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- body -->
|
||||
|
||||
## Image Collection
|
||||
|
||||
Kubernetes manages lifecycle of all images through imageManager, with the cooperation
|
||||
of cadvisor.
|
||||
|
||||
The policy for garbage collecting images takes two factors into consideration:
|
||||
`HighThresholdPercent` and `LowThresholdPercent`. Disk usage above the high threshold
|
||||
will trigger garbage collection. The garbage collection will delete least recently used images until the low
|
||||
threshold has been met.
|
||||
|
||||
## Container Collection
|
||||
|
||||
The policy for garbage collecting containers considers three user-defined variables. `MinAge` is the minimum age at which a container can be garbage collected. `MaxPerPodContainer` is the maximum number of dead containers every single
|
||||
pod (UID, container name) pair is allowed to have. `MaxContainers` is the maximum number of total dead containers. These variables can be individually disabled by setting `MinAge` to zero and setting `MaxPerPodContainer` and `MaxContainers` respectively to less than zero.
|
||||
|
||||
Kubelet will act on containers that are unidentified, deleted, or outside of the boundaries set by the previously mentioned flags. The oldest containers will generally be removed first. `MaxPerPodContainer` and `MaxContainers` may potentially conflict with each other in situations where retaining the maximum number of containers per pod (`MaxPerPodContainer`) would go outside the allowable range of global dead containers (`MaxContainers`). `MaxPerPodContainer` would be adjusted in this situation: a worst-case scenario would be to downgrade `MaxPerPodContainer` to 1 and evict the oldest containers. Additionally, containers owned by pods that have been deleted are removed once they are older than `MinAge`.
|
||||
|
||||
Containers that are not managed by kubelet are not subject to container garbage collection.
|
||||
|
||||
## User Configuration
|
||||
|
||||
You can adjust the following thresholds to tune image garbage collection, using these kubelet flags:
|
||||
|
||||
1. `image-gc-high-threshold`, the percent of disk usage which triggers image garbage collection.
|
||||
Default is 85%.
|
||||
2. `image-gc-low-threshold`, the percent of disk usage to which image garbage collection attempts
|
||||
to free. Default is 80%.
|
||||
|
||||
You can customize the garbage collection policy through the following kubelet flags:
|
||||
|
||||
1. `minimum-container-ttl-duration`, minimum age for a finished container before it is
|
||||
garbage collected. Default is 0 minutes, which means every finished container will be garbage collected.
|
||||
2. `maximum-dead-containers-per-container`, maximum number of old instances to be retained
|
||||
per container. Default is 1.
|
||||
3. `maximum-dead-containers`, maximum number of old instances of containers to retain globally.
|
||||
Default is -1, which means there is no global limit.
|
||||
|
||||
Containers can potentially be garbage collected before their usefulness has expired. These containers
|
||||
can contain logs and other data that can be useful for troubleshooting. A sufficiently large value for
|
||||
`maximum-dead-containers-per-container` is highly recommended to allow at least 1 dead container to be
|
||||
retained per expected container. A larger value for `maximum-dead-containers` is also recommended for a
|
||||
similar reason.
|
||||
See [this issue](https://github.com/kubernetes/kubernetes/issues/13287) for more details.
|
||||
|
||||
|
||||
## Deprecation
|
||||
|
||||
Some kubelet Garbage Collection features in this doc will be replaced by kubelet eviction in the future.
|
||||
|
||||
Including:
|
||||
|
||||
| Existing Flag | New Flag | Rationale |
|
||||
| ------------- | -------- | --------- |
|
||||
| `--image-gc-high-threshold` | `--eviction-hard` or `--eviction-soft` | existing eviction signals can trigger image garbage collection |
|
||||
| `--image-gc-low-threshold` | `--eviction-minimum-reclaim` | eviction reclaims achieve the same behavior |
|
||||
| `--maximum-dead-containers` | | deprecated once old logs are stored outside of container's context |
|
||||
| `--maximum-dead-containers-per-container` | | deprecated once old logs are stored outside of container's context |
|
||||
| `--minimum-container-ttl-duration` | | deprecated once old logs are stored outside of container's context |
|
||||
| `--low-diskspace-threshold-mb` | `--eviction-hard` or `eviction-soft` | eviction generalizes disk thresholds to other resources |
|
||||
| `--outofdisk-transition-frequency` | `--eviction-pressure-transition-period` | eviction generalizes disk pressure transition to other resources |
|
||||
|
||||
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
|
||||
See [Configuring Out Of Resource Handling](/docs/tasks/administer-cluster/out-of-resource/) for more details.
|
||||
|
|
@ -83,8 +83,11 @@ As an example, you can find detailed information about how `kube-up.sh` sets
|
|||
up logging for COS image on GCP in the corresponding
|
||||
[`configure-helper` script](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh).
|
||||
|
||||
When using a **CRI container runtime**, the kubelet is responsible for rotating the logs and managing the logging directory structure. The kubelet
|
||||
sends this information to the CRI container runtime and the runtime writes the container logs to the given location. The two kubelet flags `container-log-max-size` and `container-log-max-files` can be used to configure the maximum size for each log file and the maximum number of files allowed for each container respectively.
|
||||
When using a **CRI container runtime**, the kubelet is responsible for rotating the logs and managing the logging directory structure.
|
||||
The kubelet sends this information to the CRI container runtime and the runtime writes the container logs to the given location.
|
||||
The two kubelet parameters [`containerLogMaxSize` and `containerLogMaxFiles`](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration)
|
||||
in [kubelet config file](/docs/tasks/administer-cluster/kubelet-config-file/)
|
||||
can be used to configure the maximum size for each log file and the maximum number of files allowed for each container respectively.
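For illustration, a kubelet configuration file that keeps at most five 10 MiB log files per container might look like this; the values below are only an example, not a recommendation:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Rotate a container's log file once it reaches 10 MiB ...
containerLogMaxSize: 10Mi
# ... and keep at most 5 log files per container.
containerLogMaxFiles: 5
```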
|
||||
|
||||
When you run [`kubectl logs`](/docs/reference/generated/kubectl/kubectl-commands#logs) as in
|
||||
the basic logging example, the kubelet on the node handles the request and
|
||||
|
|
|
@ -50,7 +50,7 @@ It is a recommended practice to put resources related to the same microservice o
|
|||
A URL can also be specified as a configuration source, which is handy for deploying directly from configuration files checked into GitHub:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/nginx/nginx-deployment.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/application/nginx/nginx-deployment.yaml
|
||||
```
|
||||
|
||||
```shell
|
||||
|
|
|
@ -115,7 +115,7 @@ CPU is always requested as an absolute quantity, never as a relative quantity;
|
|||
|
||||
Limits and requests for `memory` are measured in bytes. You can express memory as
|
||||
a plain integer or as a fixed-point number using one of these suffixes:
|
||||
E, P, T, G, M, K. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi,
|
||||
E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi,
|
||||
Mi, Ki. For example, the following represent roughly the same value:
|
||||
|
||||
```shell
|
||||
|
|
|
@ -17,6 +17,11 @@ a *kubeconfig file*. This is a generic way of referring to configuration files.
|
|||
It does not mean that there is a file named `kubeconfig`.
|
||||
{{< /note >}}
|
||||
|
||||
{{< warning >}}
|
||||
Only use kubeconfig files from trusted sources. Using a specially-crafted kubeconfig file could result in malicious code execution or file exposure.
|
||||
If you must use an untrusted kubeconfig file, inspect it carefully first, much as you would a shell script.
|
||||
{{< /warning >}}
|
||||
|
||||
By default, `kubectl` looks for a file named `config` in the `$HOME/.kube` directory.
|
||||
You can specify other kubeconfig files by setting the `KUBECONFIG` environment
|
||||
variable or by setting the
|
||||
|
@ -154,4 +159,3 @@ are stored absolutely.
|
|||
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -12,26 +12,33 @@ weight: 30
|
|||
|
||||
<!-- overview -->
|
||||
|
||||
Kubernetes Secrets let you store and manage sensitive information, such
|
||||
as passwords, OAuth tokens, and ssh keys. Storing confidential information in a Secret
|
||||
is safer and more flexible than putting it verbatim in a
|
||||
{{< glossary_tooltip term_id="pod" >}} definition or in a
|
||||
{{< glossary_tooltip text="container image" term_id="image" >}}.
|
||||
See [Secrets design document](https://git.k8s.io/community/contributors/design-proposals/auth/secrets.md) for more information.
|
||||
|
||||
A Secret is an object that contains a small amount of sensitive data such as
|
||||
a password, a token, or a key. Such information might otherwise be put in a
|
||||
Pod specification or in an image. Users can create Secrets and the system
|
||||
also creates some Secrets.
|
||||
{{< glossary_tooltip term_id="pod" >}} specification or in a
|
||||
{{< glossary_tooltip text="container image" term_id="image" >}}. Using a
|
||||
Secret means that you don't need to include confidential data in your
|
||||
application code.
|
||||
|
||||
Because Secrets can be created independently of the Pods that use them, there
|
||||
is less risk of the Secret (and its data) being exposed during the workflow of
|
||||
creating, viewing, and editing Pods. Kubernetes, and applications that run in
|
||||
your cluster, can also take additional precautions with Secrets, such as
|
||||
avoiding writing confidential data to nonvolatile storage.
|
||||
|
||||
Secrets are similar to {{< glossary_tooltip text="ConfigMaps" term_id="configmap" >}}
|
||||
but are specifically intended to hold confidential data.
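As a minimal sketch (the name, key, and value below are hypothetical), a Secret can be declared like this:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: database-credentials   # hypothetical name
type: Opaque
stringData:
  # stringData values are convenient to write, but once stored in the API
  # they are only base64-encoded, not encrypted.
  password: "t0p-Secret"       # hypothetical value
```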
|
||||
|
||||
{{< caution >}}
|
||||
Kubernetes Secrets are, by default, stored as unencrypted base64-encoded
|
||||
strings. By default they can be retrieved - as plain text - by anyone with API
|
||||
access, or anyone with access to Kubernetes' underlying data store, etcd. In
|
||||
order to safely use Secrets, it is recommended you (at a minimum):
|
||||
Kubernetes Secrets are, by default, stored unencrypted in the API server's underlying data store (etcd). Anyone with API access can retrieve or modify a Secret, and so can anyone with access to etcd.
|
||||
Additionally, anyone who is authorized to create a Pod in a namespace can use that access to read any Secret in that namespace; this includes indirect access such as the ability to create a Deployment.
|
||||
|
||||
In order to safely use Secrets, take at least the following steps:
|
||||
|
||||
1. [Enable Encryption at Rest](/docs/tasks/administer-cluster/encrypt-data/) for Secrets.
|
||||
2. [Enable or configure RBAC rules](/docs/reference/access-authn-authz/authorization/) that restrict reading and writing the Secret. Be aware that secrets can be obtained implicitly by anyone with the permission to create a Pod.
|
||||
2. Enable or configure [RBAC rules](/docs/reference/access-authn-authz/authorization/) that
|
||||
restrict reading data in Secrets (including via indirect means).
|
||||
3. Where appropriate, also use mechanisms such as RBAC to limit which principals are allowed to create new Secrets or replace existing ones.
|
||||
|
||||
{{< /caution >}}
|
||||
|
||||
<!-- body -->
|
||||
|
@ -47,6 +54,10 @@ A Secret can be used with a Pod in three ways:
|
|||
- As [container environment variable](#using-secrets-as-environment-variables).
|
||||
- By the [kubelet when pulling images](#using-imagepullsecrets) for the Pod.
|
||||
|
||||
The Kubernetes control plane also uses Secrets; for example,
|
||||
[bootstrap token Secrets](#bootstrap-token-secrets) are a mechanism to
|
||||
help automate node registration.
|
||||
|
||||
The name of a Secret object must be a valid
|
||||
[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
|
||||
You can specify the `data` and/or the `stringData` field when creating a
|
||||
|
@ -1164,7 +1175,7 @@ limit access using [authorization policies](
|
|||
Secrets often hold values that span a spectrum of importance, many of which can
|
||||
cause escalations within Kubernetes (e.g. service account tokens) and to
|
||||
external systems. Even if an individual app can reason about the power of the
|
||||
secrets it expects to interact with, other apps within the same namespace can
|
||||
Secrets it expects to interact with, other apps within the same namespace can
|
||||
render those assumptions invalid.
|
||||
|
||||
For these reasons `watch` and `list` requests for secrets within a namespace are
|
||||
|
@ -1235,15 +1246,9 @@ for secret data, so that the secrets are not stored in the clear into {{< glossa
|
|||
- A user who can create a Pod that uses a secret can also see the value of that secret. Even
|
||||
if the API server policy does not allow that user to read the Secret, the user could
|
||||
run a Pod which exposes the secret.
|
||||
- Currently, anyone with root permission on any node can read _any_ secret from the API server,
|
||||
by impersonating the kubelet. It is a planned feature to only send secrets to
|
||||
nodes that actually require them, to restrict the impact of a root exploit on a
|
||||
single node.
|
||||
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
- Learn how to [manage Secret using `kubectl`](/docs/tasks/configmap-secret/managing-secret-using-kubectl/)
|
||||
- Learn how to [manage Secret using config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/)
|
||||
- Learn how to [manage Secret using kustomize](/docs/tasks/configmap-secret/managing-secret-using-kustomize/)
|
||||
|
||||
|
|
|
@ -77,6 +77,20 @@ the pull policy of any object after its initial creation.
|
|||
|
||||
When `imagePullPolicy` is defined without a specific value, it is also set to `Always`.
|
||||
|
||||
### ImagePullBackOff
|
||||
|
||||
When a kubelet starts creating containers for a Pod using a container runtime,
|
||||
the container might be in a [Waiting](/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting)
|
||||
state because of `ImagePullBackOff`.
|
||||
|
||||
The status `ImagePullBackOff` means that a container could not start because Kubernetes
|
||||
could not pull a container image (for reasons such as invalid image name, or pulling
|
||||
from a private registry without `imagePullSecret`). The `BackOff` part indicates
|
||||
that Kubernetes will keep trying to pull the image, with an increasing back-off delay.
|
||||
|
||||
Kubernetes raises the delay between each attempt until it reaches a compiled-in limit,
|
||||
which is 300 seconds (5 minutes).
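One way to observe this state (the Pod name here is hypothetical) is to list the Pod and check its `STATUS` column, or inspect the events reported by `kubectl describe`:

```shell
kubectl get pod my-pod
# NAME     READY   STATUS             RESTARTS   AGE
# my-pod   0/1     ImagePullBackOff   0          2m

kubectl describe pod my-pod
# The Events section typically shows a "Failed to pull image ..." entry
# followed by "Back-off pulling image ..." entries.
```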
|
||||
|
||||
## Multi-architecture images with image indexes
|
||||
|
||||
As well as providing binary images, a container registry can also serve a [container image index](https://github.com/opencontainers/image-spec/blob/master/image-index.md). An image index can point to multiple [image manifests](https://github.com/opencontainers/image-spec/blob/master/manifest.md) for architecture-specific versions of a container. The idea is that you can have a name for an image (for example: `pause`, `example/mycontainer`, `kube-apiserver`) and allow different systems to fetch the right binary image for the machine architecture they are using.
|
||||
|
@ -316,4 +330,5 @@ Kubelet will merge any `imagePullSecrets` into a single virtual `.docker/config.
|
|||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Read the [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/master/manifest.md)
|
||||
* Read the [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/master/manifest.md).
|
||||
* Learn about [container image garbage collection](/docs/concepts/architecture/garbage-collection/#container-image-garbage-collection).
|
||||
|
|
|
@ -51,7 +51,7 @@ heterogeneous node configurations, see [Scheduling](#scheduling) below.
|
|||
{{< /note >}}
|
||||
|
||||
The configurations have a corresponding `handler` name, referenced by the RuntimeClass. The
|
||||
handler must be a valid DNS 1123 label (alpha-numeric + `-` characters).
|
||||
handler must be a valid [DNS label name](/docs/concepts/overview/working-with-objects/names/#dns-label-names).
|
||||
|
||||
### 2. Create the corresponding RuntimeClass resources
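As a sketch, a RuntimeClass that references one of those handlers might look like this (the names `myclass` and `myconfiguration` are placeholders):

```yaml
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: myclass            # the name Pods reference via runtimeClassName
handler: myconfiguration   # must match the handler name configured in the CRI runtime
```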
|
||||
|
||||
|
@ -118,7 +118,7 @@ Runtime handlers are configured through containerd's configuration at
|
|||
`/etc/containerd/config.toml`. Valid handlers are configured under the runtimes section:
|
||||
|
||||
```
|
||||
[plugins.cri.containerd.runtimes.${HANDLER_NAME}]
|
||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.${HANDLER_NAME}]
|
||||
```
|
||||
|
||||
See containerd's config documentation for more details:
|
||||
|
@ -135,7 +135,7 @@ table](https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md#crioruntim
|
|||
runtime_path = "${PATH_TO_BINARY}"
|
||||
```
|
||||
|
||||
See CRI-O's [config documentation](https://raw.githubusercontent.com/cri-o/cri-o/9f11d1d/docs/crio.conf.5.md) for more details.
|
||||
See CRI-O's [config documentation](https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md) for more details.
|
||||
|
||||
## Scheduling
|
||||
|
||||
|
@ -179,4 +179,4 @@ are accounted for in Kubernetes.
|
|||
- [RuntimeClass Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md)
|
||||
- [RuntimeClass Scheduling Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md#runtimeclass-scheduling)
|
||||
- Read about the [Pod Overhead](/docs/concepts/scheduling-eviction/pod-overhead/) concept
|
||||
- [PodOverhead Feature Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190226-pod-overhead.md)
|
||||
- [PodOverhead Feature Design](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/688-pod-overhead)
|
||||
|
|
|
@ -51,8 +51,7 @@ Some of the things that you can use an operator to automate include:
|
|||
* choosing a leader for a distributed application without an internal
|
||||
member election process
|
||||
|
||||
What might an Operator look like in more detail? Here's an example in more
|
||||
detail:
|
||||
What might an Operator look like in more detail? Here's an example:
|
||||
|
||||
1. A custom resource named SampleDB, that you can configure into the cluster.
|
||||
2. A Deployment that makes sure a Pod is running that contains the
|
||||
|
@ -116,7 +115,7 @@ Operator.
|
|||
* [Charmed Operator Framework](https://juju.is/)
|
||||
* [kubebuilder](https://book.kubebuilder.io/)
|
||||
* [KUDO](https://kudo.dev/) (Kubernetes Universal Declarative Operator)
|
||||
* [Metacontroller](https://metacontroller.app/) along with WebHooks that
|
||||
* [Metacontroller](https://metacontroller.github.io/metacontroller/intro.html) along with WebHooks that
|
||||
you implement yourself
|
||||
* [Operator Framework](https://operatorframework.io)
|
||||
* [shell-operator](https://github.com/flant/shell-operator)
|
||||
|
@ -124,6 +123,7 @@ Operator.
|
|||
## {{% heading "whatsnext" %}}
|
||||
|
||||
|
||||
* Read the {{< glossary_tooltip text="CNCF" term_id="cncf" >}} [Operator White Paper](https://github.com/cncf/tag-app-delivery/blob/eece8f7307f2970f46f100f51932db106db46968/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md).
|
||||
* Learn more about [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
|
||||
* Find ready-made operators on [OperatorHub.io](https://operatorhub.io/) to suit your use case
|
||||
* [Publish](https://operatorhub.io/) your operator for other people to use
|
||||
|
|
|
@ -45,7 +45,7 @@ Containers have become popular because they provide extra benefits, such as:
|
|||
* Agile application creation and deployment: increased ease and efficiency of container image creation compared to VM image use.
|
||||
* Continuous development, integration, and deployment: provides for reliable and frequent container image build and deployment with quick and efficient rollbacks (due to image immutability).
|
||||
* Dev and Ops separation of concerns: create application container images at build/release time rather than deployment time, thereby decoupling applications from infrastructure.
|
||||
* Observability not only surfaces OS-level information and metrics, but also application health and other signals.
|
||||
* Observability: not only surfaces OS-level information and metrics, but also application health and other signals.
|
||||
* Environmental consistency across development, testing, and production: Runs the same on a laptop as it does in the cloud.
|
||||
* Cloud and OS distribution portability: Runs on Ubuntu, RHEL, CoreOS, on-premises, on major public clouds, and anywhere else.
|
||||
* Application-centric management: Raises the level of abstraction from running an OS on virtual hardware to running an application on an OS using logical resources.
|
||||
|
|
|
@ -30,6 +30,11 @@ Annotations, like labels, are key/value maps:
|
|||
}
|
||||
```
|
||||
|
||||
{{<note>}}
|
||||
The keys and the values in the map must be strings. In other words, you cannot use
|
||||
numeric, boolean, list or other types for either the keys or the values.
|
||||
{{</note>}}
|
||||
|
||||
Here are some examples of information that could be recorded in annotations:
|
||||
|
||||
* Fields managed by a declarative configuration layer. Attaching these fields
|
||||
|
|
|
@ -48,7 +48,7 @@ kubectl get pods --field-selector=status.phase!=Running,spec.restartPolicy=Alway
|
|||
|
||||
## Multiple resource types
|
||||
|
||||
You use field selectors across multiple resource types. This `kubectl` command selects all Statefulsets and Services that are not in the `default` namespace:
|
||||
You can use field selectors across multiple resource types. This `kubectl` command selects all Statefulsets and Services that are not in the `default` namespace:
|
||||
|
||||
```shell
|
||||
kubectl get statefulsets,services --all-namespaces --field-selector metadata.namespace!=default
|
||||
|
|
|
@ -0,0 +1,80 @@
|
|||
---
|
||||
title: Finalizers
|
||||
content_type: concept
|
||||
weight: 60
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
{{<glossary_definition term_id="finalizer" length="long">}}
|
||||
|
||||
You can use finalizers to control {{<glossary_tooltip text="garbage collection" term_id="garbage-collection">}}
|
||||
of resources by alerting {{<glossary_tooltip text="controllers" term_id="controller">}} to perform specific cleanup tasks before
|
||||
deleting the target resource.
|
||||
|
||||
Finalizers don't usually specify the code to execute. Instead, they are
|
||||
typically lists of keys on a specific resource similar to annotations.
|
||||
Kubernetes specifies some finalizers automatically, but you can also specify
|
||||
your own.
|
||||
|
||||
## How finalizers work
|
||||
|
||||
When you create a resource using a manifest file, you can specify finalizers in
|
||||
the `metadata.finalizers` field. When you attempt to delete the resource, the
|
||||
controller that manages it notices the values in the `finalizers` field and does
|
||||
the following:
|
||||
|
||||
* Modifies the object to add a `metadata.deletionTimestamp` field with the
|
||||
time you started the deletion.
|
||||
* Marks the object as read-only until its `metadata.finalizers` field is empty.
|
||||
|
||||
The controller then attempts to satisfy the requirements of the finalizers
|
||||
specified for that resource. Each time a finalizer condition is satisfied, the
|
||||
controller removes that key from the resource's `finalizers` field. When the
|
||||
field is empty, garbage collection continues. You can also use finalizers to
|
||||
prevent deletion of unmanaged resources.
|
||||
|
||||
A common example of a finalizer is `kubernetes.io/pv-protection`, which prevents
|
||||
accidental deletion of `PersistentVolume` objects. When a `PersistentVolume`
|
||||
object is in use by a Pod, Kubernetes adds the `pv-protection` finalizer. If you
|
||||
try to delete the `PersistentVolume`, it enters a `Terminating` status, but the
|
||||
controller can't delete it because the finalizer exists. When the Pod stops
|
||||
using the `PersistentVolume`, Kubernetes clears the `pv-protection` finalizer,
|
||||
and the controller deletes the volume.
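As a sketch, a custom finalizer key added under `metadata.finalizers` might look like this (the object name and finalizer key are hypothetical):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mymap
  finalizers:
  # While this key is present, deletion of the object is blocked
  # until a controller (or a person) removes it.
  - example.com/block-deletion
```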
|
||||
|
||||
## Owner references, labels, and finalizers {#owners-labels-finalizers}
|
||||
|
||||
Like {{<glossary_tooltip text="labels" term_id="label">}}, [owner references](/docs/concepts/overview/working-with-objects/owners-dependents/)
|
||||
describe the relationships between objects in Kubernetes, but are used for a
|
||||
different purpose. When a
|
||||
{{<glossary_tooltip text="controllers" term_id="controller">}} manages objects
|
||||
like Pods, it uses labels to track changes to groups of related objects. For
|
||||
example, when a {{<glossary_tooltip text="Job" term_id="job">}} creates one or
|
||||
more Pods, the Job controller applies labels to those pods and tracks changes to
|
||||
any Pods in the cluster with the same label.
|
||||
|
||||
The Job controller also adds *owner references* to those Pods, pointing at the
|
||||
Job that created the Pods. If you delete the Job while these Pods are running,
|
||||
Kubernetes uses the owner references (not labels) to determine which Pods in the
|
||||
cluster need cleanup.
|
||||
|
||||
Kubernetes also processes finalizers when it identifies owner references on a
|
||||
resource targeted for deletion.
|
||||
|
||||
In some situations, finalizers can block the deletion of dependent objects,
|
||||
which can cause the targeted owner object to remain in a read-only state for
|
||||
longer than expected without being fully deleted. In these situations, you
|
||||
should check finalizers and owner references on the target owner and dependent
|
||||
objects to troubleshoot the cause.
|
||||
|
||||
{{<note>}}
|
||||
In cases where objects are stuck in a deleting state, try to avoid manually
|
||||
removing finalizers to allow deletion to continue. Finalizers are usually added
|
||||
to resources for a reason, so forcefully removing them can lead to issues in
|
||||
your cluster.
|
||||
{{</note>}}
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Read [Using Finalizers to Control Deletion](/blog/2021/05/14/using-finalizers-to-control-deletion/)
|
||||
on the Kubernetes blog.
|
|
@ -42,7 +42,7 @@ Example labels:
|
|||
* `"partition" : "customerA"`, `"partition" : "customerB"`
|
||||
* `"track" : "daily"`, `"track" : "weekly"`
|
||||
|
||||
These are examples of commonly used labels; you are free to develop your own conventions. Keep in mind that label Key must be unique for a given object.
|
||||
These are examples of [commonly used labels](/docs/concepts/overview/working-with-objects/common-labels/); you are free to develop your own conventions. Keep in mind that label keys must be unique for a given object.
|
||||
|
||||
## Syntax and character set
|
||||
|
||||
|
@ -50,7 +50,7 @@ _Labels_ are key/value pairs. Valid label keys have two segments: an optional pr
|
|||
|
||||
If the prefix is omitted, the label Key is presumed to be private to the user. Automated system components (e.g. `kube-scheduler`, `kube-controller-manager`, `kube-apiserver`, `kubectl`, or other third-party automation) which add labels to end-user objects must specify a prefix.
|
||||
|
||||
The `kubernetes.io/` and `k8s.io/` prefixes are reserved for Kubernetes core components.
|
||||
The `kubernetes.io/` and `k8s.io/` prefixes are [reserved](/docs/reference/labels-annotations-taints/) for Kubernetes core components.
|
||||
|
||||
Valid label value:
|
||||
* must be 63 characters or less (can be empty),
|
||||
|
|
|
@ -28,7 +28,7 @@ For non-unique user-provided attributes, Kubernetes provides [labels](/docs/conc
|
|||
In cases when objects represent a physical entity, like a Node representing a physical host, when the host is re-created under the same name without deleting and re-creating the Node, Kubernetes treats the new host as the old one, which may lead to inconsistencies.
|
||||
{{< /note >}}
|
||||
|
||||
Below are three types of commonly used name constraints for resources.
|
||||
Below are four types of commonly used name constraints for resources.
|
||||
|
||||
### DNS Subdomain Names
|
||||
|
||||
|
@ -41,7 +41,7 @@ This means the name must:
|
|||
- start with an alphanumeric character
|
||||
- end with an alphanumeric character
|
||||
|
||||
### DNS Label Names
|
||||
### RFC 1123 Label Names {#dns-label-names}
|
||||
|
||||
Some resource types require their names to follow the DNS
|
||||
label standard as defined in [RFC 1123](https://tools.ietf.org/html/rfc1123).
|
||||
|
@ -52,6 +52,17 @@ This means the name must:
|
|||
- start with an alphanumeric character
|
||||
- end with an alphanumeric character
|
||||
|
||||
### RFC 1035 Label Names
|
||||
|
||||
Some resource types require their names to follow the DNS
|
||||
label standard as defined in [RFC 1035](https://tools.ietf.org/html/rfc1035).
|
||||
This means the name must:
|
||||
|
||||
- contain at most 63 characters
|
||||
- contain only lowercase alphanumeric characters or '-'
|
||||
- start with an alphabetic character
|
||||
- end with an alphanumeric character
|
||||
|
||||
### Path Segment Names
|
||||
|
||||
Some resource types require their names to be able to be safely encoded as a
|
||||
|
|
|
@ -0,0 +1,71 @@
|
|||
---
|
||||
title: Owners and Dependents
|
||||
content_type: concept
|
||||
weight: 60
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
In Kubernetes, some objects are *owners* of other objects. For example, a
|
||||
{{<glossary_tooltip text="ReplicaSet" term_id="replica-set">}} is the owner of a set of Pods. These owned objects are *dependents*
|
||||
of their owner.
|
||||
|
||||
Ownership is different from the [labels and selectors](/docs/concepts/overview/working-with-objects/labels/)
|
||||
mechanism that some resources also use. For example, consider a Service that
|
||||
creates `EndpointSlice` objects. The Service uses labels to allow the control plane to
|
||||
determine which `EndpointSlice` objects are used for that Service. In addition
|
||||
to the labels, each `EndpointSlice` that is managed on behalf of a Service has
|
||||
an owner reference. Owner references help different parts of Kubernetes avoid
|
||||
interfering with objects they don’t control.
|
||||
|
||||
## Owner references in object specifications
|
||||
|
||||
Dependent objects have a `metadata.ownerReferences` field that references their
|
||||
owner object. A valid owner reference consists of the object name and a UID
|
||||
within the same namespace as the dependent object. Kubernetes sets the value of
|
||||
this field automatically for objects that are dependents of other objects like
|
||||
ReplicaSets, DaemonSets, Deployments, Jobs, CronJobs, and ReplicationControllers.
|
||||
You can also configure these relationships manually by changing the value of
|
||||
this field. However, you usually don't need to and can allow Kubernetes to
|
||||
automatically manage the relationships.
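For illustration, the owner reference that the ReplicaSet controller sets on one of its Pods might look roughly like this (the owner name and UID are hypothetical):

```yaml
metadata:
  ownerReferences:
  - apiVersion: apps/v1
    kind: ReplicaSet
    name: my-repset                             # hypothetical owner name
    uid: d9607e19-f88f-11e6-a518-42010a800195   # hypothetical UID
    controller: true
    blockOwnerDeletion: true
```

You can inspect this field on a live object with `kubectl get pod <pod-name> -o yaml`.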
|
||||
|
||||
Dependent objects also have an `ownerReferences.blockOwnerDeletion` field that
|
||||
takes a boolean value and controls whether specific dependents can block garbage
|
||||
collection from deleting their owner object. Kubernetes automatically sets this
|
||||
field to `true` if a {{<glossary_tooltip text="controller" term_id="controller">}}
|
||||
(for example, the Deployment controller) sets the value of the
|
||||
`metadata.ownerReferences` field. You can also set the value of the
|
||||
`blockOwnerDeletion` field manually to control which dependents block garbage
|
||||
collection.
|
||||
|
||||
A Kubernetes admission controller controls user access to change this field for
|
||||
dependent resources, based on the delete permissions of the owner. This control
|
||||
prevents unauthorized users from delaying owner object deletion.
|
||||
|
||||
## Ownership and finalizers
|
||||
|
||||
When you tell Kubernetes to delete a resource, the API server allows the
|
||||
managing controller to process any [finalizer rules](/docs/concepts/overview/working-with-objects/finalizers/)
|
||||
for the resource. {{<glossary_tooltip text="Finalizers" term_id="finalizer">}}
|
||||
prevent accidental deletion of resources your cluster may still need to function
|
||||
correctly. For example, if you try to delete a `PersistentVolume` that is still
|
||||
in use by a Pod, the deletion does not happen immediately because the
|
||||
`PersistentVolume` has the `kubernetes.io/pv-protection` finalizer on it.
|
||||
Instead, the volume remains in the `Terminating` status until Kubernetes clears
|
||||
the finalizer, which only happens after the `PersistentVolume` is no longer
|
||||
bound to a Pod.
|
||||
|
||||
Kubernetes also adds finalizers to an owner resource when you use either
|
||||
[foreground or orphan cascading deletion](/docs/concepts/architecture/garbage-collection/#cascading-deletion).
|
||||
In foreground deletion, it adds the `foreground` finalizer so that the
|
||||
controller must delete dependent resources that also have
|
||||
`ownerReferences.blockOwnerDeletion=true` before it deletes the owner. If you
|
||||
specify an orphan deletion policy, Kubernetes adds the `orphan` finalizer so
|
||||
that the controller ignores dependent resources after it deletes the owner
|
||||
object.
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Learn more about [Kubernetes finalizers](/docs/concepts/overview/working-with-objects/finalizers/).
|
||||
* Learn about [garbage collection](/docs/concepts/architecture/garbage-collection).
|
||||
* Read the API reference for [object metadata](/docs/reference/kubernetes-api/common-definitions/object-meta/#System).
|
|
@ -10,7 +10,8 @@ weight: 40
|
|||
|
||||
{{< feature-state for_k8s_version="v1.20" state="stable" >}}
|
||||
|
||||
Kubernetes allow you to limit the number of process IDs (PIDs) that a {{< glossary_tooltip term_id="Pod" text="Pod" >}} can use.
|
||||
Kubernetes allows you to limit the number of process IDs (PIDs) that a
|
||||
{{< glossary_tooltip term_id="Pod" text="Pod" >}} can use.
|
||||
You can also reserve a number of allocatable PIDs for each {{< glossary_tooltip term_id="node" text="node" >}}
|
||||
for use by the operating system and daemons (rather than by Pods).
|
||||
|
||||
|
@ -84,7 +85,9 @@ gate](/docs/reference/command-line-tools-reference/feature-gates/)
|
|||
Kubernetes allows you to limit the number of processes running in a Pod. You
|
||||
specify this limit at the node level, rather than configuring it as a resource
|
||||
limit for a particular Pod. Each Node can have a different PID limit.
|
||||
To configure the limit, you can specify the command line parameter `--pod-max-pids` to the kubelet, or set `PodPidsLimit` in the kubelet [configuration file](/docs/tasks/administer-cluster/kubelet-config-file/).
|
||||
To configure the limit, you can specify the command line parameter `--pod-max-pids`
|
||||
to the kubelet, or set `PodPidsLimit` in the kubelet
|
||||
[configuration file](/docs/tasks/administer-cluster/kubelet-config-file/).
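For example, in the kubelet configuration file the corresponding field is spelled `podPidsLimit`; the value below is only illustrative:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Limit each Pod on this node to at most 1024 process IDs.
podPidsLimit: 1024
```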
|
||||
|
||||
{{< note >}}
|
||||
Before Kubernetes version 1.20, PID resource limiting for Pods required enabling
|
||||
|
@ -95,9 +98,12 @@ the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
|
|||
## PID based eviction
|
||||
|
||||
You can configure the kubelet to start terminating a Pod when it is misbehaving and consuming an abnormal amount of resources.
|
||||
This feature is called eviction. You can [Configure Out of Resource Handling](/docs/tasks/administer-cluster/out-of-resource) for various eviction signals.
|
||||
This feature is called eviction. You can
|
||||
[Configure Out of Resource Handling](/docs/concepts/scheduling-eviction/node-pressure-eviction/)
|
||||
for various eviction signals.
|
||||
Use the `pid.available` eviction signal to configure the threshold for the number of PIDs used by a Pod.
|
||||
You can set soft and hard eviction policies. However, even with the hard eviction policy, if the number of PIDs growing very fast,
|
||||
You can set soft and hard eviction policies.
|
||||
However, even with the hard eviction policy, if the number of PIDs is growing very fast,
|
||||
the node can still get into an unstable state by hitting the node PID limit.
|
||||
The eviction signal value is calculated periodically and does NOT enforce the limit.
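A sketch of such thresholds in the kubelet configuration file (the percentages and grace period are illustrative, not recommendations):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
evictionSoft:
  pid.available: "15%"    # soft threshold for available PIDs
evictionSoftGracePeriod:
  pid.available: "2m"     # how long the soft threshold may be exceeded
evictionHard:
  pid.available: "10%"    # hard threshold: evict immediately
```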
|
||||
|
||||
|
@ -112,6 +118,7 @@ when one Pod is misbehaving.
|
|||
## {{% heading "whatsnext" %}}
|
||||
|
||||
- Refer to the [PID Limiting enhancement document](https://github.com/kubernetes/enhancements/blob/097b4d8276bc9564e56adf72505d43ce9bc5e9e8/keps/sig-node/20190129-pid-limiting.md) for more information.
|
||||
- For historical context, read [Process ID Limiting for Stability Improvements in Kubernetes 1.14](/blog/2019/04/15/process-id-limiting-for-stability-improvements-in-kubernetes-1.14/).
|
||||
- For historical context, read
|
||||
[Process ID Limiting for Stability Improvements in Kubernetes 1.14](/blog/2019/04/15/process-id-limiting-for-stability-improvements-in-kubernetes-1.14/).
|
||||
- Read [Managing Resources for Containers](/docs/concepts/configuration/manage-resources-containers/).
|
||||
- Learn how to [Configure Out of Resource Handling](/docs/tasks/administer-cluster/out-of-resource).
|
||||
- Learn how to [Configure Out of Resource Handling](/docs/concepts/scheduling-eviction/node-pressure-eviction/).
|
||||
|
|
|
@ -11,7 +11,8 @@ weight: 30
|
|||
|
||||
{{< feature-state for_k8s_version="v1.21" state="deprecated" >}}
|
||||
|
||||
PodSecurityPolicy is deprecated as of Kubernetes v1.21, and will be removed in v1.25.
|
||||
PodSecurityPolicy is deprecated as of Kubernetes v1.21, and will be removed in v1.25. For more information on the deprecation,
|
||||
see [PodSecurityPolicy Deprecation: Past, Present, and Future](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/).
|
||||
|
||||
Pod Security Policies enable fine-grained authorization of pod creation and
|
||||
updates.
|
||||
|
@ -48,13 +49,12 @@ administrator to control the following:
|
|||
|
||||
## Enabling Pod Security Policies
|
||||
|
||||
Pod security policy control is implemented as an optional (but recommended)
|
||||
[admission
|
||||
controller](/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy). PodSecurityPolicies
|
||||
are enforced by [enabling the admission
|
||||
Pod security policy control is implemented as an optional [admission
|
||||
controller](/docs/reference/access-authn-authz/admission-controllers/#podsecuritypolicy).
|
||||
PodSecurityPolicies are enforced by [enabling the admission
|
||||
controller](/docs/reference/access-authn-authz/admission-controllers/#how-do-i-turn-on-an-admission-control-plug-in),
|
||||
but doing so without authorizing any policies **will prevent any pods from being
|
||||
created** in the cluster.
|
||||
but doing so without authorizing any policies **will prevent any pods from being created** in the
|
||||
cluster.
|
||||
|
||||
Since the pod security policy API (`policy/v1beta1/podsecuritypolicy`) is
|
||||
enabled independently of the admission controller, for existing clusters it is
|
||||
|
@ -110,7 +110,11 @@ roleRef:
|
|||
name: <role name>
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
subjects:
|
||||
# Authorize specific service accounts:
|
||||
# Authorize all service accounts in a namespace (recommended):
|
||||
- kind: Group
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
name: system:serviceaccounts:<authorized namespace>
|
||||
# Authorize specific service accounts (not recommended):
|
||||
- kind: ServiceAccount
|
||||
name: <authorized service account name>
|
||||
namespace: <authorized pod namespace>
|
||||
|
@ -139,6 +143,40 @@ Examples](/docs/reference/access-authn-authz/rbac#role-binding-examples).
|
|||
For a complete example of authorizing a PodSecurityPolicy, see
|
||||
[below](#example).
|
||||
|
||||
### Recommended Practice
|
||||
|
||||
PodSecurityPolicy is being replaced by a new, simplified `PodSecurity` {{< glossary_tooltip
|
||||
text="admission controller" term_id="admission-controller" >}}. For more details on this change, see
|
||||
[PodSecurityPolicy Deprecation: Past, Present, and
|
||||
Future](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/). Follow these
|
||||
guidelines to simplify migration from PodSecurityPolicy to the new admission controller:
|
||||
|
||||
1. Limit your PodSecurityPolicies to the policies defined by the [Pod Security Standards](/docs/concepts/security/pod-security-standards):
|
||||
- {{< example file="policy/privileged-psp.yaml" >}}Privileged{{< /example >}}
|
||||
- {{< example file="policy/baseline-psp.yaml" >}}Baseline{{< /example >}}
|
||||
- {{< example file="policy/restricted-psp.yaml" >}}Restricted{{< /example >}}
|
||||
|
||||
2. Only bind PSPs to entire namespaces, by using the `system:serviceaccounts:<namespace>` group
|
||||
(where `<namespace>` is the target namespace). For example:
|
||||
|
||||
```yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
# This cluster role binding allows all pods in the "development" namespace to use the baseline PSP.
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: psp-baseline-namespaces
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: psp-baseline
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: system:serviceaccounts:development
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
- kind: Group
|
||||
name: system:serviceaccounts:canary
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
```
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
|
@ -464,12 +502,12 @@ allowed prefix, and a `readOnly` field indicating it must be mounted read-only.
|
|||
For example:
|
||||
|
||||
```yaml
|
||||
allowedHostPaths:
|
||||
# This allows "/foo", "/foo/", "/foo/bar" etc., but
|
||||
# disallows "/fool", "/etc/foo" etc.
|
||||
# "/foo/../" is never valid.
|
||||
- pathPrefix: "/foo"
|
||||
readOnly: true # only allow read-only mounts
|
||||
allowedHostPaths:
|
||||
# This allows "/foo", "/foo/", "/foo/bar" etc., but
|
||||
# disallows "/fool", "/etc/foo" etc.
|
||||
# "/foo/../" is never valid.
|
||||
- pathPrefix: "/foo"
|
||||
readOnly: true # only allow read-only mounts
|
||||
```
|
||||
|
||||
{{< warning >}}There are many ways a container with unrestricted access to the host
|
||||
|
@ -661,8 +699,10 @@ Refer to the [Sysctl documentation](
|
|||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
- See [PodSecurityPolicy Deprecation: Past, Present, and
|
||||
Future](/blog/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/) to learn about
|
||||
the future of pod security policy.
|
||||
|
||||
- See [Pod Security Standards](/docs/concepts/security/pod-security-standards/) for policy recommendations.
|
||||
|
||||
- Refer to [Pod Security Policy Reference](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podsecuritypolicy-v1beta1-policy) for the api details.
|
||||
|
||||
|
||||
|
|
|
@ -57,8 +57,9 @@ Neither contention nor changes to quota will affect already created resources.
|
|||
|
||||
## Enabling Resource Quota
|
||||
|
||||
Resource Quota support is enabled by default for many Kubernetes distributions. It is
|
||||
enabled when the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} `--enable-admission-plugins=` flag has `ResourceQuota` as
|
||||
Resource Quota support is enabled by default for many Kubernetes distributions. It is
|
||||
enabled when the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}}
|
||||
`--enable-admission-plugins=` flag has `ResourceQuota` as
|
||||
one of its arguments.
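For example, the relevant part of the API server invocation might look like this (the other plugin names are only illustrative):

```shell
# other kube-apiserver flags omitted for brevity
kube-apiserver --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ResourceQuota
```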
|
||||
|
||||
A resource quota is enforced in a particular namespace when there is a
|
||||
|
@ -66,7 +67,9 @@ ResourceQuota in that namespace.
|
|||
|
||||
## Compute Resource Quota
|
||||
|
||||
You can limit the total sum of [compute resources](/docs/concepts/configuration/manage-resources-containers/) that can be requested in a given namespace.
|
||||
You can limit the total sum of
|
||||
[compute resources](/docs/concepts/configuration/manage-resources-containers/)
|
||||
that can be requested in a given namespace.
|
||||
|
||||
The following resource types are supported:
|
||||
|
||||
|
@ -125,7 +128,9 @@ In release 1.8, quota support for local ephemeral storage is added as an alpha f
|
|||
| `ephemeral-storage` | Same as `requests.ephemeral-storage`. |
|
||||
|
||||
{{< note >}}
|
||||
When using a CRI container runtime, container logs will count against the ephemeral storage quota. This can result in the unexpected eviction of pods that have exhausted their storage quotas. Refer to [Logging Architecture](/docs/concepts/cluster-administration/logging/) for details.
|
||||
When using a CRI container runtime, container logs will count against the ephemeral storage quota.
|
||||
This can result in the unexpected eviction of pods that have exhausted their storage quotas.
|
||||
Refer to [Logging Architecture](/docs/concepts/cluster-administration/logging/) for details.
|
||||
{{< /note >}}
|
||||
|
||||
## Object Count Quota
|
||||
|
@ -192,7 +197,7 @@ Resources specified on the quota outside of the allowed set results in a validat
|
|||
| `NotTerminating` | Match pods where `.spec.activeDeadlineSeconds is nil` |
|
||||
| `BestEffort` | Match pods that have best effort quality of service. |
|
||||
| `NotBestEffort` | Match pods that do not have best effort quality of service. |
|
||||
| `PriorityClass` | Match pods that references the specified [priority class](/docs/concepts/configuration/pod-priority-preemption). |
|
||||
| `PriorityClass` | Match pods that reference the specified [priority class](/docs/concepts/scheduling-eviction/pod-priority-preemption). |
|
||||
| `CrossNamespacePodAffinity` | Match pods that have cross-namespace pod [(anti)affinity terms](/docs/concepts/scheduling-eviction/assign-pod-node). |
|
||||
|
||||
The `BestEffort` scope restricts a quota to tracking the following resource:
|
||||
|
@ -248,13 +253,14 @@ specified.
|
|||
|
||||
{{< feature-state for_k8s_version="v1.17" state="stable" >}}
|
||||
|
||||
Pods can be created at a specific [priority](/docs/concepts/configuration/pod-priority-preemption/#pod-priority).
|
||||
Pods can be created at a specific [priority](/docs/concepts/scheduling-eviction/pod-priority-preemption/#pod-priority).
|
||||
You can control a pod's consumption of system resources based on a pod's priority, by using the `scopeSelector`
|
||||
field in the quota spec.
|
||||
|
||||
A quota is matched and consumed only if `scopeSelector` in the quota spec selects the pod.
|
||||
|
||||
When quota is scoped for priority class using `scopeSelector` field, quota object is restricted to track only following resources:
|
||||
When a quota is scoped for priority class using the `scopeSelector` field, the quota object
|
||||
is restricted to tracking only the following resources:
|
||||
|
||||
* `pods`
|
||||
* `cpu`
|
||||
|
@ -554,7 +560,7 @@ kubectl create -f ./object-counts.yaml --namespace=myspace
|
|||
kubectl get quota --namespace=myspace
|
||||
```
|
||||
|
||||
```
|
||||
```none
|
||||
NAME AGE
|
||||
compute-resources 30s
|
||||
object-counts 32s
|
||||
|
@ -564,7 +570,7 @@ object-counts 32s
|
|||
kubectl describe quota compute-resources --namespace=myspace
|
||||
```
|
||||
|
||||
```
|
||||
```none
|
||||
Name: compute-resources
|
||||
Namespace: myspace
|
||||
Resource Used Hard
|
||||
|
@ -580,7 +586,7 @@ requests.nvidia.com/gpu 0 4
|
|||
kubectl describe quota object-counts --namespace=myspace
|
||||
```
|
||||
|
||||
```
|
||||
```none
|
||||
Name: object-counts
|
||||
Namespace: myspace
|
||||
Resource Used Hard
|
||||
|
@ -677,10 +683,10 @@ Then, create a resource quota object in the `kube-system` namespace:
|
|||
{{< codenew file="policy/priority-class-resourcequota.yaml" >}}
|
||||
|
||||
```shell
|
||||
$ kubectl apply -f https://k8s.io/examples/policy/priority-class-resourcequota.yaml -n kube-system
|
||||
kubectl apply -f https://k8s.io/examples/policy/priority-class-resourcequota.yaml -n kube-system
|
||||
```
|
||||
|
||||
```
|
||||
```none
|
||||
resourcequota/pods-cluster-services created
|
||||
```
|
||||
|
||||
|
|
|
@ -193,7 +193,7 @@ resources based on the filesystems on the node.
|
|||
If the node has a dedicated `imagefs` filesystem for container runtimes to use,
|
||||
the kubelet does the following:
|
||||
|
||||
* If the `nodefs` filesystem meets the eviction threshlds, the kubelet garbage collects
|
||||
* If the `nodefs` filesystem meets the eviction thresholds, the kubelet garbage collects
|
||||
dead pods and containers.
|
||||
* If the `imagefs` filesystem meets the eviction thresholds, the kubelet
|
||||
deletes all unused images.
|
||||
|
@ -214,7 +214,7 @@ signal below the threshold, the kubelet begins to evict end-user pods.
|
|||
The kubelet uses the following parameters to determine pod eviction order:
|
||||
|
||||
1. Whether the pod's resource usage exceeds requests
|
||||
1. [Pod Priority](/docs/concepts/configuration/pod-priority-preemption/)
|
||||
1. [Pod Priority](/docs/concepts/scheduling-eviction/pod-priority-preemption/)
|
||||
1. The pod's resource usage relative to requests
|
||||
|
||||
As a result, kubelet ranks and evicts pods in the following order:
|
||||
|
|
|
@ -252,12 +252,12 @@ Even so, the answer to the preceding question must be yes. If the answer is no,
|
|||
the Node is not considered for preemption.
|
||||
{{< /note >}}
|
||||
|
||||
If a pending Pod has inter-pod affinity to one or more of the lower-priority
|
||||
Pods on the Node, the inter-Pod affinity rule cannot be satisfied in the absence
|
||||
of those lower-priority Pods. In this case, the scheduler does not preempt any
|
||||
Pods on the Node. Instead, it looks for another Node. The scheduler might find a
|
||||
suitable Node or it might not. There is no guarantee that the pending Pod can be
|
||||
scheduled.
|
||||
If a pending Pod has inter-pod {{< glossary_tooltip text="affinity" term_id="affinity" >}}
|
||||
to one or more of the lower-priority Pods on the Node, the inter-Pod affinity
|
||||
rule cannot be satisfied in the absence of those lower-priority Pods. In this case,
|
||||
the scheduler does not preempt any Pods on the Node. Instead, it looks for another
|
||||
Node. The scheduler might find a suitable Node or it might not. There is no
|
||||
guarantee that the pending Pod can be scheduled.
|
||||
|
||||
Our recommended solution for this problem is to create inter-Pod affinity only
|
||||
towards equal or higher priority Pods.
|
||||
|
@ -353,7 +353,7 @@ the removal of the lowest priority Pods is not sufficient to allow the scheduler
|
|||
to schedule the preemptor Pod, or if the lowest priority Pods are protected by
|
||||
`PodDisruptionBudget`.
|
||||
|
||||
The kubelet uses Priority to determine pod order for [out-of-resource eviction](/docs/tasks/administer-cluster/out-of-resource/).
|
||||
The kubelet uses Priority to determine pod order for [node-pressure eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/).
|
||||
You can use the QoS class to estimate the order in which pods are most likely
|
||||
to get evicted. The kubelet ranks pods for eviction based on the following factors:
|
||||
|
||||
|
@ -361,10 +361,10 @@ to get evicted. The kubelet ranks pods for eviction based on the following facto
|
|||
1. Pod Priority
|
||||
1. Amount of resource usage relative to requests
|
||||
|
||||
See [evicting end-user pods](/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods)
|
||||
See [Pod selection for kubelet eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/#pod-selection-for-kubelet-eviction)
|
||||
for more details.
|
||||
|
||||
kubelet out-of-resource eviction does not evict Pods when their
|
||||
kubelet node-pressure eviction does not evict Pods when their
|
||||
usage does not exceed their requests. If a Pod with lower priority is not
|
||||
exceeding its requests, it won't be evicted. Another Pod with higher priority
|
||||
that exceeds its requests may be evicted.
|
||||
|
|
|
@ -8,7 +8,7 @@ weight: 90
|
|||
|
||||
<!-- overview -->
|
||||
|
||||
{{< feature-state for_k8s_version="v1.15" state="alpha" >}}
|
||||
{{< feature-state for_k8s_version="v1.19" state="stable" >}}
|
||||
|
||||
The scheduling framework is a pluggable architecture for the Kubernetes scheduler.
|
||||
It adds a new set of "plugin" APIs to the existing scheduler. Plugins are compiled into the scheduler. The APIs allow most scheduling features to be implemented as plugins, while keeping the
|
||||
|
|
|
@ -10,7 +10,7 @@ weight: 40
|
|||
|
||||
|
||||
<!-- overview -->
|
||||
[_Node affinity_](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity),
|
||||
[_Node affinity_](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity)
|
||||
is a property of {{< glossary_tooltip text="Pods" term_id="pod" >}} that *attracts* them to
|
||||
a set of {{< glossary_tooltip text="nodes" term_id="node" >}} (either as a preference or a
|
||||
hard requirement). _Taints_ are the opposite -- they allow a node to repel a set of pods.
|
||||
|
@ -266,9 +266,23 @@ This ensures that DaemonSet pods are never evicted due to these problems.
|
|||
|
||||
## Taint Nodes by Condition
|
||||
|
||||
The node lifecycle controller automatically creates taints corresponding to
|
||||
Node conditions with `NoSchedule` effect.
|
||||
Similarly the scheduler does not check Node conditions; instead the scheduler checks taints. This assures that Node conditions don't affect what's scheduled onto the Node. The user can choose to ignore some of the Node's problems (represented as Node conditions) by adding appropriate Pod tolerations.
|
||||
The control plane, using the node {{<glossary_tooltip text="controller" term_id="controller">}},
|
||||
automatically creates taints with a `NoSchedule` effect for [node conditions](/docs/concepts/scheduling-eviction/node-pressure-eviction/#node-conditions).
|
||||
|
||||
The scheduler checks taints, not node conditions, when it makes scheduling
|
||||
decisions. This ensures that node conditions don't directly affect scheduling.
|
||||
For example, if the `DiskPressure` node condition is active, the control plane
|
||||
adds the `node.kubernetes.io/disk-pressure` taint and does not schedule new pods
|
||||
onto the affected node. If the `MemoryPressure` node condition is active, the
|
||||
control plane adds the `node.kubernetes.io/memory-pressure` taint.
|
||||
|
||||
You can ignore node conditions for newly created pods by adding the corresponding
|
||||
Pod tolerations. The control plane also adds the `node.kubernetes.io/memory-pressure`
|
||||
toleration on pods that have a {{< glossary_tooltip text="QoS class" term_id="qos-class" >}}
|
||||
other than `BestEffort`. This is because Kubernetes treats pods in the `Guaranteed`
|
||||
or `Burstable` QoS classes (even pods with no memory request set) as if they are
|
||||
able to cope with memory pressure, while new `BestEffort` pods are not scheduled
|
||||
onto the affected node.
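For example, a Pod that should still be admitted to nodes carrying the memory-pressure taint could declare a toleration like this in its spec (a sketch, not a recommendation):

```yaml
tolerations:
- key: "node.kubernetes.io/memory-pressure"
  operator: "Exists"
  effect: "NoSchedule"
```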
|
||||
|
||||
The DaemonSet controller automatically adds the following `NoSchedule`
|
||||
tolerations to all daemons, to prevent DaemonSets from breaking.
|
||||
|
@ -282,10 +296,9 @@ tolerations to all daemons, to prevent DaemonSets from breaking.
|
|||
Adding these tolerations ensures backward compatibility. You can also add
|
||||
arbitrary tolerations to DaemonSets.
|
||||
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Read about [out of resource handling](/docs/tasks/administer-cluster/out-of-resource/) and how you can configure it
|
||||
* Read about [pod priority](/docs/concepts/configuration/pod-priority-preemption/)
|
||||
* Read about [Node-pressure Eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/) and how you can configure it
|
||||
* Read about [Pod Priority](/docs/concepts/scheduling-eviction/pod-priority-preemption/)
|
||||
|
||||
|
||||
|
|
|
@ -86,7 +86,7 @@ enforced/disallowed:
|
|||
<tr>
|
||||
<td>Capabilities</td>
|
||||
<td>
|
||||
Adding additional capabilities beyond the <a href="https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities">default set</a> must be disallowed.<br>
|
||||
Adding <tt>NET_RAW</tt> or capabilities beyond the <a href="https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities">default set</a> must be disallowed.<br>
|
||||
<br><b>Restricted Fields:</b><br>
|
||||
spec.containers[*].securityContext.capabilities.add<br>
|
||||
spec.initContainers[*].securityContext.capabilities.add<br>
|
||||
|
@ -194,7 +194,7 @@ well as lower-trust users.The following listed controls should be enforced/disal
|
|||
<tr>
|
||||
<td>Volume Types</td>
|
||||
<td>
|
||||
In addition to restricting HostPath volumes, the restricted profile limits usage of non-core volume types to those defined through PersistentVolumes.<br>
|
||||
In addition to restricting HostPath volumes, the restricted profile limits usage of non-ephemeral volume types to those defined through PersistentVolumes.<br>
|
||||
<br><b>Restricted Fields:</b><br>
|
||||
spec.volumes[*].hostPath<br>
|
||||
spec.volumes[*].gcePersistentDisk<br>
|
||||
|
@ -216,7 +216,6 @@ well as lower-trust users.The following listed controls should be enforced/disal
|
|||
spec.volumes[*].portworxVolume<br>
|
||||
spec.volumes[*].scaleIO<br>
|
||||
spec.volumes[*].storageos<br>
|
||||
spec.volumes[*].csi<br>
|
||||
<br><b>Allowed Values:</b> undefined/nil<br>
|
||||
</td>
|
||||
</tr>
|
||||
|
@ -283,9 +282,9 @@ of individual policies are not defined here.
|
|||
|
||||
[**PodSecurityPolicy**](/docs/concepts/policy/pod-security-policy/)
|
||||
|
||||
- [Privileged](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/privileged-psp.yaml)
|
||||
- [Baseline](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/baseline-psp.yaml)
|
||||
- [Restricted](https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/policy/restricted-psp.yaml)
|
||||
- {{< example file="policy/privileged-psp.yaml" >}}Privileged{{< /example >}}
|
||||
- {{< example file="policy/baseline-psp.yaml" >}}Baseline{{< /example >}}
|
||||
- {{< example file="policy/restricted-psp.yaml" >}}Restricted{{< /example >}}
|
||||
|
||||
## FAQ
|
||||
|
||||
|
|
|
@ -7,6 +7,7 @@ content_type: concept
|
|||
weight: 20
|
||||
---
|
||||
<!-- overview -->
|
||||
|
||||
Kubernetes creates DNS records for services and pods. You can contact
|
||||
services with consistent DNS names instead of IP addresses.
|
||||
|
||||
|
@ -49,7 +50,7 @@ options ndots:5
|
|||
```
|
||||
|
||||
In summary, a pod in the _test_ namespace can successfully resolve either
|
||||
`data.prod` or `data.prod.cluster.local`.
|
||||
`data.prod` or `data.prod.svc.cluster.local`.
|
||||
|
||||
### DNS Records
|
||||
|
||||
|
@ -261,6 +262,8 @@ spec:
|
|||
|
||||
### Pod's DNS Config {#pod-dns-config}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.14" state="stable" >}}
|
||||
|
||||
Pod's DNS Config allows users more control over the DNS settings for a Pod.
|
||||
|
||||
The `dnsConfig` field is optional and it can work with any `dnsPolicy` settings.
|
||||
|
@ -310,18 +313,6 @@ search default.svc.cluster-domain.example svc.cluster-domain.example cluster-dom
|
|||
options ndots:5
|
||||
```
|
||||
|
||||
### Feature availability
|
||||
|
||||
The availability of Pod DNS Config and DNS Policy "`None`" is shown as below.
|
||||
|
||||
| k8s version | Feature support |
|
||||
| :---------: |:-----------:|
|
||||
| 1.14 | Stable |
|
||||
| 1.10 | Beta (on by default)|
|
||||
| 1.9 | Alpha |
|
||||
|
||||
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
|
||||
|
|
|
@ -249,5 +249,4 @@ implementation in `kube-proxy`.
|
|||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Learn about [Enabling EndpointSlices](/docs/tasks/administer-cluster/enabling-endpointslices)
|
||||
* Read [Connecting Applications with Services](/docs/concepts/services-networking/connect-applications-service/)
|
||||
|
|
|
@ -32,6 +32,7 @@ Kubernetes as a project supports and maintains [AWS](https://github.com/kubernet
|
|||
Citrix Application Delivery Controller.
|
||||
* [Contour](https://projectcontour.io/) is an [Envoy](https://www.envoyproxy.io/) based ingress controller.
|
||||
* [EnRoute](https://getenroute.io/) is an [Envoy](https://www.envoyproxy.io) based API gateway that can run as an ingress controller.
|
||||
* [Easegress IngressController](https://github.com/megaease/easegress/blob/main/doc/ingresscontroller.md) is an [Easegress](https://megaease.com/easegress/) based API gateway that can run as an ingress controller.
|
||||
* F5 BIG-IP [Container Ingress Services for Kubernetes](https://clouddocs.f5.com/containers/latest/userguide/kubernetes/)
|
||||
lets you use an Ingress to configure F5 BIG-IP virtual servers.
|
||||
* [Gloo](https://gloo.solo.io) is an open-source ingress controller based on [Envoy](https://www.envoyproxy.io),
|
||||
|
|
|
@ -255,7 +255,7 @@ The above rule allows any Pod with label `db` on the namespace `default` to comm
|
|||
|
||||
The following restrictions apply when using this field (a minimal example follows this list):
|
||||
* As an alpha feature, this is disabled by default. To enable the `endPort` field at a cluster level, you (or your cluster administrator) need to enable the `NetworkPolicyEndPort` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the API server with `--feature-gates=NetworkPolicyEndPort=true,…`.
|
||||
* The `endPort` field must be equal than or greater to the `port` field.
|
||||
* The `endPort` field must be equal to or greater than the `port` field.
|
||||
* `endPort` can only be defined if `port` is also defined.
|
||||
* Both ports must be numeric.
|
||||
|
||||
|
|
|
@ -72,7 +72,7 @@ A Service in Kubernetes is a REST object, similar to a Pod. Like all of the
|
|||
REST objects, you can `POST` a Service definition to the API server to create
|
||||
a new instance.
|
||||
The name of a Service object must be a valid
|
||||
[DNS label name](/docs/concepts/overview/working-with-objects/names#dns-label-names).
|
||||
[RFC 1035 label name](/docs/concepts/overview/working-with-objects/names#rfc-1035-label-names).
|
||||
|
||||
For example, suppose you have a set of Pods where each listens on TCP port 9376
|
||||
and contains a label `app=MyApp`:
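
A minimal sketch of a Service for that set of Pods could look like the following (the Service name is illustrative, and the page's full example may differ):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service        # hypothetical name
spec:
  selector:
    app: MyApp            # matches the Pods labelled app=MyApp
  ports:
    - protocol: TCP
      port: 80            # port exposed by the Service
      targetPort: 9376    # port the Pods listen on
```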
|
||||
|
@ -188,7 +188,7 @@ selectors and uses DNS names instead. For more information, see the
|
|||
[ExternalName](#externalname) section later in this document.
|
||||
|
||||
### Over Capacity Endpoints
|
||||
If an Endpoints resource has more than 1000 endpoints then a Kubernetes v1.21 (or later)
|
||||
If an Endpoints resource has more than 1000 endpoints then a Kubernetes v1.21
|
||||
cluster annotates that Endpoints with `endpoints.kubernetes.io/over-capacity: warning`.
|
||||
This annotation indicates that the affected Endpoints object is over capacity.
|
||||
|
||||
|
@ -215,7 +215,7 @@ each Service port. The value of this field is mirrored by the corresponding
|
|||
Endpoints and EndpointSlice objects.
|
||||
|
||||
This field follows standard Kubernetes label syntax. Values should either be
|
||||
[IANA standard service names](http://www.iana.org/assignments/service-names) or
|
||||
[IANA standard service names](https://www.iana.org/assignments/service-names) or
|
||||
domain prefixed names such as `mycompany.com/my-custom-protocol`.
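
For example, a hedged sketch of a Service port that sets `appProtocol` (the Service name and the chosen protocol value are assumptions for illustration):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service            # hypothetical name
spec:
  selector:
    app: MyApp
  ports:
  - name: web
    protocol: TCP
    appProtocol: http         # an IANA service name, or e.g. mycompany.com/my-custom-protocol
    port: 80
    targetPort: 9376
```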
|
||||
|
||||
## Virtual IPs and service proxies
|
||||
|
|
|
@ -76,7 +76,7 @@ for provisioning PVs. This field must be specified.
|
|||
| Glusterfs | ✓ | [Glusterfs](#glusterfs) |
|
||||
| iSCSI | - | - |
|
||||
| Quobyte | ✓ | [Quobyte](#quobyte) |
|
||||
| NFS | - | - |
|
||||
| NFS | - | [NFS](#nfs) |
|
||||
| RBD | ✓ | [Ceph RBD](#ceph-rbd) |
|
||||
| VsphereVolume | ✓ | [vSphere](#vsphere) |
|
||||
| PortworxVolume | ✓ | [Portworx Volume](#portworx-volume) |
|
||||
|
@ -189,7 +189,7 @@ and pre-created PVs, but you'll need to look at the documentation for a specific
|
|||
to see its supported topology keys and examples.
|
||||
|
||||
{{< note >}}
|
||||
If you choose to use `waitForFirstConsumer`, do not use `nodeName` in the Pod spec
|
||||
If you choose to use `WaitForFirstConsumer`, do not use `nodeName` in the Pod spec
|
||||
to specify node affinity. If `nodeName` is used in this case, the scheduler will be bypassed and the PVC will remain in a `pending` state.
|
||||
|
||||
Instead, you can use a node selector for the hostname in this case, as shown below.
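
A minimal sketch of that approach (the Pod name, image, and node hostname are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: task-pod                      # hypothetical name
spec:
  nodeSelector:
    kubernetes.io/hostname: kube-01   # placeholder node hostname
  containers:
  - name: task
    image: nginx                      # placeholder image
```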
|
||||
|
@ -423,6 +423,29 @@ parameters:
|
|||
`gluster-dynamic-<claimname>`. The dynamic endpoint and service are automatically
|
||||
deleted when the persistent volume claim is deleted.
|
||||
|
||||
### NFS
|
||||
|
||||
```yaml
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: example-nfs
|
||||
provisioner: example.com/external-nfs
|
||||
parameters:
|
||||
server: nfs-server.example.com
|
||||
path: /share
|
||||
readOnly: false
|
||||
```
|
||||
|
||||
* `server`: Server is the hostname or IP address of the NFS server.
|
||||
* `path`: Path that is exported by the NFS server.
|
||||
* `readOnly`: A flag indicating whether the storage will be mounted as read only (default false).
|
||||
|
||||
Kubernetes doesn't include an internal NFS provisioner. You need to use an external provisioner to create a StorageClass for NFS.
|
||||
Here are some examples:
|
||||
* [NFS Ganesha server and external provisioner](https://github.com/kubernetes-sigs/nfs-ganesha-server-and-external-provisioner)
|
||||
* [NFS subdir external provisioner](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner)
|
||||
|
||||
### OpenStack Cinder
|
||||
|
||||
```yaml
|
||||
|
@ -658,11 +681,11 @@ metadata:
|
|||
provisioner: kubernetes.io/azure-disk
|
||||
parameters:
|
||||
storageaccounttype: Standard_LRS
|
||||
kind: Shared
|
||||
kind: managed
|
||||
```
|
||||
|
||||
* `storageaccounttype`: Azure storage account Sku tier. Default is empty.
|
||||
* `kind`: Possible values are `shared` (default), `dedicated`, and `managed`.
|
||||
* `kind`: Possible values are `shared`, `dedicated`, and `managed` (default).
|
||||
When `kind` is `shared`, all unmanaged disks are created in a few shared
|
||||
storage accounts in the same resource group as the cluster. When `kind` is
|
||||
`dedicated`, a new dedicated storage account will be created for the new
|
||||
|
|
|
@ -529,6 +529,15 @@ See the [GlusterFS example](https://github.com/kubernetes/examples/tree/{{< para
|
|||
|
||||
### hostPath {#hostpath}
|
||||
|
||||
{{< warning >}}
|
||||
HostPath volumes present many security risks, and it is a best practice to avoid the use of
|
||||
HostPaths when possible. When a HostPath volume must be used, it should be scoped to only the
|
||||
required file or directory, and mounted as ReadOnly.
|
||||
|
||||
If restricting HostPath access to specific directories through AdmissionPolicy, `volumeMounts` MUST
|
||||
be required to use `readOnly` mounts for the policy to be effective.
|
||||
{{< /warning >}}
|
||||
|
||||
A `hostPath` volume mounts a file or directory from the host node's filesystem
|
||||
into your Pod. This is not something that most Pods will need, but it offers a
|
||||
powerful escape hatch for some applications.
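
Following the warning above, a cautious sketch mounts a single host file read-only (the Pod name, image, and path are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-reader          # hypothetical name
spec:
  containers:
  - name: reader
    image: nginx                 # placeholder image
    volumeMounts:
    - name: host-log
      mountPath: /var/local/app.log
      readOnly: true             # mount read-only, as recommended
  volumes:
  - name: host-log
    hostPath:
      path: /var/log/app.log     # scope to a single file
      type: File                 # the file must already exist on the node
```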
|
||||
|
@ -558,6 +567,9 @@ The supported values for field `type` are:
|
|||
|
||||
Watch out when using this type of volume, because:
|
||||
|
||||
* HostPaths can expose privileged system credentials (such as for the Kubelet) or privileged APIs
|
||||
(such as container runtime socket), which can be used for container escape or to attack other
|
||||
parts of the cluster.
|
||||
* Pods with identical configuration (such as created from a PodTemplate) may
|
||||
behave differently on different nodes due to different files on the nodes
|
||||
* The files or directories created on the underlying hosts are only writable by root. You
|
||||
|
|
|
@ -32,7 +32,8 @@ different flags and/or different memory and cpu requests for different hardware
|
|||
|
||||
### Create a DaemonSet
|
||||
|
||||
You can describe a DaemonSet in a YAML file. For example, the `daemonset.yaml` file below describes a DaemonSet that runs the fluentd-elasticsearch Docker image:
|
||||
You can describe a DaemonSet in a YAML file. For example, the `daemonset.yaml` file below
|
||||
describes a DaemonSet that runs the fluentd-elasticsearch Docker image:
|
||||
|
||||
{{< codenew file="controllers/daemonset.yaml" >}}
|
||||
|
||||
|
@ -46,19 +47,23 @@ kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml
|
|||
|
||||
As with all other Kubernetes config, a DaemonSet needs `apiVersion`, `kind`, and `metadata` fields. For
|
||||
general information about working with config files, see
|
||||
[running stateless applications](/docs/tasks/run-application/run-stateless-application-deployment/),
|
||||
[configuring containers](/docs/tasks/), and [object management using kubectl](/docs/concepts/overview/working-with-objects/object-management/) documents.
|
||||
[running stateless applications](/docs/tasks/run-application/run-stateless-application-deployment/)
|
||||
and [object management using kubectl](/docs/concepts/overview/working-with-objects/object-management/).
|
||||
|
||||
The name of a DaemonSet object must be a valid
|
||||
[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
|
||||
|
||||
A DaemonSet also needs a [`.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) section.
|
||||
A DaemonSet also needs a
|
||||
[`.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)
|
||||
section.
|
||||
|
||||
### Pod Template
|
||||
|
||||
The `.spec.template` is one of the required fields in `.spec`.
|
||||
|
||||
The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/#pod-templates). It has exactly the same schema as a {{< glossary_tooltip text="Pod" term_id="pod" >}}, except it is nested and does not have an `apiVersion` or `kind`.
|
||||
The `.spec.template` is a [pod template](/docs/concepts/workloads/pods/#pod-templates).
|
||||
It has exactly the same schema as a {{< glossary_tooltip text="Pod" term_id="pod" >}},
|
||||
except it is nested and does not have an `apiVersion` or `kind`.
|
||||
|
||||
In addition to required fields for a Pod, a Pod template in a DaemonSet has to specify appropriate
|
||||
labels (see [pod selector](#pod-selector)).
|
||||
|
@ -79,20 +84,23 @@ unintentional orphaning of Pods, and it was found to be confusing to users.
|
|||
|
||||
The `.spec.selector` is an object consisting of two fields:
|
||||
|
||||
* `matchLabels` - works the same as the `.spec.selector` of a [ReplicationController](/docs/concepts/workloads/controllers/replicationcontroller/).
|
||||
* `matchLabels` - works the same as the `.spec.selector` of a
|
||||
[ReplicationController](/docs/concepts/workloads/controllers/replicationcontroller/).
|
||||
* `matchExpressions` - allows you to build more sophisticated selectors by specifying a key,
|
||||
a list of values, and an operator that relates the key and values.
|
||||
|
||||
When the two are specified, the result is ANDed.
|
||||
|
||||
If the `.spec.selector` is specified, it must match the `.spec.template.metadata.labels`. Config with these not matching will be rejected by the API.
|
||||
If the `.spec.selector` is specified, it must match the `.spec.template.metadata.labels`.
|
||||
Config with these not matching will be rejected by the API.
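
As a quick sketch (the names and labels are illustrative), a selector that matches the Pod template's labels looks like:

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: example-daemon            # hypothetical name
spec:
  selector:
    matchLabels:
      name: example-daemon        # must match the template labels below
  template:
    metadata:
      labels:
        name: example-daemon
    spec:
      containers:
      - name: daemon
        image: nginx              # placeholder image
```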
|
||||
|
||||
### Running Pods on select Nodes
|
||||
|
||||
If you specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will
|
||||
create Pods on nodes which match that [node
|
||||
selector](/docs/concepts/scheduling-eviction/assign-pod-node/). Likewise if you specify a `.spec.template.spec.affinity`,
|
||||
then DaemonSet controller will create Pods on nodes which match that [node affinity](/docs/concepts/scheduling-eviction/assign-pod-node/).
|
||||
create Pods on nodes which match that [node selector](/docs/concepts/scheduling-eviction/assign-pod-node/).
|
||||
Likewise if you specify a `.spec.template.spec.affinity`,
|
||||
then DaemonSet controller will create Pods on nodes which match that
|
||||
[node affinity](/docs/concepts/scheduling-eviction/assign-pod-node/).
|
||||
If you do not specify either, then the DaemonSet controller will create Pods on all nodes.
|
||||
|
||||
## How Daemon Pods are scheduled
|
||||
|
@ -106,18 +114,19 @@ node that a Pod runs on is selected by the Kubernetes scheduler. However,
|
|||
DaemonSet pods are created and scheduled by the DaemonSet controller instead.
|
||||
That introduces the following issues:
|
||||
|
||||
* Inconsistent Pod behavior: Normal Pods waiting to be scheduled are created
|
||||
and in `Pending` state, but DaemonSet pods are not created in `Pending`
|
||||
state. This is confusing to the user.
|
||||
* [Pod preemption](/docs/concepts/configuration/pod-priority-preemption/)
|
||||
is handled by default scheduler. When preemption is enabled, the DaemonSet controller
|
||||
will make scheduling decisions without considering pod priority and preemption.
|
||||
* Inconsistent Pod behavior: Normal Pods waiting to be scheduled are created
|
||||
and in `Pending` state, but DaemonSet pods are not created in `Pending`
|
||||
state. This is confusing to the user.
|
||||
* [Pod preemption](/docs/concepts/scheduling-eviction/pod-priority-preemption/)
|
||||
is handled by default scheduler. When preemption is enabled, the DaemonSet controller
|
||||
will make scheduling decisions without considering pod priority and preemption.
|
||||
|
||||
`ScheduleDaemonSetPods` allows you to schedule DaemonSets using the default
|
||||
scheduler instead of the DaemonSet controller, by adding the `NodeAffinity` term
|
||||
to the DaemonSet pods, instead of the `.spec.nodeName` term. The default
|
||||
scheduler is then used to bind the pod to the target host. If node affinity of
|
||||
the DaemonSet pod already exists, it is replaced (the original node affinity was taken into account before selecting the target host). The DaemonSet controller only
|
||||
the DaemonSet pod already exists, it is replaced (the original node affinity was
|
||||
taken into account before selecting the target host). The DaemonSet controller only
|
||||
performs these operations when creating or modifying DaemonSet pods, and no
|
||||
changes are made to the `spec.template` of the DaemonSet.
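
The added term looks roughly like the following sketch, where the target node name is a placeholder:

```yaml
nodeAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
    - matchFields:
      # Binds the Pod to a single node by its object name.
      - key: metadata.name
        operator: In
        values:
        - target-host-name        # placeholder node name
```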
|
||||
|
||||
|
@ -158,10 +167,12 @@ Some possible patterns for communicating with Pods in a DaemonSet are:
|
|||
|
||||
- **Push**: Pods in the DaemonSet are configured to send updates to another service, such
|
||||
as a stats database. They do not have clients.
|
||||
- **NodeIP and Known Port**: Pods in the DaemonSet can use a `hostPort`, so that the pods are reachable via the node IPs. Clients know the list of node IPs somehow, and know the port by convention.
|
||||
- **DNS**: Create a [headless service](/docs/concepts/services-networking/service/#headless-services) with the same pod selector,
|
||||
and then discover DaemonSets using the `endpoints` resource or retrieve multiple A records from
|
||||
DNS.
|
||||
- **NodeIP and Known Port**: Pods in the DaemonSet can use a `hostPort`, so that the pods
|
||||
are reachable via the node IPs.
|
||||
Clients know the list of node IPs somehow, and know the port by convention.
|
||||
- **DNS**: Create a [headless service](/docs/concepts/services-networking/service/#headless-services)
|
||||
with the same pod selector, and then discover DaemonSets using the `endpoints`
|
||||
resource or retrieve multiple A records from DNS.
|
||||
- **Service**: Create a service with the same Pod selector, and use the service to reach a
|
||||
daemon on a random node. (No way to reach specific node.)
|
||||
|
||||
|
@ -219,6 +230,8 @@ storage servers).
|
|||
Use a Deployment for stateless services, like frontends, where scaling up and down the
|
||||
number of replicas and rolling out updates are more important than controlling exactly which host
|
||||
the Pod runs on. Use a DaemonSet when it is important that a copy of a Pod always run on
|
||||
all or certain hosts, and when it needs to start before other Pods.
|
||||
all or certain hosts, if the DaemonSet provides node-level functionality that allows other Pods to run correctly on that particular node.
|
||||
|
||||
For example, [network plugins](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) often include a component that runs as a DaemonSet. The DaemonSet component makes sure that the node where it's running has working cluster networking.
|
||||
|
||||
|
||||
|
|
|
@ -1,184 +0,0 @@
|
|||
---
|
||||
title: Garbage Collection
|
||||
content_type: concept
|
||||
weight: 60
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
The role of the Kubernetes garbage collector is to delete certain objects
|
||||
that once had an owner, but no longer have an owner.
|
||||
|
||||
|
||||
<!-- body -->
|
||||
|
||||
## Owners and dependents
|
||||
|
||||
Some Kubernetes objects are owners of other objects. For example, a ReplicaSet
|
||||
is the owner of a set of Pods. The owned objects are called *dependents* of the
|
||||
owner object. Every dependent object has a `metadata.ownerReferences` field that
|
||||
points to the owning object.
|
||||
|
||||
Sometimes, Kubernetes sets the value of `ownerReference` automatically. For
|
||||
example, when you create a ReplicaSet, Kubernetes automatically sets the
|
||||
`ownerReference` field of each Pod in the ReplicaSet. In 1.8, Kubernetes
|
||||
automatically sets the value of `ownerReference` for objects created or adopted
|
||||
by ReplicationController, ReplicaSet, StatefulSet, DaemonSet, Deployment, Job
|
||||
and CronJob.
|
||||
|
||||
You can also specify relationships between owners and dependents by manually
|
||||
setting the `ownerReference` field.
|
||||
|
||||
Here's a configuration file for a ReplicaSet that has three Pods:
|
||||
|
||||
{{< codenew file="controllers/replicaset.yaml" >}}
|
||||
|
||||
If you create the ReplicaSet and then view the Pod metadata, you can see
|
||||
OwnerReferences field:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/controllers/replicaset.yaml
|
||||
kubectl get pods --output=yaml
|
||||
```
|
||||
|
||||
The output shows that the Pod owner is a ReplicaSet named `my-repset`:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
...
|
||||
ownerReferences:
|
||||
- apiVersion: apps/v1
|
||||
controller: true
|
||||
blockOwnerDeletion: true
|
||||
kind: ReplicaSet
|
||||
name: my-repset
|
||||
uid: d9607e19-f88f-11e6-a518-42010a800195
|
||||
...
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
Cross-namespace owner references are disallowed by design.
|
||||
|
||||
Namespaced dependents can specify cluster-scoped or namespaced owners.
|
||||
A namespaced owner **must** exist in the same namespace as the dependent.
|
||||
If it does not, the owner reference is treated as absent, and the dependent
|
||||
is subject to deletion once all owners are verified absent.
|
||||
|
||||
Cluster-scoped dependents can only specify cluster-scoped owners.
|
||||
In v1.20+, if a cluster-scoped dependent specifies a namespaced kind as an owner,
|
||||
it is treated as having an unresolveable owner reference, and is not able to be garbage collected.
|
||||
|
||||
In v1.20+, if the garbage collector detects an invalid cross-namespace `ownerReference`,
|
||||
or a cluster-scoped dependent with an `ownerReference` referencing a namespaced kind, a warning Event
|
||||
with a reason of `OwnerRefInvalidNamespace` and an `involvedObject` of the invalid dependent is reported.
|
||||
You can check for that kind of Event by running
|
||||
`kubectl get events -A --field-selector=reason=OwnerRefInvalidNamespace`.
|
||||
{{< /note >}}
|
||||
|
||||
## Controlling how the garbage collector deletes dependents
|
||||
|
||||
When you delete an object, you can specify whether the object's dependents are
|
||||
also deleted automatically. Deleting dependents automatically is called *cascading
|
||||
deletion*. There are two modes of *cascading deletion*: *background* and *foreground*.
|
||||
|
||||
If you delete an object without deleting its dependents
|
||||
automatically, the dependents are said to be *orphaned*.
|
||||
|
||||
### Foreground cascading deletion
|
||||
|
||||
In *foreground cascading deletion*, the root object first
|
||||
enters a "deletion in progress" state. In the "deletion in progress" state,
|
||||
the following things are true:
|
||||
|
||||
* The object is still visible via the REST API
|
||||
* The object's `deletionTimestamp` is set
|
||||
* The object's `metadata.finalizers` contains the value "foregroundDeletion".
|
||||
|
||||
Once the "deletion in progress" state is set, the garbage
|
||||
collector deletes the object's dependents. Once the garbage collector has deleted all
|
||||
"blocking" dependents (objects with `ownerReference.blockOwnerDeletion=true`), it deletes
|
||||
the owner object.
|
||||
|
||||
Note that in the "foregroundDeletion", only dependents with
|
||||
`ownerReference.blockOwnerDeletion=true` block the deletion of the owner object.
|
||||
Kubernetes version 1.7 added an [admission controller](/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement) that controls user access to set
|
||||
`blockOwnerDeletion` to true based on delete permissions on the owner object, so that
|
||||
unauthorized dependents cannot delay deletion of an owner object.
|
||||
|
||||
If an object's `ownerReferences` field is set by a controller (such as Deployment or ReplicaSet),
|
||||
blockOwnerDeletion is set automatically and you do not need to manually modify this field.
|
||||
|
||||
### Background cascading deletion
|
||||
|
||||
In *background cascading deletion*, Kubernetes deletes the owner object
|
||||
immediately and the garbage collector then deletes the dependents in
|
||||
the background.
|
||||
|
||||
### Setting the cascading deletion policy
|
||||
|
||||
To control the cascading deletion policy, set the `propagationPolicy`
|
||||
field on the `deleteOptions` argument when deleting an Object. Possible values include "Orphan",
|
||||
"Foreground", or "Background".
|
||||
|
||||
Here's an example that deletes dependents in background:
|
||||
|
||||
```shell
|
||||
kubectl proxy --port=8080
|
||||
curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-repset \
|
||||
-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Background"}' \
|
||||
-H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
Here's an example that deletes dependents in foreground:
|
||||
|
||||
```shell
|
||||
kubectl proxy --port=8080
|
||||
curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-repset \
|
||||
-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}' \
|
||||
-H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
Here's an example that orphans dependents:
|
||||
|
||||
```shell
|
||||
kubectl proxy --port=8080
|
||||
curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-repset \
|
||||
-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Orphan"}' \
|
||||
-H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
kubectl also supports cascading deletion.
|
||||
|
||||
To delete dependents in the foreground using kubectl, set `--cascade=foreground`. To
|
||||
orphan dependents, set `--cascade=orphan`.
|
||||
|
||||
The default behavior is to delete the dependents in the background which is the
|
||||
behavior when `--cascade` is omitted or explicitly set to `background`.
|
||||
|
||||
Here's an example that orphans the dependents of a ReplicaSet:
|
||||
|
||||
```shell
|
||||
kubectl delete replicaset my-repset --cascade=orphan
|
||||
```
|
||||
|
||||
### Additional note on Deployments
|
||||
|
||||
Prior to 1.7, When using cascading deletes with Deployments you *must* use `propagationPolicy: Foreground`
|
||||
to delete not only the ReplicaSets created, but also their Pods. If this type of _propagationPolicy_
|
||||
is not used, only the ReplicaSets will be deleted, and the Pods will be orphaned.
|
||||
See [kubeadm/#149](https://github.com/kubernetes/kubeadm/issues/149#issuecomment-284766613) for more information.
|
||||
|
||||
## Known issues
|
||||
|
||||
Tracked at [#26120](https://github.com/kubernetes/kubernetes/issues/26120)
|
||||
|
||||
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
|
||||
[Design Doc 1](https://git.k8s.io/community/contributors/design-proposals/api-machinery/garbage-collection.md)
|
||||
|
||||
[Design Doc 2](https://git.k8s.io/community/contributors/design-proposals/api-machinery/synchronous-garbage-collection.md)
|
|
@ -255,7 +255,8 @@ from failed Jobs is not lost inadvertently.
|
|||
|
||||
## Job termination and cleanup
|
||||
|
||||
When a Job completes, no more Pods are created, but the Pods are not deleted either. Keeping them around
|
||||
When a Job completes, no more Pods are created, but the Pods are [usually](#pod-backoff-failure-policy) not deleted either.
|
||||
Keeping them around
|
||||
allows you to still view the logs of completed pods to check for errors, warnings, or other diagnostic output.
|
||||
The job object also remains after it is completed so that you can view its status. It is up to the user to delete
|
||||
old jobs after noting their status. Delete the job with `kubectl` (e.g. `kubectl delete jobs/pi` or `kubectl delete -f ./job.yaml`). When you delete the job using `kubectl`, all the pods it created are deleted too.
|
||||
|
@ -304,7 +305,7 @@ cleaned up by CronJobs based on the specified capacity-based cleanup policy.
|
|||
|
||||
### TTL mechanism for finished Jobs
|
||||
|
||||
{{< feature-state for_k8s_version="v1.12" state="alpha" >}}
|
||||
{{< feature-state for_k8s_version="v1.21" state="beta" >}}
|
||||
|
||||
Another way to clean up finished Jobs (either `Complete` or `Failed`)
|
||||
automatically is to use a TTL mechanism provided by a
|
||||
|
@ -342,11 +343,6 @@ If the field is set to `0`, the Job will be eligible to be automatically deleted
|
|||
immediately after it finishes. If the field is unset, this Job won't be cleaned
|
||||
up by the TTL controller after it finishes.
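
For reference, a minimal sketch of a Job that opts into TTL-based cleanup (the Job name, image, and TTL value are illustrative):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi-with-ttl               # hypothetical name
spec:
  ttlSecondsAfterFinished: 100    # eligible for deletion ~100s after finishing
  template:
    spec:
      containers:
      - name: pi
        image: perl               # placeholder image
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
```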
|
||||
|
||||
Note that this TTL mechanism is alpha, with feature gate `TTLAfterFinished`. For
|
||||
more information, see the documentation for
|
||||
[TTL controller](/docs/concepts/workloads/controllers/ttlafterfinished/) for
|
||||
finished resources.
|
||||
|
||||
## Job patterns
|
||||
|
||||
The Job object can be used to support reliable parallel execution of Pods. The Job object is not
|
||||
|
|
|
@ -282,6 +282,17 @@ on the Kubernetes API server for each static Pod.
|
|||
This means that the Pods running on a node are visible on the API server,
|
||||
but cannot be controlled from there.
|
||||
|
||||
## Container probes
|
||||
|
||||
A _probe_ is a diagnostic performed periodically by the kubelet on a container. To perform a diagnostic, the kubelet can invoke different actions:
|
||||
|
||||
- `ExecAction` (performed with the help of the container runtime)
|
||||
- `TCPSocketAction` (checked directly by the kubelet)
|
||||
- `HTTPGetAction` (checked directly by the kubelet)
|
||||
|
||||
You can read more about [probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
|
||||
in the Pod Lifecycle documentation.
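
For instance, a rough sketch of a container with an HTTP liveness probe (the names, path, port, and timings are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: liveness-http          # hypothetical name
spec:
  containers:
  - name: app
    image: nginx               # placeholder image
    livenessProbe:
      httpGet:                 # HTTPGetAction, checked directly by the kubelet
        path: /healthz
        port: 8080
      initialDelaySeconds: 3
      periodSeconds: 10
```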
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Learn about the [lifecycle of a Pod](/docs/concepts/workloads/pods/pod-lifecycle/).
|
||||
|
|
|
@ -31,7 +31,7 @@ an application. Examples are:
|
|||
- cloud provider or hypervisor failure makes VM disappear
|
||||
- a kernel panic
|
||||
- the node disappears from the cluster due to cluster network partition
|
||||
- eviction of a pod due to the node being [out-of-resources](/docs/tasks/administer-cluster/out-of-resource/).
|
||||
- eviction of a pod due to the node being [out-of-resources](/docs/concepts/scheduling-eviction/node-pressure-eviction/).
|
||||
|
||||
Except for the out-of-resources condition, all these conditions
|
||||
should be familiar to most users; they are not specific
|
||||
|
@ -86,7 +86,7 @@ rolling out node software updates can cause voluntary disruptions. Also, some im
|
|||
of cluster (node) autoscaling may cause voluntary disruptions to defragment and compact nodes.
|
||||
Your cluster administrator or hosting provider should have documented what level of voluntary
|
||||
disruptions, if any, to expect. Certain configuration options, such as
|
||||
[using PriorityClasses](/docs/concepts/configuration/pod-priority-preemption/)
|
||||
[using PriorityClasses](/docs/concepts/scheduling-eviction/pod-priority-preemption/)
|
||||
in your pod spec can also cause voluntary (and involuntary) disruptions.
|
||||
|
||||
|
||||
|
|
|
@ -291,7 +291,8 @@ Given the ordering and execution for init containers, the following rules
|
|||
for resource usage apply:
|
||||
|
||||
* The highest of any particular resource request or limit defined on all init
|
||||
containers is the *effective init request/limit*
|
||||
containers is the *effective init request/limit*. If any resource has no
|
||||
resource limit specified, this is considered the highest limit.
|
||||
* The Pod's *effective request/limit* for a resource is the higher of:
|
||||
* the sum of all app containers request/limit for a resource
|
||||
* the effective init request/limit for a resource
|
||||
|
|
|
@ -304,13 +304,23 @@ specify a readiness probe. In this case, the readiness probe might be the same
|
|||
as the liveness probe, but the existence of the readiness probe in the spec means
|
||||
that the Pod will start without receiving any traffic and only start receiving
|
||||
traffic after the probe starts succeeding.
|
||||
If your container needs to work on loading large data, configuration files, or
|
||||
migrations during startup, specify a readiness probe.
|
||||
|
||||
If you want your container to be able to take itself down for maintenance, you
|
||||
can specify a readiness probe that checks an endpoint specific to readiness that
|
||||
is different from the liveness probe.
|
||||
|
||||
If your app has a strict dependency on back-end services, you can implement both
|
||||
a liveness and a readiness probe. The liveness probe passes when the app itself
|
||||
is healthy, but the readiness probe additionally checks that each required
|
||||
back-end service is available. This helps you avoid directing traffic to Pods
|
||||
that can only respond with error messages.
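
A sketch of that pattern, using distinct endpoints for the two probes (the Pod name, image, paths, and port are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app-with-dependencies            # hypothetical name
spec:
  containers:
  - name: app
    image: registry.example/my-app:1.0   # placeholder image
    livenessProbe:
      httpGet:
        path: /healthz          # reports whether the app itself is healthy
        port: 8080
    readinessProbe:
      httpGet:
        path: /ready            # additionally checks required back-end services
        port: 8080
```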
|
||||
|
||||
If your container needs to work on loading large data, configuration files, or
|
||||
migrations during startup, you can use a
|
||||
[startup probe](#when-should-you-use-a-startup-probe). However, if you want to
|
||||
detect the difference between an app that has failed and an app that is still
|
||||
processing its startup data, you might prefer a readiness probe.
|
||||
|
||||
{{< note >}}
|
||||
If you want to be able to drain requests when the Pod is deleted, you do not
|
||||
necessarily need a readiness probe; on deletion, the Pod automatically puts itself
|
||||
|
@ -369,7 +379,7 @@ An example flow:
|
|||
as terminating (a graceful shutdown duration has been set), the kubelet begins the local Pod
|
||||
shutdown process.
|
||||
1. If one of the Pod's containers has defined a `preStop`
|
||||
[hook](/docs/concepts/containers/container-lifecycle-hooks/#hook-details), the kubelet
|
||||
[hook](/docs/concepts/containers/container-lifecycle-hooks), the kubelet
|
||||
runs that hook inside of the container. If the `preStop` hook is still running after the
|
||||
grace period expires, the kubelet requests a small, one-off grace period extension of 2
|
||||
seconds.
|
||||
|
|
|
@ -16,7 +16,7 @@ You can use _topology spread constraints_ to control how {{< glossary_tooltip te
|
|||
In versions of Kubernetes before v1.18, you must enable the `EvenPodsSpread`
|
||||
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) on
|
||||
the [API server](/docs/concepts/overview/components/#kube-apiserver) and the
|
||||
[scheduler](/docs/reference/generated/kube-scheduler/) in order to use Pod
|
||||
[scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/) in order to use Pod
|
||||
topology spread constraints.
|
||||
{{< /note >}}
|
||||
|
||||
|
@ -82,12 +82,11 @@ spec:
|
|||
You can define one or multiple `topologySpreadConstraint` to instruct the kube-scheduler how to place each incoming Pod in relation to the existing Pods across your cluster. The fields are:
|
||||
|
||||
- **maxSkew** describes the degree to which Pods may be unevenly distributed.
|
||||
It's the maximum permitted difference between the number of matching Pods in
|
||||
any two topology domains of a given topology type. It must be greater than
|
||||
zero. Its semantics differs according to the value of `whenUnsatisfiable`:
|
||||
It must be greater than zero. Its semantics differs according to the value of `whenUnsatisfiable`:
|
||||
- when `whenUnsatisfiable` equals to "DoNotSchedule", `maxSkew` is the maximum
|
||||
permitted difference between the number of matching pods in the target
|
||||
topology and the global minimum.
|
||||
topology and the global minimum
|
||||
(the minimum number of pods that match the label selector in a topology domain. For example, if you have 3 zones with 0, 2, and 3 matching pods respectively, the global minimum is 0).
|
||||
- when `whenUnsatisfiable` equals to "ScheduleAnyway", scheduler gives higher
|
||||
precedence to topologies that would help reduce the skew.
|
||||
- **topologyKey** is the key of node labels. If two Nodes are labelled with this key and have identical values for that label, the scheduler treats both Nodes as being in the same topology. The scheduler tries to place a balanced number of Pods into each topology domain.
|
||||
|
@ -96,6 +95,8 @@ You can define one or multiple `topologySpreadConstraint` to instruct the kube-s
|
|||
- `ScheduleAnyway` tells the scheduler to still schedule it while prioritizing nodes that minimize the skew.
|
||||
- **labelSelector** is used to find matching Pods. Pods that match this label selector are counted to determine the number of Pods in their corresponding topology domain. See [Label Selectors](/docs/concepts/overview/working-with-objects/labels/#label-selectors) for more details.
|
||||
|
||||
When a Pod defines more than one `topologySpreadConstraint`, those constraints are ANDed: The kube-scheduler looks for a node for the incoming Pod that satisfies all the constraints.
|
||||
|
||||
You can read more about this field by running `kubectl explain Pod.spec.topologySpreadConstraints`.
|
||||
|
||||
### Example: One TopologySpreadConstraint
|
||||
|
@ -387,7 +388,8 @@ for more details.
|
|||
|
||||
## Known Limitations
|
||||
|
||||
- Scaling down a Deployment may result in imbalanced Pods distribution.
|
||||
- There's no guarantee that the constraints remain satisfied when Pods are removed. For example, scaling down a Deployment may result in imbalanced Pods distribution.
|
||||
You can use [Descheduler](https://github.com/kubernetes-sigs/descheduler) to rebalance the Pods distribution.
|
||||
- Pods matched on tainted nodes are respected. See [Issue 80921](https://github.com/kubernetes/kubernetes/issues/80921)
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
|
|
@ -0,0 +1,25 @@
|
|||
---
|
||||
title: Viewing Site Analytics
|
||||
content_type: concept
|
||||
weight: 100
|
||||
card:
|
||||
name: contribute
|
||||
weight: 100
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
This page contains information about the kubernetes.io analytics dashboard.
|
||||
|
||||
|
||||
<!-- body -->
|
||||
|
||||
[View the dashboard](https://datastudio.google.com/reporting/fede2672-b2fd-402a-91d2-7473bdb10f04).
|
||||
|
||||
This dashboard is built using Google Data Studio and shows information collected on kubernetes.io using Google Analytics.
|
||||
|
||||
### Using the dashboard
|
||||
|
||||
By default, the dashboard shows all collected analytics for the past 30 days. Use the date selector to see data from a different date range. Other filtering options allow you to view data based on user location, the device used to access the site, the translation of the docs used, and more.
|
||||
|
||||
If you notice an issue with this dashboard, or would like to request any improvements, please [open an issue](https://github.com/kubernetes/website/issues/new/choose).
|
|
@ -6,7 +6,7 @@ weight: 40
|
|||
|
||||
<!-- overview -->
|
||||
|
||||
This page shows how to use the `update-imported-docs` script to generate
|
||||
This page shows how to use the `update-imported-docs.py` script to generate
|
||||
the Kubernetes reference documentation. The script automates
|
||||
the build setup and generates the reference documentation for a release.
|
||||
|
||||
|
@ -18,8 +18,8 @@ the build setup and generates the reference documentation for a release.
|
|||
|
||||
## Getting the docs repository
|
||||
|
||||
Make sure your `website` fork is up-to-date with the `kubernetes/website` master and clone
|
||||
your `website` fork.
|
||||
Make sure your `website` fork is up-to-date with the `kubernetes/website` remote on
|
||||
GitHub (`main` branch), and clone your `website` fork.
|
||||
|
||||
```shell
|
||||
mkdir github.com
|
||||
|
@ -39,7 +39,7 @@ see the [contributing upstream guide](/docs/contribute/generate-ref-docs/contrib
|
|||
|
||||
## Overview of update-imported-docs
|
||||
|
||||
The `update-imported-docs` script is located in the `<web-base>/update-imported-docs/`
|
||||
The `update-imported-docs.py` script is located in the `<web-base>/update-imported-docs/`
|
||||
directory.
|
||||
|
||||
The script builds the following references:
|
||||
|
@ -48,7 +48,7 @@ The script builds the following references:
|
|||
* The `kubectl` command reference
|
||||
* The Kubernetes API reference
|
||||
|
||||
The `update-imported-docs` script generates the Kubernetes reference documentation
|
||||
The `update-imported-docs.py` script generates the Kubernetes reference documentation
|
||||
from the Kubernetes source code. The script creates a temporary directory
|
||||
under `/tmp` on your machine and clones the required repositories: `kubernetes/kubernetes` and
|
||||
`kubernetes-sigs/reference-docs` into this directory.
|
||||
|
@ -69,7 +69,7 @@ The `generate-command` field defines a series of build instructions
|
|||
from `kubernetes-sigs/reference-docs/Makefile`. The `K8S_RELEASE` variable
|
||||
determines the version of the release.
|
||||
|
||||
The `update-imported-docs` script performs the following steps:
|
||||
The `update-imported-docs.py` script performs the following steps:
|
||||
|
||||
1. Clones the related repositories specified in a configuration file. For the
|
||||
purpose of generating reference docs, the repository that is cloned by
|
||||
|
@ -152,17 +152,17 @@ For example:
|
|||
|
||||
## Running the update-imported-docs tool
|
||||
|
||||
You can run the `update-imported-docs` tool as follows:
|
||||
You can run the `update-imported-docs.py` tool as follows:
|
||||
|
||||
```shell
|
||||
cd <web-base>/update-imported-docs
|
||||
./update-imported-docs <configuration-file.yml> <release-version>
|
||||
./update-imported-docs.py <configuration-file.yml> <release-version>
|
||||
```
|
||||
|
||||
For example:
|
||||
|
||||
```shell
|
||||
./update-imported-docs reference.yml 1.17
|
||||
./update-imported-docs.py reference.yml 1.17
|
||||
```
|
||||
|
||||
<!-- Revisit: is the release configuration used -->
|
||||
|
@ -171,7 +171,7 @@ For example:
|
|||
The `release.yml` configuration file contains instructions to fix relative links.
|
||||
To fix relative links within your imported files, set the `gen-absolute-links`
|
||||
property to `true`. You can find an example of this in
|
||||
[`release.yml`](https://github.com/kubernetes/website/blob/master/update-imported-docs/release.yml).
|
||||
[`release.yml`](https://github.com/kubernetes/website/blob/main/update-imported-docs/release.yml).
|
||||
|
||||
## Adding and committing changes in kubernetes/website
|
||||
|
||||
|
@ -254,4 +254,3 @@ running the build targets, see the following guides:
|
|||
* [Generating Reference Documentation for kubectl Commands](/docs/contribute/generate-ref-docs/kubectl/)
|
||||
* [Generating Reference Documentation for the Kubernetes API](/docs/contribute/generate-ref-docs/kubernetes-api/)
|
||||
|
||||
|
||||
|
|
|
@ -98,7 +98,7 @@ Once you've opened a localization PR, you can become members of the Kubernetes G
|
|||
|
||||
### Add your localization team in GitHub
|
||||
|
||||
Next, add your Kubernetes localization team to [`sig-docs/teams.yaml`](https://github.com/kubernetes/org/blob/master/config/kubernetes/sig-docs/teams.yaml). For an example of adding a localization team, see the PR to add the [Spanish localization team](https://github.com/kubernetes/org/pull/685).
|
||||
Next, add your Kubernetes localization team to [`sig-docs/teams.yaml`](https://github.com/kubernetes/org/blob/main/config/kubernetes/sig-docs/teams.yaml). For an example of adding a localization team, see the PR to add the [Spanish localization team](https://github.com/kubernetes/org/pull/685).
|
||||
|
||||
Members of `@kubernetes/sig-docs-**-owners` can approve PRs that change content within (and only within) your localization directory: `/content/**/`.
|
||||
|
||||
|
@ -117,7 +117,7 @@ For an example of adding a label, see the PR for adding the [Italian language la
|
|||
|
||||
### Modify the site configuration
|
||||
|
||||
The Kubernetes website uses Hugo as its web framework. The website's Hugo configuration resides in the [`config.toml`](https://github.com/kubernetes/website/tree/master/config.toml) file. To support a new localization, you'll need to modify `config.toml`.
|
||||
The Kubernetes website uses Hugo as its web framework. The website's Hugo configuration resides in the [`config.toml`](https://github.com/kubernetes/website/tree/main/config.toml) file. To support a new localization, you'll need to modify `config.toml`.
|
||||
|
||||
Add a configuration block for the new language to `config.toml`, under the existing `[languages]` block. The German block, for example, looks like:
|
||||
|
||||
|
@ -136,7 +136,7 @@ For more information about Hugo's multilingual support, see "[Multilingual Mode]
|
|||
|
||||
### Add a new localization directory
|
||||
|
||||
Add a language-specific subdirectory to the [`content`](https://github.com/kubernetes/website/tree/master/content) folder in the repository. For example, the two-letter code for German is `de`:
|
||||
Add a language-specific subdirectory to the [`content`](https://github.com/kubernetes/website/tree/main/content) folder in the repository. For example, the two-letter code for German is `de`:
|
||||
|
||||
```shell
|
||||
mkdir content/de
|
||||
|
@ -219,7 +219,7 @@ For an example of adding a new localization, see the PR to enable [docs in Frenc
|
|||
|
||||
### Add a localized README file
|
||||
|
||||
To guide other localization contributors, add a new [`README-**.md`](https://help.github.com/articles/about-readmes/) to the top level of k/website, where `**` is the two-letter language code. For example, a German README file would be `README-de.md`.
|
||||
To guide other localization contributors, add a new [`README-**.md`](https://help.github.com/articles/about-readmes/) to the top level of [k/website](https://github.com/kubernetes/website/), where `**` is the two-letter language code. For example, a German README file would be `README-de.md`.
|
||||
|
||||
Provide guidance to localization contributors in the localized `README-**.md` file. Include the same information contained in `README.md` as well as:
|
||||
|
||||
|
@ -276,15 +276,15 @@ To find source files for your target version:
|
|||
2. Select a branch for your target version from the following table:
|
||||
Target version | Branch
|
||||
-----|-----
|
||||
Latest version | [`master`](https://github.com/kubernetes/website/tree/master)
|
||||
Latest version | [`main`](https://github.com/kubernetes/website/tree/main)
|
||||
Previous version | [`release-{{< skew prevMinorVersion >}}`](https://github.com/kubernetes/website/tree/release-{{< skew prevMinorVersion >}})
|
||||
Next version | [`dev-{{< skew nextMinorVersion >}}`](https://github.com/kubernetes/website/tree/dev-{{< skew nextMinorVersion >}})
|
||||
|
||||
The `master` branch holds content for the current release `{{< latest-version >}}`. The release team will create a `{{< release-branch >}}` branch before the next release: v{{< skew nextMinorVersion >}}.
|
||||
The `main` branch holds content for the current release `{{< latest-version >}}`. The release team will create a `{{< release-branch >}}` branch before the next release: v{{< skew nextMinorVersion >}}.
|
||||
|
||||
### Site strings in i18n
|
||||
|
||||
Localizations must include the contents of [`data/i18n/en/en.toml`](https://github.com/kubernetes/website/blob/master/data/i18n/en/en.toml) in a new language-specific file. Using German as an example: `data/i18n/de/de.toml`.
|
||||
Localizations must include the contents of [`data/i18n/en/en.toml`](https://github.com/kubernetes/website/blob/main/data/i18n/en/en.toml) in a new language-specific file. Using German as an example: `data/i18n/de/de.toml`.
|
||||
|
||||
Add a new localization directory and file to `data/i18n/`. For example, with German (`de`):
|
||||
|
||||
|
@ -339,14 +339,14 @@ Repeat steps 1-4 as needed until the localization is complete. For example, subs
|
|||
Teams must merge localized content into the same branch from which the content was sourced.
|
||||
|
||||
For example:
|
||||
- a localization branch sourced from `master` must be merged into `master`.
|
||||
- a localization branch sourced from `release-1.19` must be merged into `release-1.19`.
|
||||
- a localization branch sourced from `main` must be merged into `main`.
|
||||
- a localization branch sourced from `release-{{ skew "prevMinorVersion" }}` must be merged into `release-{{ skew "prevMinorVersion" }}`.
|
||||
|
||||
{{< note >}}
|
||||
If your localization branch was created from `master` branch but it is not merged into `master` before new release branch `{{< release-branch >}}` created, merge it into both `master` and new release branch `{{< release-branch >}}`. To merge your localization branch into new release branch `{{< release-branch >}}`, you need to switch upstream branch of your localization branch to `{{< release-branch >}}`.
|
||||
If your localization branch was created from the `main` branch but was not merged into `main` before the new release branch `{{< release-branch >}}` was created, merge it into both `main` and the new release branch `{{< release-branch >}}`. To merge your localization branch into the new release branch `{{< release-branch >}}`, you need to switch the upstream branch of your localization branch to `{{< release-branch >}}`.
|
||||
{{< /note >}}
|
||||
|
||||
At the beginning of every team milestone, it's helpful to open an issue comparing upstream changes between the previous localization branch and the current localization branch. There are two scripts for comparing upstream changes. [`upstream_changes.py`](https://github.com/kubernetes/website/tree/master/scripts#upstream_changespy) is useful for checking the changes made to a specific file. And [`diff_l10n_branches.py`](https://github.com/kubernetes/website/tree/master/scripts#diff_l10n_branchespy) is useful for creating a list of outdated files for a specific localization branch.
|
||||
At the beginning of every team milestone, it's helpful to open an issue comparing upstream changes between the previous localization branch and the current localization branch. There are two scripts for comparing upstream changes. [`upstream_changes.py`](https://github.com/kubernetes/website/tree/main/scripts#upstream_changespy) is useful for checking the changes made to a specific file. And [`diff_l10n_branches.py`](https://github.com/kubernetes/website/tree/main/scripts#diff_l10n_branchespy) is useful for creating a list of outdated files for a specific localization branch.
|
||||
|
||||
While only approvers can open a new localization branch and merge pull requests, anyone can open a pull request for a new localization branch. No special permissions are required.
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ Anyone can write a blog post and submit it for review.
|
|||
- Many CNCF projects have their own blog. These are often a better choice for posts. There are times of major feature or milestone for a CNCF project that users would be interested in reading on the Kubernetes blog.
|
||||
- Blog posts should be original content
|
||||
- The official blog is not for repurposing existing content from a third party as new content.
|
||||
- The [license](https://github.com/kubernetes/website/blob/master/LICENSE) for the blog allows commercial use of the content for commercial purposes, but not the other way around.
|
||||
- The [license](https://github.com/kubernetes/website/blob/main/LICENSE) for the blog allows commercial use of the content for commercial purposes, but not the other way around.
|
||||
- Blog posts should aim to be future proof
|
||||
- Given the development velocity of the project, we want evergreen content that won't require updates to stay accurate for the reader.
|
||||
- It can be a better choice to add a tutorial or update official documentation than to write a high level overview as a blog post.
|
||||
|
@ -56,7 +56,7 @@ The SIG Docs [blog subproject](https://github.com/kubernetes/community/tree/mast
|
|||
|
||||
To submit a blog post follow these directions:
|
||||
|
||||
- [Open a pull request](/docs/contribute/new-content/open-a-pr/#fork-the-repo) with a new blog post. New blog posts go under the [`content/en/blog/_posts`](https://github.com/kubernetes/website/tree/master/content/en/blog/_posts) directory.
|
||||
- [Open a pull request](/docs/contribute/new-content/open-a-pr/#fork-the-repo) with a new blog post. New blog posts go under the [`content/en/blog/_posts`](https://github.com/kubernetes/website/tree/main/content/en/blog/_posts) directory.
|
||||
|
||||
- Ensure that your blog post follows the correct naming conventions and the following frontmatter (metadata) information:
|
||||
|
||||
|
@ -90,6 +90,6 @@ Case studies highlight how organizations are using Kubernetes to solve
|
|||
real-world problems. The Kubernetes marketing team and members of the {{< glossary_tooltip text="CNCF" term_id="cncf" >}} collaborate with you on all case studies.
|
||||
|
||||
Have a look at the source for the
|
||||
[existing case studies](https://github.com/kubernetes/website/tree/master/content/en/case-studies).
|
||||
[existing case studies](https://github.com/kubernetes/website/tree/main/content/en/case-studies).
|
||||
|
||||
Refer to the [case study guidelines](https://github.com/cncf/foundation/blob/master/case-study-guidelines.md) and submit your request as outlined in the guidelines.
|
||||
|
|
|
@ -127,7 +127,7 @@ Make sure you have [git](https://git-scm.com/book/en/v2/Getting-Started-Installi
|
|||
upstream https://github.com/kubernetes/website.git (push)
|
||||
```
|
||||
|
||||
6. Fetch commits from your fork's `origin/master` and `kubernetes/website`'s `upstream/master`:
|
||||
6. Fetch commits from your fork's `origin/main` and `kubernetes/website`'s `upstream/main`:
|
||||
|
||||
```bash
|
||||
git fetch origin
|
||||
|
@ -137,15 +137,15 @@ Make sure you have [git](https://git-scm.com/book/en/v2/Getting-Started-Installi
|
|||
This makes sure your local repository is up to date before you start making changes.
|
||||
|
||||
{{< note >}}
|
||||
This workflow is different than the [Kubernetes Community GitHub Workflow](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md). You do not need to merge your local copy of `master` with `upstream/master` before pushing updates to your fork.
|
||||
This workflow is different than the [Kubernetes Community GitHub Workflow](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md). You do not need to merge your local copy of `main` with `upstream/main` before pushing updates to your fork.
|
||||
{{< /note >}}
|
||||
|
||||
### Create a branch
|
||||
|
||||
1. Decide which branch to base your work on:
|
||||
|
||||
- For improvements to existing content, use `upstream/master`.
|
||||
- For new content about existing features, use `upstream/master`.
|
||||
- For improvements to existing content, use `upstream/main`.
|
||||
- For new content about existing features, use `upstream/main`.
|
||||
- For localized content, use the localization's conventions. For more information, see [localizing Kubernetes documentation](/docs/contribute/localization/).
|
||||
- For new features in an upcoming Kubernetes release, use the feature branch. For more information, see [documenting for a release](/docs/contribute/new-content/new-features/).
|
||||
- For long-running efforts that multiple SIG Docs contributors collaborate on,
|
||||
|
@ -154,10 +154,10 @@ Make sure you have [git](https://git-scm.com/book/en/v2/Getting-Started-Installi
|
|||
|
||||
If you need help choosing a branch, ask in the `#sig-docs` Slack channel.
|
||||
|
||||
2. Create a new branch based on the branch identified in step 1. This example assumes the base branch is `upstream/master`:
|
||||
2. Create a new branch based on the branch identified in step 1. This example assumes the base branch is `upstream/main`:
|
||||
|
||||
```bash
|
||||
git checkout -b <my_new_branch> upstream/master
|
||||
git checkout -b <my_new_branch> upstream/main
|
||||
```
|
||||
|
||||
3. Make your changes using a text editor.
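   For example, you might review and stage your edits like this (a minimal sketch; the file path and commit message are placeholders only):

   ```bash
   # See which files you have modified.
   git status

   # Review the exact edits before staging them.
   git diff

   # Stage and commit; the path and message below are placeholders.
   git add content/en/docs/contribute/example-page.md
   git commit -m "Clarify example page wording"
   ```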
|
||||
|
@ -262,7 +262,7 @@ The commands below use Docker as default container engine. Set the `CONTAINER_EN
|
|||
|
||||
Alternatively, install and use the `hugo` command on your computer:
|
||||
|
||||
1. Install the [Hugo](https://gohugo.io/getting-started/installing/) version specified in [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/master/netlify.toml).
|
||||
1. Install the [Hugo](https://gohugo.io/getting-started/installing/) version specified in [`website/netlify.toml`](https://raw.githubusercontent.com/kubernetes/website/main/netlify.toml).
|
||||
|
||||
2. If you have not updated your website repository, the `website/themes/docsy` directory is empty.
|
||||
The site cannot build without a local copy of the theme. To update the website theme, run:
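   A minimal sketch, assuming the theme is pulled in as a git submodule:

   ```bash
   # Fetch the Docsy theme (and any other submodules) as shallow clones.
   git submodule update --init --recursive --depth 1
   ```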
|
||||
|
@ -370,11 +370,11 @@ If another contributor commits changes to the same file in another PR, it can cr
|
|||
git push --force-with-lease origin <your-branch-name>
|
||||
```
|
||||
|
||||
2. Fetch changes from `kubernetes/website`'s `upstream/master` and rebase your branch:
|
||||
2. Fetch changes from `kubernetes/website`'s `upstream/main` and rebase your branch:
|
||||
|
||||
```bash
|
||||
git fetch upstream
|
||||
git rebase upstream/master
|
||||
git rebase upstream/main
|
||||
```
|
||||
|
||||
3. Inspect the results of the rebase:
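   For example (a minimal sketch; a rebase can also stop and ask you to resolve conflicts first):

   ```bash
   # Confirm whether the rebase finished cleanly or stopped on a conflict.
   git status

   # Compare your rebased commits against the branch you rebased onto.
   git log --oneline upstream/main..HEAD
   ```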
|
||||
|
|
|
@ -42,7 +42,7 @@ When opening a pull request, you need to know in advance which branch to base yo
|
|||
|
||||
Scenario | Branch
|
||||
:---------|:------------
|
||||
Existing or new English language content for the current release | `master`
|
||||
Existing or new English language content for the current release | `main`
|
||||
Content for a feature change release | The branch which corresponds to the major and minor version the feature change is in, using the pattern `dev-<version>`. For example, if a feature changes in the `v{{< skew nextMinorVersion >}}` release, then add documentation changes to the ``dev-{{< skew nextMinorVersion >}}`` branch.
|
||||
Content in other languages (localizations) | Use the localization's convention. See the [Localization branching strategy](/docs/contribute/localization/#branching-strategy) for more information.
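For instance, starting a topic branch for an upcoming release might look like this (a hedged sketch; `dev-<version>` and the branch name are placeholders):

```bash
# Fetch the release branches, then branch off the matching dev branch.
git fetch upstream
git checkout -b update-feature-docs upstream/dev-<version>
```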
|
||||
|
||||
|
@ -60,6 +60,6 @@ Limit pull requests to one language per PR. If you need to make an identical cha
|
|||
|
||||
## Tools for contributors
|
||||
|
||||
The [doc contributors tools](https://github.com/kubernetes/website/tree/master/content/en/docs/doc-contributor-tools) directory in the `kubernetes/website` repository contains tools to help your contribution journey go more smoothly.
|
||||
The [doc contributors tools](https://github.com/kubernetes/website/tree/main/content/en/docs/doc-contributor-tools) directory in the `kubernetes/website` repository contains tools to help your contribution journey go more smoothly.
|
||||
|
||||
|
||||
|
|
|
@ -73,8 +73,8 @@ two [prow plugins](https://github.com/kubernetes/test-infra/tree/master/prow/plu
|
|||
- approve
|
||||
|
||||
These two plugins use the
|
||||
[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS) and
|
||||
[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES)
|
||||
[OWNERS](https://github.com/kubernetes/website/blob/main/OWNERS) and
|
||||
[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/main/OWNERS_ALIASES)
|
||||
files in the top level of the `kubernetes/website` GitHub repository to control
|
||||
how prow works within the repository.
|
||||
|
||||
|
|
|
@ -44,8 +44,8 @@ These queries exclude localization PRs. All queries are against the main branch
|
|||
Lists PRs that need an LGTM from a member. If the PR needs technical review, loop in one of the reviewers suggested by the bot. If the content needs work, add suggestions and feedback in-line.
|
||||
- [Has LGTM, needs docs approval](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+-label%3Ado-not-merge%2Fwork-in-progress+-label%3Ado-not-merge%2Fhold+label%3Alanguage%2Fen+label%3Algtm+):
|
||||
Lists PRs that need an `/approve` comment to merge.
|
||||
- [Quick Wins](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amaster+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22): Lists PRs against the main branch with no clear blockers. (change "XS" in the size label as you work through the PRs [XS, S, M, L, XL, XXL]).
|
||||
- [Not against the main branch](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3Alanguage%2Fen+-base%3Amaster): If the PR is against a `dev-` branch, it's for an upcoming release. Assign the [docs release manager](https://github.com/kubernetes/sig-release/tree/master/release-team#kubernetes-release-team-roles) using: `/assign @<manager's_github-username>`. If the PR is against an old branch, help the author figure out whether it's targeted against the best branch.
|
||||
- [Quick Wins](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+base%3Amain+-label%3A%22do-not-merge%2Fwork-in-progress%22+-label%3A%22do-not-merge%2Fhold%22+label%3A%22cncf-cla%3A+yes%22+label%3A%22size%2FXS%22+label%3A%22language%2Fen%22): Lists PRs against the main branch with no clear blockers. (change "XS" in the size label as you work through the PRs [XS, S, M, L, XL, XXL]).
|
||||
- [Not against the primary branch](https://github.com/kubernetes/website/pulls?q=is%3Aopen+is%3Apr+label%3Alanguage%2Fen+-base%3Amain): If the PR is against a `dev-` branch, it's for an upcoming release. Assign the [docs release manager](https://github.com/kubernetes/sig-release/tree/master/release-team#kubernetes-release-team-roles) using: `/assign @<manager's_github-username>`. If the PR is against an old branch, help the author figure out whether it's targeted against the best branch.
|
||||
|
||||
### Helpful Prow commands for wranglers
|
||||
|
||||
|
|
|
@ -147,7 +147,7 @@ separately for reviewer status in SIG Docs.
|
|||
To apply:
|
||||
|
||||
1. Open a pull request that adds your GitHub user name to a section of the
|
||||
[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS) file
|
||||
[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/main/OWNERS_ALIASES) file
|
||||
in the `kubernetes/website` repository.
|
||||
|
||||
{{< note >}}
|
||||
|
@ -219,7 +219,7 @@ separately for approver status in SIG Docs.
|
|||
To apply:
|
||||
|
||||
1. Open a pull request adding yourself to a section of the
|
||||
[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS)
|
||||
[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/main/OWNERS_ALIASES)
|
||||
file in the `kubernetes/website` repository.
|
||||
|
||||
{{< note >}}
|
||||
|
|
|
@ -55,7 +55,7 @@ You can reference glossary terms with an inclusion that automatically updates an
|
|||
As well as inclusions with tooltips, you can reuse the definitions from the glossary in
|
||||
page content.
|
||||
|
||||
The raw data for glossary terms is stored at [https://github.com/kubernetes/website/tree/master/content/en/docs/reference/glossary](https://github.com/kubernetes/website/tree/master/content/en/docs/reference/glossary), with a content file for each glossary term.
|
||||
The raw data for glossary terms is stored at [https://github.com/kubernetes/website/tree/main/content/en/docs/reference/glossary](https://github.com/kubernetes/website/tree/main/content/en/docs/reference/glossary), with a content file for each glossary term.
|
||||
|
||||
### Glossary demo
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ glossary entries, tabs, and representing feature state.
|
|||
## Language
|
||||
|
||||
Kubernetes documentation has been translated into multiple languages
|
||||
(see [Localization READMEs](https://github.com/kubernetes/website/blob/master/README.md#localization-readmemds)).
|
||||
(see [Localization READMEs](https://github.com/kubernetes/website/blob/main/README.md#localization-readmemds)).
|
||||
|
||||
How to localize the docs for a different language is described in [Localizing Kubernetes Documentation](/docs/contribute/localization/).
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ card:
|
|||
|
||||
<!-- overview -->
|
||||
|
||||
If you notice an issue with Kubernetes documentation, or have an idea for new content, then open an issue. All you need is a [GitHub account](https://github.com/join) and a web browser.
|
||||
If you notice an issue with Kubernetes documentation or have an idea for new content, then open an issue. All you need is a [GitHub account](https://github.com/join) and a web browser.
|
||||
|
||||
In most cases, new work on Kubernetes documentation begins with an issue in GitHub. Kubernetes contributors
|
||||
then review, categorize and tag issues as needed. Next, you or another member
|
||||
|
@ -22,7 +22,7 @@ of the Kubernetes community open a pull request with changes to resolve the issu
|
|||
|
||||
## Opening an issue
|
||||
|
||||
If you want to suggest improvements to existing content, or notice an error, then open an issue.
|
||||
If you want to suggest improvements to existing content or notice an error, then open an issue.
|
||||
|
||||
1. Click the **Create an issue** link on the right sidebar. This redirects you
|
||||
to a GitHub issue page pre-populated with some headers.
|
||||
|
|