Merge branch 'main' into resourcequotabehavior

pull/35011/head
Kartik Sharma 2023-05-25 11:43:13 +05:30 committed by GitHub
commit 8404c668f8
3351 changed files with 269436 additions and 204356 deletions


@ -1,4 +1,4 @@
<!--
<!--
Hello!
@ -9,7 +9,7 @@
PLEASE title the FIRST commit appropriately, so that if you squash all
your commits into one, the combined commit message makes sense.
For overall help on editing and submitting pull requests, visit:
https://kubernetes.io/docs/contribute/suggest-improvements/
https://kubernetes.io/docs/contribute/suggesting-improvements/
Use the default base branch, “main”, if you're documenting existing
features in the English localization.


@ -4,6 +4,7 @@ on:
schedule: # Build twice daily: shortly after midnight and noon (UTC)
# Offset is to be nice to the build service
- cron: '4 0,12 * * *'
permissions: {} # none
jobs:
build:
runs-on: ubuntu-latest

.gitignore (vendored): 9 changed lines

@ -27,13 +27,14 @@ kubernetes.github.io.iml
nohup.out
# Hugo output
public/
resources/
/public
/resources/
.hugo_build.lock
# Netlify Functions build output
package-lock.json
functions/
node_modules/
/functions/
/node_modules/
# Generated files when building with make container-build
.config/
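
Anchoring these patterns with a leading `/` limits them to the repository root, so directories with the same names deeper in the tree are no longer ignored. A minimal way to see the effect (paths below are hypothetical, assuming a local checkout):

```bash
# Hypothetical paths, for illustration only.
git check-ignore -v public/index.html          # still matched by the anchored /public
git check-ignore -v content/en/resources/a.md  # no longer matched once resources/ is anchored
```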


@ -34,6 +34,6 @@ Note that code issues should be filed against the main kubernetes repository, wh
### Submitting Documentation Pull Requests
If you're fixing an issue in the existing documentation, you should submit a PR against the main branch. Follow [these instructions to create a documentation pull request against the kubernetes.io repository](http://kubernetes.io/docs/home/contribute/create-pull-request/).
If you're fixing an issue in the existing documentation, you should submit a PR against the main branch. Follow [these instructions to create a documentation pull request against the kubernetes.io repository](https://kubernetes.io/docs/contribute/new-content/open-a-pr/).
For more information, see [contributing to Kubernetes docs](https://kubernetes.io/docs/contribute/).


@ -4,7 +4,7 @@
# change is that the Hugo version is now an overridable argument rather than a fixed
# environment variable.
FROM golang:1.18-alpine
FROM docker.io/library/golang:1.20-alpine
LABEL maintainer="Luc Perkins <lperkins@linuxfoundation.org>"
@ -24,7 +24,7 @@ RUN mkdir $HOME/src && \
cd "hugo-${HUGO_VERSION}" && \
go install --tags extended
FROM golang:1.18-alpine
FROM docker.io/library/golang:1.20-alpine
RUN apk add --no-cache \
runuser \


@ -9,11 +9,16 @@ CONTAINER_ENGINE ?= docker
IMAGE_REGISTRY ?= gcr.io/k8s-staging-sig-docs
IMAGE_VERSION=$(shell scripts/hash-files.sh Dockerfile Makefile | cut -c 1-12)
CONTAINER_IMAGE = $(IMAGE_REGISTRY)/k8s-website-hugo:v$(HUGO_VERSION)-$(IMAGE_VERSION)
CONTAINER_RUN = "$(CONTAINER_ENGINE)" run --rm --interactive --tty --volume "$(CURDIR):/src"
# Mount read-only to allow use with tools like Podman in SELinux mode
# Container targets don't need to write into /src
CONTAINER_RUN = "$(CONTAINER_ENGINE)" run --rm --interactive --tty --volume "$(CURDIR):/src:ro,Z"
CCRED=\033[0;31m
CCEND=\033[0m
# Docker buildx related settings for multi-arch images
DOCKER_BUILDX ?= docker buildx
.PHONY: all build build-preview help serve
help: ## Show this help.
@ -29,13 +34,13 @@ module-init: ## Initialize required submodules.
all: build ## Build site with production settings and put deliverables in ./public
build: module-check ## Build site with non-production settings and put deliverables in ./public
hugo --minify --environment development
hugo --cleanDestinationDir --minify --environment development
build-preview: module-check ## Build site with drafts and future posts enabled
hugo --buildDrafts --buildFuture --environment preview
hugo --cleanDestinationDir --buildDrafts --buildFuture --environment preview
deploy-preview: ## Deploy preview site via netlify
hugo --enableGitInfo --buildFuture --environment preview -b $(DEPLOY_PRIME_URL)
GOMAXPROCS=1 hugo --cleanDestinationDir --enableGitInfo --buildFuture --environment preview -b $(DEPLOY_PRIME_URL)
functions-build:
$(NETLIFY_FUNC) build functions-src
@ -44,11 +49,11 @@ check-headers-file:
scripts/check-headers-file.sh
production-build: module-check ## Build the production site and ensure that noindex headers aren't added
hugo --minify --environment production
GOMAXPROCS=1 hugo --cleanDestinationDir --minify --environment production
HUGO_ENV=production $(MAKE) check-headers-file
non-production-build: module-check ## Build the non-production site, which adds noindex headers to prevent indexing
hugo --enableGitInfo --environment nonprod
GOMAXPROCS=1 hugo --cleanDestinationDir --enableGitInfo --environment nonprod
serve: module-check ## Boot the development server.
hugo server --buildFuture --environment development
@ -74,6 +79,23 @@ container-image: ## Build a container image for the preview of the website
container-push: container-image ## Push container image for the preview of the website
$(CONTAINER_ENGINE) push $(CONTAINER_IMAGE)
PLATFORMS ?= linux/arm64,linux/amd64
docker-push: ## Build a multi-architecture image and push that into the registry
docker run --rm --privileged tonistiigi/binfmt:qemu-v6.2.0-26@sha256:5bf63a53ad6222538112b5ced0f1afb8509132773ea6dd3991a197464962854e --install all
docker version
$(DOCKER_BUILDX) version
$(DOCKER_BUILDX) inspect image-builder > /dev/null 2>&1 || $(DOCKER_BUILDX) create --name image-builder --use
# copy existing Dockerfile and insert --platform=${TARGETPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
sed -e 's/\(^FROM\)/FROM --platform=\$$\{TARGETPLATFORM\}/' Dockerfile > Dockerfile.cross
$(DOCKER_BUILDX) build \
--push \
--platform=$(PLATFORMS) \
--build-arg HUGO_VERSION=$(HUGO_VERSION) \
--tag $(CONTAINER_IMAGE) \
-f Dockerfile.cross .
$(DOCKER_BUILDX) stop image-builder
rm Dockerfile.cross
container-build: module-check
$(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 $(CONTAINER_IMAGE) sh -c "npm ci && hugo --minify --environment development"
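
As a rough sketch of how the new multi-architecture target might be invoked (outside the diff itself): `PLATFORMS`, `CONTAINER_ENGINE`, and `DOCKER_BUILDX` are all overridable variables, so a single-architecture push could presumably look like:

```bash
# Sketch only: assumes Docker with buildx available and push access to the
# registry selected via IMAGE_REGISTRY (gcr.io/k8s-staging-sig-docs by default).
make docker-push PLATFORMS=linux/amd64
```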

OWNERS: 4 changed lines

@ -7,12 +7,14 @@ approvers:
- sig-docs-en-owners # Defined in OWNERS_ALIASES
emeritus_approvers:
# - celestehorgan, commented out to disable PR assignments
# - chenopis, commented out to disable PR assignments
# - irvifa, commented out to disable PR assignments
# - jaredbhatti, commented out to disable PR assignments
# - jimangel, commented out to disable PR assignments
# - kbarnard10, commented out to disable PR assignments
# - steveperry-53, commented out to disable PR assignments
- stewart-yu
- stewart-yu
# - zacharysarah, commented out to disable PR assignments
labels:


@ -1,14 +1,24 @@
aliases:
sig-docs-blog-owners: # Approvers for blog content
- onlydole
- mrbobbytables
- sftim
- nate-double-u
- onlydole
- sftim
sig-docs-blog-reviewers: # Reviewers for blog content
- mrbobbytables
- nate-double-u
- onlydole
- sftim
- nate-double-u
sig-docs-localization-owners: # Admins for localization content
- a-mccarthy
- divya-mohan0209
- jimangel
- kbhawkey
- natalisucks
- onlydole
- reylejano
- sftim
- tengqm
sig-docs-de-owners: # Admins for German content
- bene2k1
- mkorbi
@ -20,136 +30,106 @@ aliases:
sig-docs-en-owners: # Admins for English content
- annajung
- bradtopol
- celestehorgan
- divya-mohan0209
- jimangel
- jlbutler
- kbhawkey
- kcmartin
- natalisucks
- nate-double-u
- onlydole
- pi-victor
- reylejano
- savitharaghunathan
- Rishit-dagli # 1.28 Release Team Docs Lead
- sftim
- tengqm
- zacharysarah
sig-docs-en-reviews: # PR reviews for English content
- bradtopol
- celestehorgan
- daminisatya
- divya-mohan0209
- jimangel
- kbhawkey
- mehabhalodiya
- mengjiao-liu
- natalisucks
- nate-double-u
- onlydole
- rajeshdeshpande02
- reylejano
- sftim
- shannonxtreme
- tengqm
- zacharysarah
sig-docs-es-owners: # Admins for Spanish content
- raelga
- 92nqb
- krol3
- electrocucaracha
- raelga
- ramrodo
sig-docs-es-reviews: # PR reviews for Spanish content
- raelga
- 92nqb
- krol3
- electrocucaracha
- raelga
- ramrodo
sig-docs-fr-owners: # Admins for French content
- remyleone
- awkif
- feloy
- perriea
- rekcah78
- yastij
- smana
- rbenzair
- abuisine
- erickhun
- jygastaud
- awkif
- oussemos
- anthonydahanne
- feloy
- remyleone
sig-docs-fr-reviews: # PR reviews for French content
- remyleone
- awkif
- feloy
- perriea
- rekcah78
- yastij
- smana
- rbenzair
- abuisine
- erickhun
- jygastaud
- awkif
- oussemos
- anthonydahanne
- feloy
- remyleone
sig-docs-hi-owners: # Admins for Hindi content
- anubha-v-ardhan
- divya-mohan0209
- mittalyashu
sig-docs-hi-reviews: # PR reviews for Hindi content
- anubha-v-ardhan
- Babapool
- bishal7679
- divya-mohan0209
- mittalyashu
- Garima-Negi
- verma-kunal
sig-docs-id-owners: # Admins for Indonesian content
- ariscahyadi
- danninov
- girikuncoro
- habibrosyad
- phanama
- wahyuoi
sig-docs-id-reviews: # PR reviews for Indonesian content
- ariscahyadi
- danninov
- girikuncoro
- habibrosyad
- phanama
- wahyuoi
sig-docs-it-owners: # Admins for Italian content
- fabriziopandini
- Fale
- mattiaperi
- micheleberardi
sig-docs-it-reviews: # PR reviews for Italian content
- fabriziopandini
- Fale
- mattiaperi
- micheleberardi
sig-docs-ja-owners: # Admins for Japanese content
# cstoku
- inductor
- nasa9084
sig-docs-ja-reviews: # PR reviews for Japanese content
- atoato88
- bells17
# cstoku
- inductor
- kakts
- makocchi-git
# MasayaAoyama
- nasa9084
# oke-py
- ptux
- t-inu
sig-docs-ko-owners: # Admins for Korean content
- ClaudiaJKang
- gochist
- ianychoi
- jihoon-seo
- seokho-son
- yoonian
- ysyukr
sig-docs-ko-reviews: # PR reviews for Korean content
- ClaudiaJKang
- gochist
- ianychoi
- jihoon-seo
- jmyung
- pjhwa
- seokho-son
- yoonian
- ysyukr
sig-docs-leads: # Website chairs and tech leads
- divya-mohan0209
- jimangel
- kbhawkey
- natalisucks
- onlydole
@ -157,117 +137,102 @@ aliases:
- sftim
- tengqm
sig-docs-zh-owners: # Admins for Chinese content
# chenopis
- chenrui333
# dchen1107
# haibinxie
# hanjiayao
- howieyuen
# lichuqiang
- mengjiao-liu
- SataQiu
- Sea-n
- tanjunchen
- tengqm
- windsonsea
- xichengliudui
# zhangxiaoyu-zidif
sig-docs-zh-reviews: # PR reviews for Chinese content
- chenrui333
- chenxuc
- howieyuen
- idealhack
# idealhack
- kinzhi
- mengjiao-liu
- pigletfly
- my-git9
# pigletfly
- SataQiu
- Sea-n
- tanjunchen
- tengqm
- windsonsea
- xichengliudui
- ydFu
# zhangxiaoyu-zidif
sig-docs-pt-owners: # Admins for Portuguese content
- devlware
- edsoncelio
- femrtnz
- jailton
- jcjesus
- devlware
- jhonmike
- rikatz
- stormqueen1990
- yagonobre
sig-docs-pt-reviews: # PR reviews for Portugese content
- devlware
- edsoncelio
- femrtnz
- jailton
- jcjesus
- devlware
- jhonmike
- rikatz
- mrerlison
- stormqueen1990
- yagonobre
sig-docs-vi-owners: # Admins for Vietnamese content
- huynguyennovem
- ngtuna
- truongnh1992
sig-docs-vi-reviews: # PR reviews for Vietnamese content
- huynguyennovem
- ngtuna
- truongnh1992
sig-docs-ru-owners: # Admins for Russian content
- msheldyakov
- aisonaku
- potapy4
- dianaabv
- Arhell
- shurup
sig-docs-ru-reviews: # PR reviews for Russian content
- Arhell
- msheldyakov
- aisonaku
- potapy4
- dianaabv
- shurup
sig-docs-pl-owners: # Admins for Polish content
- mfilocha
- nvtkaszpir
sig-docs-pl-reviews: # PR reviews for Polish content
- kpucynski
- mfilocha
- nvtkaszpir
- kpucynski
sig-docs-uk-owners: # Admins for Ukrainian content
- anastyakulyk
- butuzov
- Arhell
- MaxymVlasov
sig-docs-uk-reviews: # PR reviews for Ukrainian content
- anastyakulyk
- Arhell
- butuzov
- idvoretskyi
- MaxymVlasov
- Potapy4
# authoritative source: git.k8s.io/community/OWNERS_ALIASES
committee-steering: # provide PR approvals for announcements
- cblecker
- dims
- cpanato
- bentheelder
- justaugustus
- liggitt
- mrbobbytables
- parispittman
- palnabarun
- tpepper
# authoritative source: https://git.k8s.io/sig-release/OWNERS_ALIASES
sig-release-leads:
- cpanato # SIG Technical Lead
- jeremyrickard # SIG Technical Lead
- jeremyrickard # SIG Chair
- justaugustus # SIG Chair
- puerco # SIG Technical Lead
- saschagrunert # SIG Chair
- Verolop # SIG Technical Lead
release-engineering-approvers:
- cpanato # Release Manager
- palnabarun # Release Manager
- puerco # Release Manager
- saschagrunert # subproject owner / Release Manager
- cpanato # subproject owner / Release Manager
- jeremyrickard # subproject owner / Release Manager
- justaugustus # subproject owner / Release Manager
- Verolop # Release Manager
- palnabarun # Release Manager
- puerco # subproject owner / Release Manager
- saschagrunert # subproject owner / Release Manager
- Verolop # subproject owner / Release Manager
- xmudrii # Release Manager
release-engineering-reviewers:
- ameukam # Release Manager Associate
- cici37 # Release Manager Associate
- jimangel # Release Manager Associate
- markyjackson-taulia # Release Manager Associate
- mkorbi # Release Manager Associate
- onlydole # Release Manager Associate
- sethmccombs # Release Manager Associate
- thejoycekung # Release Manager Associate
- wilsonehusin # Release Manager Associate
- jrsapi # Release Manager Associate
- salaxander # Release Manager Associate


@ -16,7 +16,7 @@ Hugo(Extended version)を使用してWebサイトをローカルで実行する
このリポジトリを使用するには、以下をローカルにインストールする必要があります。
- [npm](https://www.npmjs.com/)
- [Go](https://golang.org/)
- [Go](https://go.dev/)
- [Hugo(Extended version)](https://gohugo.io/)
- [Docker](https://www.docker.com/)などのコンテナランタイム


@ -13,7 +13,7 @@ Você pode executar o website localmente utilizando o Hugo (versão Extended), o
Para usar este repositório, você precisa instalar:
- [npm](https://www.npmjs.com/)
- [Go](https://golang.org/)
- [Go](https://go.dev/)
- [Hugo (versão Extended)](https://gohugo.io/)
- Um container runtime, por exemplo [Docker](https://www.docker.com/).


@ -13,7 +13,7 @@
Чтобы работать с этим репозиторием, понадобятся следующие компоненты, установленные локально:
- [npm](https://www.npmjs.com/)
- [Go](https://golang.org/)
- [Go](https://go.dev/)
- [Hugo (Extended version)](https://gohugo.io/)
- Исполняемая среда для контейнеров вроде [Docker](https://www.docker.com/)


@ -9,8 +9,8 @@
<!--
This repository contains the assets required to build the [Kubernetes website and documentation](https://kubernetes.io/). We're glad that you want to contribute!
-->
本仓库包含了所有用于构建 [Kubernetes 网站和文档](https://kubernetes.io/) 的软件资产。
我们非常高兴想要参与贡献!
本仓库包含了所有用于构建 [Kubernetes 网站和文档](https://kubernetes.io/)的软件资产。
我们非常高兴想要参与贡献!
<!--
- [Contributing to the docs](#contributing-to-the-docs)
@ -22,11 +22,11 @@ This repository contains the assets required to build the [Kubernetes website an
<!--
## Using this repository
You can run the website locally using Hugo (Extended version), or you can run it in a container runtime. We strongly recommend using the container runtime, as it gives deployment consistency with the live website.
You can run the website locally using [Hugo (Extended version)](https://gohugo.io/), or you can run it in a container runtime. We strongly recommend using the container runtime, as it gives deployment consistency with the live website.
-->
## 使用这个仓库
可以使用 Hugo扩展版在本地运行网站也可以在容器中运行它。强烈建议使用容器因为这样可以和在线网站的部署保持一致。
可以使用 [Hugo扩展版](https://gohugo.io/)在本地运行网站,也可以在容器中运行它。强烈建议使用容器,因为这样可以和在线网站的部署保持一致。
<!--
## Prerequisites
@ -34,19 +34,18 @@ You can run the website locally using Hugo (Extended version), or you can run it
To use this repository, you need the following installed locally:
- [npm](https://www.npmjs.com/)
- [Go](https://golang.org/)
- [Go](https://go.dev/)
- [Hugo (Extended version)](https://gohugo.io/)
- A container runtime, like [Docker](https://www.docker.com/).
-->
## 前提条件
使用这个仓库,需要在本地安装以下软件:
- [npm](https://www.npmjs.com/)
- [Go](https://golang.org/)
- [Hugo (Extended version)](https://gohugo.io/)
- 容器运行时,比如 [Docker](https://www.docker.com/).
- [Go](https://golang.google.cn/)
- [Hugo(Extended 版本)](https://gohugo.io/)
- 容器运行时,比如 [Docker](https://www.docker.com/)
<!--
Before you start, install the dependencies. Clone the repository and navigate to the directory:
@ -61,23 +60,43 @@ cd website
<!--
The Kubernetes website uses the [Docsy Hugo theme](https://github.com/google/docsy#readme). Even if you plan to run the website in a container, we strongly recommend pulling in the submodule and other development dependencies by running the following:
-->
Kubernetes 网站使用的是 [Docsy Hugo 主题](https://github.com/google/docsy#readme)。
即使你打算在容器中运行网站,我们也强烈建议你通过运行以下命令来引入子模块和其他开发依赖项:
Kubernetes 网站使用的是 [Docsy Hugo 主题](https://github.com/google/docsy#readme)。 即使你打算在容器中运行网站,我们也强烈建议你通过运行以下命令来引入子模块和其他开发依赖项:
```bash
# 引入 Docsy 子模块
<!--
### Windows
```powershell
# fetch submodule dependencies
git submodule update --init --recursive --depth 1
```
```
-->
### Windows
```powershell
# 获取子模块依赖
git submodule update --init --recursive --depth 1
```
<!--
### Linux / other Unix
```bash
# fetch submodule dependencies
make module-init
```
-->
### Linux / 其它 Unix
```bash
# 获取子模块依赖
make module-init
```
<!--
## Running the website using a container
To build the site in a container, run the following to build the container image and run it:
To build the site in a container, run the following:
-->
## 在容器中运行网站
要在容器中构建网站,请通过以下命令来构建容器镜像并运行:
要在容器中构建网站,请运行以下命令
```bash
# 你可以将 $CONTAINER_ENGINE 设置为任何 Docker 类容器工具的名称
@ -87,7 +106,7 @@ make container-serve
<!--
If you see errors, it probably means that the hugo container did not have enough computing resources available. To solve it, increase the amount of allowed CPU and memory usage for Docker on your machine ([MacOSX](https://docs.docker.com/docker-for-mac/#resources) and [Windows](https://docs.docker.com/docker-for-windows/#resources)).
-->
如果您看到错误,这可能意味着 hugo 容器没有足够的可用计算资源。
如果你看到错误,这可能意味着 Hugo 容器没有足够的可用计算资源。
要解决这个问题,请增加机器([MacOSX](https://docs.docker.com/docker-for-mac/#resources)
和 [Windows](https://docs.docker.com/docker-for-windows/#resources))上
Docker 允许的 CPU 和内存使用量。
@ -108,7 +127,7 @@ To build and test the site locally, run:
## 在本地使用 Hugo 来运行网站
请确保安装的是 [`netlify.toml`](netlify.toml#L10) 文件中环境变量 `HUGO_VERSION` 所指定的
Hugo 扩展版本。
Hugo Extended 版本。
若要在本地构造和测试网站,请运行:
@ -131,13 +150,13 @@ This will start the local Hugo server on port 1313. Open up your browser to <htt
## 构建 API 参考页面
<!--
The API reference pages located in `content/en/docs/reference/kubernetes-api` are built from the Swagger specification, using <https://github.com/kubernetes-sigs/reference-docs/tree/master/gen-resourcesdocs>.
The API reference pages located in `content/en/docs/reference/kubernetes-api` are built from the Swagger specification, also known as OpenAPI specification, using <https://github.com/kubernetes-sigs/reference-docs/tree/master/gen-resourcesdocs>.
To update the reference pages for a new Kubernetes release follow these steps:
-->
位于 `content/en/docs/reference/kubernetes-api` 的 API 参考页面是根据 Swagger 规范构建的,使用 <https://github.com/kubernetes-sigs/reference-docs/tree/master/gen-resourcesdocs>
位于 `content/en/docs/reference/kubernetes-api` 的 API 参考页面是使用 <https://github.com/kubernetes-sigs/reference-docs/tree/master/gen-resourcesdocs> 根据 Swagger 规范(也称为 OpenAPI 规范)构建的
要更新 Kubernetes 版本的参考页面,请执行以下步骤:
要更新 Kubernetes 版本的参考页面,请执行以下步骤:
<!--
1. Pull in the `api-ref-generator` submodule:
@ -171,19 +190,19 @@ To update the reference pages for a new Kubernetes release follow these steps:
make api-reference
```
<!--
<!--
You can test the results locally by making and serving the site from a container image:
-->
可以通过从容器映像创建和提供站点来在本地测试结果:
-->
可以通过从容器映像创建和提供站点来在本地测试结果:
```bash
make container-image
make container-serve
```
<!--
<!--
In a web browser, go to <http://localhost:1313/docs/reference/kubernetes-api/> to view the API reference.
-->
-->
在 Web 浏览器中,打开 <http://localhost:1313/docs/reference/kubernetes-api/> 查看 API 参考。
<!--
@ -200,17 +219,17 @@ Hugo is shipped in two set of binaries for technical reasons. The current websit
-->
## 故障排除
### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version
### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version
由于技术原因Hugo 会发布两套二进制文件。
当前网站仅基于 **Hugo Extended** 版本运行。
在 [发布页面](https://github.com/gohugoio/hugo/releases) 中查找名称为 `extended` 的归档。可以运行 `hugo version` 查看是否有单词 `extended` 来确认。
在[发布页面](https://github.com/gohugoio/hugo/releases)中查找名称为 `extended` 的归档。
可以运行 `hugo version` 查看是否有单词 `extended` 来确认。
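
A quick way to run the check described above, assuming `hugo` is on the `PATH`:

```bash
# The extended build advertises itself in the version string.
hugo version | grep extended
```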
<!--
### Troubleshooting macOS for too many open files
If you run `make serve` on macOS and receive the following error:
-->
### 对 macOS 上打开太多文件的故障排除
@ -236,7 +255,7 @@ Then run the following commands (adapted from <https://gist.github.com/tombigel/
```shell
#!/bin/sh
# These are the original gist links, linking to my gists now.
# 这些是原始的 gist 链接,立即链接到我的 gist。
# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxfiles.plist
# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxproc.plist
@ -257,6 +276,51 @@ This works for Catalina as well as Mojave macOS.
-->
这适用于 Catalina 和 Mojave macOS。
### 对执行 make container-image 命令部分地区访问超时的故障排除
现象如下:
```shell
langs/language.go:23:2: golang.org/x/text@v0.3.7: Get "https://proxy.golang.org/golang.org/x/text/@v/v0.3.7.zip": dial tcp 142.251.43.17:443: i/o timeout
langs/language.go:24:2: golang.org/x/text@v0.3.7: Get "https://proxy.golang.org/golang.org/x/text/@v/v0.3.7.zip": dial tcp 142.251.43.17:443: i/o timeout
common/text/transform.go:21:2: golang.org/x/text@v0.3.7: Get "https://proxy.golang.org/golang.org/x/text/@v/v0.3.7.zip": dial tcp 142.251.43.17:443: i/o timeout
common/text/transform.go:22:2: golang.org/x/text@v0.3.7: Get "https://proxy.golang.org/golang.org/x/text/@v/v0.3.7.zip": dial tcp 142.251.43.17:443: i/o timeout
common/text/transform.go:23:2: golang.org/x/text@v0.3.7: Get "https://proxy.golang.org/golang.org/x/text/@v/v0.3.7.zip": dial tcp 142.251.43.17:443: i/o timeout
hugolib/integrationtest_builder.go:29:2: golang.org/x/tools@v0.1.11: Get "https://proxy.golang.org/golang.org/x/tools/@v/v0.1.11.zip": dial tcp 142.251.42.241:443: i/o timeout
deploy/google.go:24:2: google.golang.org/api@v0.76.0: Get "https://proxy.golang.org/google.golang.org/api/@v/v0.76.0.zip": dial tcp 142.251.43.17:443: i/o timeout
parser/metadecoders/decoder.go:32:2: gopkg.in/yaml.v2@v2.4.0: Get "https://proxy.golang.org/gopkg.in/yaml.v2/@v/v2.4.0.zip": dial tcp 142.251.42.241:443: i/o timeout
The command '/bin/sh -c mkdir $HOME/src && cd $HOME/src && curl -L https://github.com/gohugoio/hugo/archive/refs/tags/v${HUGO_VERSION}.tar.gz | tar -xz && cd "hugo-${HUGO_VERSION}" && go install --tags extended' returned a non-zero code: 1
make: *** [Makefile:69container-image] error 1
```
请修改 `Dockerfile` 文件,为其添加网络代理。修改内容如下:
```dockerfile
...
FROM golang:1.18-alpine
LABEL maintainer="Luc Perkins <lperkins@linuxfoundation.org>"
ENV GO111MODULE=on # 需要添加内容1
ENV GOPROXY=https://proxy.golang.org,direct # 需要添加内容2
RUN apk add --no-cache \
curl \
gcc \
g++ \
musl-dev \
build-base \
libc6-compat
ARG HUGO_VERSION
...
```
将 "https://proxy.golang.org" 替换为本地可以使用的代理地址。
**注意:** 此部分仅适用于中国大陆
<!--
## Get involved with SIG Docs
@ -268,10 +332,10 @@ You can also reach the maintainers of this project at:
- [Get an invite for this Slack](https://slack.k8s.io/)
- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)
-->
# 参与 SIG Docs 工作
## 参与 SIG Docs 工作
通过 [社区页面](https://github.com/kubernetes/community/tree/master/sig-docs#meetings)
进一步了解 SIG Docs Kubernetes 社区和会议信息。
通过[社区页面](https://github.com/kubernetes/community/tree/master/sig-docs#meetings)进一步了解
SIG Docs Kubernetes 社区和会议信息。
你也可以通过以下渠道联系本项目的维护人员:
@ -286,16 +350,15 @@ You can click the **Fork** button in the upper-right area of the screen to creat
Once your pull request is created, a Kubernetes reviewer will take responsibility for providing clear, actionable feedback. As the owner of the pull request, **it is your responsibility to modify your pull request to address the feedback that has been provided to you by the Kubernetes reviewer.**
-->
# 为文档做贡献
## 为文档做贡献
你也可以点击屏幕右上方区域的 **Fork** 按钮,在你自己的 GitHub
账号下创建本仓库的拷贝。此拷贝被称作 _fork_
账号下创建本仓库的拷贝。此拷贝被称作 **fork**
你可以在自己的拷贝中任意地修改文档,并在你已准备好将所作修改提交给我们时,
在你自己的拷贝下创建一个拉取请求Pull Request以便让我们知道。
一旦你创建了拉取请求,某个 Kubernetes 评审人会负责提供明确的、可执行的反馈意见。
作为拉取请求的拥有者,*修改拉取请求以解决 Kubernetes
评审人所提出的反馈是你的责任*。
作为拉取请求的拥有者,**修改拉取请求以解决 Kubernetes 评审人所提出的反馈是你的责任**。
<!--
Also, note that you may end up having more than one Kubernetes reviewer provide you feedback or you may end up getting feedback from a Kubernetes reviewer that is different than the one initially assigned to provide you feedback.
@ -305,9 +368,8 @@ Furthermore, in some cases, one of your reviewers might ask for a technical revi
还要提醒的一点,有时可能会有不止一个 Kubernetes 评审人为你提供反馈意见。
有时候,某个评审人的意见和另一个最初被指派的评审人的意见不同。
更进一步,在某些时候,评审人之一可能会在需要的时候请求 Kubernetes
技术评审人来执行技术评审。
评审人会尽力及时地提供反馈意见,不过具体的响应时间可能会因时而异。
另外在某些时候,某个评审人可能会在需要的时候请求一名 Kubernetes 技术评审人来执行技术评审。
这些评审人会尽力及时地提供反馈意见,不过具体的响应时间可能会因时而异。
<!--
For more information about contributing to the Kubernetes documentation, see:
@ -319,10 +381,10 @@ For more information about contributing to the Kubernetes documentation, see:
-->
有关为 Kubernetes 文档做出贡献的更多信息,请参阅:
- [贡献 Kubernetes 文档](https://kubernetes.io/docs/contribute/)
- [页面内容类型](https://kubernetes.io/docs/contribute/style/page-content-types/)
- [文档风格指南](https://kubernetes.io/docs/contribute/style/style-guide/)
- [本地化 Kubernetes 文档](https://kubernetes.io/docs/contribute/localization/)
- [贡献 Kubernetes 文档](https://kubernetes.io/zh-cn/docs/contribute/)
- [页面内容类型](https://kubernetes.io/zh-cn/docs/contribute/style/page-content-types/)
- [文档风格指南](https://kubernetes.io/zh-cn/docs/contribute/style/style-guide/)
- [本地化 Kubernetes 文档](https://kubernetes.io/zh-cn/docs/contribute/localization/)
<!--
### New contributor ambassadors
@ -332,7 +394,7 @@ For more information about contributing to the Kubernetes documentation, see:
<!--
If you need help at any point when contributing, the [New Contributor Ambassadors](https://kubernetes.io/docs/contribute/advanced/#serve-as-a-new-contributor-ambassador) are a good point of contact. These are SIG Docs approvers whose responsibilities include mentoring new contributors and helping them through their first few pull requests. The best place to contact the New Contributors Ambassadors would be on the [Kubernetes Slack](https://slack.k8s.io/). Current New Contributors Ambassadors for SIG Docs:
-->
如果在贡献时需要帮助,[新贡献者大使](https://kubernetes.io/docs/contribute/advanced/#serve-as-a-new-contributor-ambassador)是一个很好的联系人。
如果在贡献时需要帮助,[新贡献者大使](https://kubernetes.io/zh-cn/docs/contribute/advanced/#serve-as-a-new-contributor-ambassador)是一个很好的联系人。
这些是 SIG Docs 批准者,其职责包括指导新贡献者并帮助他们完成最初的几个拉取请求。
联系新贡献者大使的最佳地点是 [Kubernetes Slack](https://slack.k8s.io/)。
SIG Docs 的当前新贡献者大使:
@ -378,16 +440,16 @@ SIG Docs 的当前新贡献者大使:
* Rui Chen ([GitHub - @chenrui333](https://github.com/chenrui333))
* He Xiaolong ([GitHub - @markthink](https://github.com/markthink))
* [Slack channel](https://kubernetes.slack.com/messages/kubernetes-docs-zh)
* [Slack 频道](https://kubernetes.slack.com/messages/kubernetes-docs-zh)
<!--
## Code of conduct
Participation in the Kubernetes community is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
Participation in the Kubernetes community is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
-->
## 行为准则
参与 Kubernetes 社区受 [CNCF 行为准则](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) 约束。
参与 Kubernetes 社区受 [CNCF 行为准则](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)约束。
<!--
## Thank you
@ -396,4 +458,4 @@ Kubernetes thrives on community participation, and we appreciate your contributi
-->
## 感谢你
Kubernetes 因为社区的参与而蓬勃发展,感谢对我们网站和文档的贡献!
Kubernetes 因为社区的参与而蓬勃发展,感谢对我们网站和文档的贡献!


@ -9,14 +9,14 @@ This repository contains the assets required to build the [Kubernetes website an
## Using this repository
You can run the website locally using Hugo (Extended version), or you can run it in a container runtime. We strongly recommend using the container runtime, as it gives deployment consistency with the live website.
You can run the website locally using [Hugo (Extended version)](https://gohugo.io/), or you can run it in a container runtime. We strongly recommend using the container runtime, as it gives deployment consistency with the live website.
## Prerequisites
To use this repository, you need the following installed locally:
- [npm](https://www.npmjs.com/)
- [Go](https://golang.org/)
- [Go](https://go.dev/)
- [Hugo (Extended version)](https://gohugo.io/)
- A container runtime, like [Docker](https://www.docker.com/).
@ -29,11 +29,18 @@ cd website
The Kubernetes website uses the [Docsy Hugo theme](https://github.com/google/docsy#readme). Even if you plan to run the website in a container, we strongly recommend pulling in the submodule and other development dependencies by running the following:
```bash
# pull in the Docsy submodule
### Windows
```powershell
# fetch submodule dependencies
git submodule update --init --recursive --depth 1
```
### Linux / other Unix
```bash
# fetch submodule dependencies
make module-init
```
## Running the website using a container
To build the site in a container, run the following:
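
The command block itself lies outside this hunk; judging from the Makefile targets and the zh-CN README shown earlier in this diff, the invocation is presumably:

```bash
# Presumably, mirroring the zh-CN section above:
make container-image
make container-serve
```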
@ -63,7 +70,7 @@ This will start the local Hugo server on port 1313. Open up your browser to <htt
## Building the API reference pages
The API reference pages located in `content/en/docs/reference/kubernetes-api` are built from the Swagger specification, using <https://github.com/kubernetes-sigs/reference-docs/tree/master/gen-resourcesdocs>.
The API reference pages located in `content/en/docs/reference/kubernetes-api` are built from the Swagger specification, also known as OpenAPI specification, using <https://github.com/kubernetes-sigs/reference-docs/tree/master/gen-resourcesdocs>.
To update the reference pages for a new Kubernetes release follow these steps:
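
The numbered steps are truncated by this hunk; the corresponding zh-CN section earlier in the diff suggests the generation itself is driven by:

```bash
# Presumably, per the zh-CN README section shown earlier:
make api-reference
```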


@ -11,5 +11,9 @@
# INSTRUCTIONS AT https://kubernetes.io/security/
divya-mohan0209
jimangel
reylejano
sftim
tengqm
onlydole
kbhawkey
natalisucks

File diff suppressed because it is too large


@ -4,6 +4,7 @@
fields:
- containers
- initContainers
- ephemeralContainers
- imagePullSecrets
- enableServiceLinks
- os
@ -20,7 +21,9 @@
- runtimeClassName
- priorityClassName
- priority
- preemptionPolicy
- topologySpreadConstraints
- overhead
- name: Lifecycle
fields:
- restartPolicy
@ -48,11 +51,11 @@
- name: Security context
fields:
- securityContext
- name: Beta level
- name: Alpha level
fields:
- ephemeralContainers
- preemptionPolicy
- overhead
- hostUsers
- resourceClaims
- schedulingGates
- name: Deprecated
fields:
- serviceAccount
@ -96,6 +99,7 @@
- initContainerStatuses
- containerStatuses
- ephemeralContainerStatuses
- resize
- definition: io.k8s.api.core.v1.Container
field_categories:
@ -124,6 +128,7 @@
- name: Resources
fields:
- resources
- resizePolicy
- name: Lifecycle
fields:
- lifecycle
@ -216,6 +221,9 @@
fields:
- volumeMounts
- volumeDevices
- name: Resources
fields:
- resizePolicy
- name: Lifecycle
fields:
- terminationMessagePath
@ -316,6 +324,7 @@
- volumeClaimTemplates
- minReadySeconds
- persistentVolumeClaimRetentionPolicy
- ordinals
- definition: io.k8s.api.apps.v1.StatefulSetUpdateStrategy
field_categories:
@ -384,6 +393,9 @@
fields:
- selector
- manualSelector
- name: Alpha level
fields:
- podFailurePolicy
- definition: io.k8s.api.batch.v1.JobStatus
field_categories:
@ -396,7 +408,7 @@
- completedIndexes
- conditions
- uncountedTerminatedPods
- name: Alpha level
- name: Beta level
fields:
- ready
@ -525,6 +537,7 @@
- cephfs
- cinder
- csi
- ephemeral
- fc
- flexVolume
- flocker
@ -539,9 +552,6 @@
- scaleIO
- storageos
- vsphereVolume
- name: Alpha level
fields:
- ephemeral
- name: Deprecated
fields:
- gitRepo
@ -591,7 +601,7 @@
- volumeName
- storageClassName
- volumeMode
- name: Alpha level
- name: Beta level
fields:
- dataSource
- dataSourceRef
@ -714,6 +724,3 @@
- resourceVersion
- selfLink
- uid
- name: Ignored
fields:
- clusterName


@ -63,12 +63,21 @@ parts:
- name: HorizontalPodAutoscaler
group: autoscaling
version: v2
- name: HorizontalPodAutoscaler
group: autoscaling
version: v2beta2
- name: PriorityClass
group: scheduling.k8s.io
version: v1
- name: PodSchedulingContext
group: resource.k8s.io
version: v1alpha2
- name: ResourceClaim
group: resource.k8s.io
version: v1alpha2
- name: ResourceClaimTemplate
group: resource.k8s.io
version: v1alpha2
- name: ResourceClass
group: resource.k8s.io
version: v1alpha2
- name: Service Resources
chapters:
- name: Service
@ -139,6 +148,12 @@ parts:
- name: CertificateSigningRequest
group: certificates.k8s.io
version: v1
- name: ClusterTrustBundle
group: certificates.k8s.io
version: v1alpha1
- name: SelfSubjectReview
group: authentication.k8s.io
version: v1beta1
- name: Authorization Resources
chapters:
- name: LocalSubjectAccessReview
@ -153,6 +168,9 @@ parts:
- name: SubjectAccessReview
group: authorization.k8s.io
version: v1
- name: SelfSubjectReview
group: authentication.k8s.io
version: v1alpha1
- name: ClusterRole
group: rbac.authorization.k8s.io
version: v1
@ -179,9 +197,9 @@ parts:
- name: PodDisruptionBudget
group: policy
version: v1
- name: PodSecurityPolicy
group: policy
version: v1beta1
- name: IPAddress
group: networking.k8s.io
version: v1alpha1
- name: Extend Resources
chapters:
- name: CustomResourceDefinition
@ -198,6 +216,12 @@ parts:
- name: ValidatingWebhookConfiguration
group: admissionregistration.k8s.io
version: v1
- name: ValidatingAdmissionPolicy
group: admissionregistration.k8s.io
version: v1alpha1
otherDefinitions:
- ValidatingAdmissionPolicyList
- ValidatingAdmissionPolicyBinding
- name: Cluster Resources
chapters:
- name: Node
@ -220,16 +244,19 @@ parts:
version: v1
- name: FlowSchema
group: flowcontrol.apiserver.k8s.io
version: v1beta2
version: v1beta3
- name: PriorityLevelConfiguration
group: flowcontrol.apiserver.k8s.io
version: v1beta2
version: v1beta3
- name: Binding
group: ""
version: v1
- name: ComponentStatus
group: ""
version: v1
- name: ClusterCIDR
group: networking.k8s.io
version: v1alpha1
- name: Common Definitions
chapters:
- name: DeleteOptions


@ -102,6 +102,16 @@ main {
}
}
::selection {
background: #326ce5;
color: white;
}
::-moz-selection {
background: #326ce5;
color: white;
}
// HEADER
#hamburger {
@ -861,6 +871,10 @@ section#cncf {
// nav-tabs and tab-content
.nav-tabs {
border-bottom: none !important;
.nav-item {
margin-bottom: 0;
}
}
.td-content .tab-content .highlight {


@ -45,6 +45,30 @@ body {
}
}
/* Complex table layout support */
.td-content, body.td-content {
table.complex-layout {
tbody tr,
tbody tr:nth-of-type(2n+1) {
/* Avoid stripes */
background-color: initial;
}
tbody tr:not(:last-child) > td[colspan] {
/* provide a visual break between rows */
padding-bottom: 1.5em;
}
tbody > tr > th[scope="row"]:first-child {
min-width: 9em;
}
tbody > tr > th[rowspan] {
vertical-align: middle;
}
border-collapse: separate;
border-spacing: 0 0;
max-width: calc(max(min(100vw, 110%), 40vw));
}
}
/* Emphasize first paragraph of running text on site front page */
body.td-home main[role="main"] > section:first-of-type .content p:first-child {
@ -163,7 +187,7 @@ body.td-404 main .error-details {
margin-top: 3.5rem !important;
}
@media only screen and (min-width: 1075px) {
@media only screen and (min-width: 1170px) {
margin-top: 1rem !important;
}
}
@ -844,6 +868,11 @@ dl {
}
}
.release-highlighted-date {
font-weight: bold;
}
.no-js .mermaid {
display: none;
}
@ -853,3 +882,37 @@ div.alert > em.javascript-required {
min-height: 1.5em;
margin: calc(max(4em, ( 8vh + 4em ) / 2)) 0 0.25em 0;
}
// Consistent spacing for yaml code
.language-yaml > span {
height: 1.6em;
}
.content__box
{
margin: 0 0 20px;
padding: 20px;
}
.content__box_lined
{
box-shadow: inset 0 0 0 1px #326de6;
}
.content__box_fill
{
color: #fff;
background: #326de6;
}
// Adjust Bing search result page
#bing-results-container {
padding: 1em;
}
#bing-pagination-container {
padding: 1em;
margin-bottom: 1em;
a.bing-page-anchor {
padding: 0.5em;
margin: 0.25em;
}
}


@ -1,24 +1,28 @@
# See https://cloud.google.com/cloud-build/docs/build-config
# this must be specified in seconds. If omitted, defaults to 600s (10 mins)
timeout: 1200s
timeout: 9000s
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
# or any new substitutions added in the future.
options:
substitution_option: ALLOW_LOOSE
machineType: 'E2_HIGHCPU_8'
steps:
# It's fine to bump the tag to a recent version, as needed
- name: "gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20210917-12df099d55"
- name: "gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20220830-45cbff55bc"
entrypoint: 'bash'
env:
- DOCKER_CLI_EXPERIMENTAL=enabled
- DOCKER_BUILDKIT=1
- DOCKER_BUILDX=/root/.docker/cli-plugins/docker-buildx
- TAG=$_GIT_TAG
- BASE_REF=$_PULL_BASE_REF
args:
- -c
- |
gcloud auth configure-docker \
&& make container-push
&& apk add sed \
&& make docker-push
substitutions:
# _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
# can be used as a substitution


@ -42,12 +42,12 @@ Kubernetes ist Open Source und bietet Dir die Freiheit, die Infrastruktur vor Or
<button id="desktopShowVideoButton" onclick="kub.showVideo()">Video ansehen</button>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe-2022/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccnceu22" button id="desktopKCButton">Besuche die KubeCon Europe vom 16. bis 20. Mai 2022</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/" button id="desktopKCButton">Besuche die KubeCon Europe vom 18. bis 21. April 2023</a>
<br>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccncna22" button id="desktopKCButton">Besuchen die KubeCon North America vom 24. bis 28. Oktober 2022</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/" button id="desktopKCButton">Besuche die KubeCon North America vom 6. bis 9. November 2023</a>
</div>
<div id="videoPlayer">
<iframe data-url="https://www.youtube.com/embed/H06qrNmGqyE?autoplay=1" frameborder="0" allowfullscreen></iframe>


@ -0,0 +1,48 @@
---
layout: blog
title: "k8s.gcr.io Image Registry Will Be Frozen From the 3rd of April 2023"
date: 2023-03-10
slug: k8s-gcr-io-freeze-announcement
---
**Authors**: Michael Mueller (Giant Swarm)
Das Kubernetes-Projekt betreibt eine zur Community gehörende Container-Image-Registry namens `registry.k8s.io`, um die zum Projekt gehörenden Container-Images zu hosten. Am 3. April 2023 wird diese Container-Image-Registry `k8s.gcr.io` eingefroren und es werden keine weiteren Container-Images für Kubernetes und Teilprojekte in die alte Registry gepusht.
Die Container-Image-Registry `registry.k8s.io` ist bereits seit einigen Monaten verfügbar und wird die alte Registry ersetzen. Wir haben einen [Blogbeitrag](/blog/2022/11/28/registry-k8s-io-faster-cheaper-ga/) über die Vorteile für die Community und das Kubernetes-Projekt veröffentlicht. In diesem Beitrag wurde auch angekündigt, dass zukünftige Versionen von Kubernetes nicht mehr in der alten Registry Released sein werden.
Was bedeutet dies für Contributors:
- Wenn Du ein Maintainer eines Teilprojekts bist, musst du die Manifeste und Helm-Charts entsprechend anpassen, um die neue Container-Registry zu verwenden.
Was bedeutet dies Änderung für Endanwender:
- Das Kubernetes Release 1.27 wird nicht auf der alten Registry veröffentlicht.
- Patchreleases für 1.24, 1.25 und 1.26 werden ab April nicht mehr in der alten Container-Image-Registry veröffentlicht. Bitte beachte den untenstehenden Zeitplan für die Details zu Patchreleases in der alten Container-Registry.
- Beginnend mit dem Release 1.25, wurde die Standardeinstellung der Container-Image-Registry auf `registry.k8s.io` geändert. Diese Einstellung kann in `kubeadm` und dem `kubelet` abgeändert werden, sollte der Wert jedoch auf `k8s.gcr.io` gesetzt werden, wird dies für neue Releases ab April fehlschlagen, da diese nicht in die alte Container-Image-Registry gepusht werden.
- Solltest Du die Zuverlässigkeit der Cluster erhöhen wollen und Abhängigkeiten zu dem zur Community gehörenden Container-Image-Registry auflösen wollen, oder betreibst Cluster in einer Umgebung mit eingeschränktem externen Netzwerkzugriff, solltest Du in Betracht ziehen eine lokale Container-Image-Registry als Mirror zu betreiben. Einige Cloud-Anbieter haben hierfür entsprechende Angebote.
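
As a hedged illustration of the kubeadm/kubelet point above (not part of the original post): on a host with kubeadm 1.25 or newer installed, the registry that will actually be used can be inspected with:

```bash
# Lists the control-plane images kubeadm would pull; for current releases the
# output should reference registry.k8s.io rather than the frozen k8s.gcr.io.
kubeadm config images list
```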
## Zeitplan der Änderungen
- `k8s.gcr.io` wird zum 3.April 2023 eingefroren
- Der 1.27 Release wird zum 12.April 2023 erwartet
- Das letzte 1.23 Release auf `k8s.gcr.io` wird 1.23.18 sein (1.23 wird end-of-life vor dem einfrieren erreichen)
- Das letzte 1.24 Release auf `k8s.gcr.io` wird 1.24.12 sein
- Das letzte 1.25 Release auf `k8s.gcr.io` wird 1.25.8 sein
- Das letzte 1.26 Release auf `k8s.gcr.io` wird 1.26.3 sein
## Was geschieht nun
Bitte stelle sicher, dass die Cluster keine Abhängigkeiten zu der Alten Container-Image-Registry haben. Dies kann zum Beispiel folgendermaßen überprüft werden, durch Ausführung des fogenden Kommandos erhält man eine Liste der Container-Images die ein Pod verwendet:
```shell
kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' |\
sort |\
uniq -c
```
Es können durchaus weitere Abhängigkeiten zu der alten Container-Image-Registry bestehen, stelle also sicher, dass du alle möglichen Abhängigkeiten überprüfst, um die Cluster funktional und auf dem neuesten Stand zu halten.
## Acknowledgments
__Change is hard__, die Weiterentwicklung unserer Container-Image-Registry ist notwendig, um eine nachhaltige Zukunft für das Projekt zu gewährleisten. Wir bemühen uns, Dinge für alle, die Kubernetes nutzen, zu verbessern. Viele Mitwirkende aus allen Ecken unserer Community haben lange und hart daran gearbeitet, sicherzustellen, dass wir die bestmöglichen Entscheidungen treffen, Pläne umsetzen und unser Bestes tun, um diese Pläne zu kommunizieren.
Dank geht an Aaron Crickenberger, Arnaud Meukam, Benjamin Elder, Caleb Woodbine, Davanum Srinivas, Mahamed Ali, und Tim Hockin von SIG K8s Infra, Brian McQueen, und Sergey Kanzhelev von SIG Node, Lubomir Ivanov von SIG Cluster Lifecycle, Adolfo García Veytia, Jeremy Rickard, Sascha Grunert, und Stephen Augustus von SIG Release, Bob Killen und Kaslin Fields von SIG Contribex, Tim Allclair von the Security Response Committee. Also a big thank you to our friends acting as liaisons with our cloud provider partners: Jay Pipes von Amazon und Jon Johnson Jr. von Google.


@ -13,64 +13,4 @@ Im Abschnitt Konzepte erfahren Sie mehr über die Bestandteile des Kubernetes-Sy
<!-- body -->
## Überblick
Um mit Kubernetes zu arbeiten, verwenden Sie *Kubernetes-API-Objekte*, um den *gewünschten Status Ihres Clusters* zu beschreiben:
welche Anwendungen oder anderen Workloads Sie ausführen möchten, welche Containerimages sie verwenden, die Anzahl der Replikate, welche Netzwerk- und Festplattenressourcen Sie zur Verfügung stellen möchten, und vieles mehr. Sie legen den gewünschten Status fest, indem Sie Objekte mithilfe der Kubernetes-API erstellen. Dies geschieht normalerweise über die Befehlszeilenschnittstelle `kubectl`. Sie können die Kubernetes-API auch direkt verwenden, um mit dem Cluster zu interagieren und den gewünschten Status festzulegen oder zu ändern.
Sobald Sie den gewünschten Status eingestellt haben, wird das *Kubernetes Control Plane* dafür sorgen, dass der aktuelle Status des Clusters mit dem gewünschten Status übereinstimmt. Zu diesem Zweck führt Kubernetes verschiedene Aufgaben automatisch aus, z. B. das Starten oder Neustarten von Containern, Skalieren der Anzahl der Repliken einer bestimmten Anwendung und vieles mehr. Das Kubernetes Control Plane besteht aus einer Reihe von Prozessen, die in Ihrem Cluster ausgeführt werden:
* Der **Kubernetes Master** bestehet aus drei Prozessen, die auf einem einzelnen Node in Ihrem Cluster ausgeführt werden, der als Master-Node bezeichnet wird. Diese Prozesse sind:[kube-apiserver](/docs/admin/kube-apiserver/), [kube-controller-manager](/docs/admin/kube-controller-manager/) und [kube-scheduler](/docs/admin/kube-scheduler/).
* Jeder einzelne Node in Ihrem Cluster, welcher nicht der Master ist, führt zwei Prozesse aus:
* **[kubelet](/docs/admin/kubelet/)**, das mit dem Kubernetes Master kommuniziert.
* **[kube-proxy](/docs/admin/kube-proxy/)**, ein Netzwerk-Proxy, der die Netzwerkdienste von Kubernetes auf jedem Node darstellt.
## Kubernetes Objects
Kubernetes enthält eine Reihe von Abstraktionen, die den Status Ihres Systems darstellen: im Container eingesetzte Anwendungen und Workloads, die zugehörigen Netzwerk- und Festplattenressourcen sowie weitere Informationen zu den Aufgaben Ihres Clusters. Diese Abstraktionen werden durch Objekte in der Kubernetes-API dargestellt; Lesen Sie [Kubernetes Objects Überblick](/docs/concepts/abstractions/overview/) für weitere Details.
Die Basisobjekte von Kubernetes umfassen:
* [Pod](/docs/concepts/workloads/pods/pod-overview/)
* [Service](/docs/concepts/services-networking/service/)
* [Volume](/docs/concepts/storage/volumes/)
* [Namespace](/docs/concepts/overview/working-with-objects/namespaces/)
Darüber hinaus enthält Kubernetes Abstraktionen auf höherer Ebene, die als Controller bezeichnet werden. Controller bauen auf den Basisobjekten auf und bieten zusätzliche Funktionen und Komfortfunktionen. Sie beinhalten:
* [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/)
* [Deployment](/docs/concepts/workloads/controllers/deployment/)
* [StatefulSet](/docs/concepts/workloads/controllers/statefulset/)
* [DaemonSet](/docs/concepts/workloads/controllers/daemonset/)
* [Job](/docs/concepts/workloads/controllers/jobs-run-to-completion/)
## Kubernetes Control Plane
Die verschiedenen Teile der Kubernetes-Steuerungsebene (Control Plane), wie der Kubernetes Master- und der Kubelet-Prozess, bestimmen, wie Kubernetes mit Ihrem Cluster kommuniziert. Das Control Plane verwaltet ein Inventar aller Kubernetes-Objekte im System und führt kontinuierlich Kontrollschleifen aus, um den Status dieser Objekte zu verwalten. Zu jeder Zeit reagieren die Kontrollschleifen des Control Plane auf Änderungen im Cluster und arbeiten daran, dass der tatsächliche Status aller Objekte im System mit dem von Ihnen definierten Status übereinstimmt.
Wenn Sie beispielsweise mit der Kubernetes-API ein Deployment-Objekt erstellen, geben Sie einen neuen gewünschten Status für das System an. Das Kubernetes Control Plane zeichnet die Objekterstellung auf und führt Ihre Anweisungen aus, indem es die erforderlichen Anwendungen startet und Sie für auf den Cluster-Nodes plant - Dadurch wird der tatsächliche Status des Clusters an den gewünschten Status angepasst.
### Kubernetes Master
Der Kubernetes-Master ist für Erhalt des gewünschten Status Ihres Clusters verantwortlich. Wenn Sie mit Kubernetes interagieren, beispielsweise mit dem Kommandozeilen-Tool `kubectl`, kommunizieren Sie mit dem Kubernetes-Master Ihres Clusters.
> Der Begriff "Master" bezeichnet dabei eine Reihe von Prozessen, die den Clusterstatus verwalten. Normalerweise werden diese Prozesse alle auf einem einzigen Node im Cluster ausgeführt. Dieser Node wird auch als Master bezeichnet. Der Master kann repliziert werden, um die Verfügbarkeit und Redundanz zu erhöhen.
### Kubernetes Nodes
Die Nodes in einem Cluster sind die Maschinen (VMs, physische Server usw.), auf denen Ihre Anwendungen und Cloud-Workflows ausgeführt werden. Der Kubernetes-Master steuert jeden Node; Sie werden selten direkt mit Nodes interagieren.
#### Objekt Metadata
* [Anmerkungen](/docs/concepts/overview/working-with-objects/annotations/)
## {{% heading "whatsnext" %}}
Wenn Sie eine Konzeptseite schreiben möchten, lesen Sie [Seitenvorlagen verwenden](/docs/home/contribute/page-templates/)
für Informationen zum Konzeptseitentyp und zur Dokumentations Vorlage.


@ -1,4 +1,6 @@
---
title: "Kubernetes Architekur"
weight: 30
description: >
Hier werden die architektonischen Konzepte von Kubernetes beschrieben.
---


@ -1,5 +1,7 @@
---
title: "Cluster Administration"
weight: 100
description: >
Tiefergreifende Details, die für das Erstellen und Administrieren eines Kubernetes Clusters relevant sind.
---


@ -21,15 +21,15 @@ Die Add-Ons in den einzelnen Kategorien sind alphabetisch sortiert - Die Reihenf
* [ACI](https://www.github.com/noironetworks/aci-containers) bietet Container-Networking und Network-Security mit Cisco ACI.
* [Calico](https://docs.projectcalico.org/latest/introduction/) ist ein Networking- und Network-Policy-Provider. Calico unterstützt eine Reihe von Networking-Optionen, damit Du die richtige für deinen Use-Case wählen kannst. Dies beinhaltet Non-Overlaying and Overlaying-Networks mit oder ohne BGP. Calico nutzt die gleiche Engine um Network-Policies für Hosts, Pods und (falls Du Istio & Envoy benutzt) Anwendungen auf Service-Mesh-Ebene durchzusetzen.
* [Canal](https://github.com/tigera/canal/tree/master/k8s-install) vereint Flannel und Calico um Networking- und Network-Policies bereitzustellen.
* [Canal](https://projectcalico.docs.tigera.io/getting-started/kubernetes/flannel/flannel) vereint Flannel und Calico um Networking- und Network-Policies bereitzustellen.
* [Cilium](https://github.com/cilium/cilium) ist ein L3 Network- and Network-Policy-Plugin welches das transparent HTTP/API/L7-Policies durchsetzen kann. Sowohl Routing- als auch Overlay/Encapsulation-Modes werden uterstützt. Außerdem kann Cilium auf andere CNI-Plugins aufsetzen.
* [CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) ermöglicht das nahtlose Verbinden von Kubernetes mit einer Reihe an CNI-Plugins wie z.B. Calico, Canal, Flannel, Romana, oder Weave.
* [CNI-Genie](https://github.com/cni-genie/CNI-Genie) ermöglicht das nahtlose Verbinden von Kubernetes mit einer Reihe an CNI-Plugins wie z.B. Calico, Canal, Flannel, Romana, oder Weave.
* [Contiv](https://contivpp.io/) bietet konfigurierbares Networking (Native L3 auf BGP, Overlay mit vxlan, Klassisches L2, Cisco-SDN/ACI) für verschiedene Anwendungszwecke und auch umfangreiches Policy-Framework. Das Contiv-Projekt ist vollständig [Open Source](http://github.com/contiv). Der [installer](http://github.com/contiv/install) bietet sowohl kubeadm als auch nicht-kubeadm basierte Installationen.
* [Contrail](http://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/), basierend auf [Tungsten Fabric](https://tungsten.io), ist eine Open Source, multi-Cloud Netzwerkvirtualisierungs- und Policy-Management Plattform. Contrail und Tungsten Fabric sind mit Orechstratoren wie z.B. Kubernetes, OpenShift, OpenStack und Mesos integriert und bieten Isolationsmodi für Virtuelle Maschinen, Container (bzw. Pods) und Bare Metal workloads.
* [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) ist ein Overlay-Network-Provider der mit Kubernetes genutzt werden kann.
* [Knitter](https://github.com/ZTE/Knitter/) ist eine Network-Lösung die Mehrfach-Network in Kubernetes ermöglicht.
* Multus ist ein Multi-Plugin für Mehrfachnetzwerk-Unterstützung um alle CNI-Plugins (z.B. Calico, Cilium, Contiv, Flannel), zusätzlich zu SRIOV-, DPDK-, OVS-DPDK- und VPP-Basierten Workloads in Kubernetes zu unterstützen.
* [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/2.0/nsxt_20_ncp_kubernetes.pdf) Container Plug-in (NCP) bietet eine Integration zwischen VMware NSX-T und einem Orchestator wie z.B. Kubernetes. Außerdem bietet es eine Integration zwischen NSX-T und Containerbasierten CaaS/PaaS-Plattformen wie z.B. Pivotal Container Service (PKS) und OpenShift.
* [Multus](https://github.com/k8snetworkplumbingwg/multus-cni) ist ein Multi-Plugin für Mehrfachnetzwerk-Unterstützung um alle CNI-Plugins (z.B. Calico, Cilium, Contiv, Flannel), zusätzlich zu SRIOV-, DPDK-, OVS-DPDK- und VPP-Basierten Workloads in Kubernetes zu unterstützen.
* [NSX-T](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/index.html) Container Plug-in (NCP) bietet eine Integration zwischen VMware NSX-T und einem Orchestator wie z.B. Kubernetes. Außerdem bietet es eine Integration zwischen NSX-T und Containerbasierten CaaS/PaaS-Plattformen wie z.B. Pivotal Container Service (PKS) und OpenShift.
* [Nuage](https://github.com/nuagenetworks/nuage-kubernetes/blob/v5.1.1-1/docs/kubernetes-1-installation.rst) ist eine SDN-Plattform die Policy-Basiertes Networking zwischen Kubernetes Pods und nicht-Kubernetes Umgebungen inklusive Sichtbarkeit und Security-Monitoring bereitstellt.
* [Romana](https://github.com/romana/romana) ist eine Layer 3 Network-Lösung für Pod-Netzwerke welche auch die [NetworkPolicy API](/docs/concepts/services-networking/network-policies/) unterstützt. Details zur Installation als kubeadm Add-On sind [hier](https://github.com/romana/romana/tree/master/containerize) verfügbar.
* [Weave Net](https://www.weave.works/docs/net/latest/kube-addon/) bietet Networking and Network-Policies und arbeitet auf beiden Seiten der Network-Partition ohne auf eine externe Datenbank angwiesen zu sein.


@ -23,7 +23,7 @@ Es gibt mehrere verschiedene Proxies, die die bei der Verwendung von Kubernetes
- lokalisiert den API Server
- fügt Authentifizierungs-Header hinzu
1. Der [API Server Proxy](/docs/tasks/access-application-cluster/access-cluster/#discovering-builtin-services):
1. Der [API Server Proxy](/docs/tasks/access-application-cluster/access-cluster-services/#discovering-builtin-services):
- ist eine Bastion, die in den API Server eingebaut ist
- verbindet einen Benutzer außerhalb des Clusters mit Cluster IPs, die sonst möglicherweise nicht erreichbar wären


@ -1,5 +1,7 @@
---
title: "Konfiguration"
weight: 80
description: >
Resourcen, die bei der Konfiguration von Pods in Kubernetes nützlich sind.
---


@ -0,0 +1,9 @@
---
title: Resourcen-Verwaltung für Pods und Container
content_type: concept
weight: 40
feature:
title: Automatisches Bin Packing
description: >
Container können je nach Systemanforderungen auf spezifischen Nodes ausgeführt werden. Somit kann eine effiziente Nutzung von Ressourcen erreicht werden.
---


@ -0,0 +1,9 @@
---
title: Secrets
content_type: concept
feature:
title: Verwaltung von Secrets und Konfigurationen
description: >
Deploye und aktualisiere Secrets sowie Anwendungskonfigurationen, ohne ein Image neu zu bauen oder Secrets preiszugeben.
weight: 30
---


@ -1,5 +1,7 @@
---
title: "Container"
weight: 40
description: >
Methoden, um Anwendungen und ihre Abhängigkeiten zu zusammenzufassen.
---


@ -5,7 +5,7 @@ weight: 10
---
<!-- overview -->
Sie erstellen ihr Docker Image und laden es in eine Registry hoch, bevor es in einem Kubernetes Pod referenziert werden kann.
Sie erstellen Ihr Docker Image und laden es in eine Registry hoch, bevor es in einem Kubernetes Pod referenziert werden kann.
Die `image` Eigenschaft eines Containers unterstüzt die gleiche Syntax wie die des `docker` Kommandos, inklusive privater Registries und Tags.
@ -16,7 +16,7 @@ Die `image` Eigenschaft eines Containers unterstüzt die gleiche Syntax wie die
## Aktualisieren von Images
Die Standardregel für das Herunterladen von Images ist `IfNotPresent`, dies führt dazu, dass das Kubelet Images überspringt, die bereits auf einem Node vorliegen.
Die Standardregel für das Herunterladen von Images ist `IfNotPresent`, dies führt dazu, dass das Image wird nur heruntergeladen wenn es noch nicht lokal verfügbar ist.
Wenn sie stattdessen möchten, dass ein Image immer forciert heruntergeladen wird, können sie folgendes tun:
@ -75,7 +75,7 @@ Authentifizierungsdaten können auf verschiedene Weisen hinterlegt werden:
- Alle Pods können jedes gecachte Image auf einem Node nutzen
- Setzt root - Zugriff auf allen Nodes zum Einrichten voraus
- Spezifizieren eines ImagePullSecrets auf einem Pod
- Nur Pods, die eigene Secret tragen, haben Zugriff auf eine private Registry
- Nur Pods, die eigene Secrets tragen, haben Zugriff auf eine private Registry
Jede Option wird im Folgenden im Detail beschrieben
@ -246,7 +246,7 @@ Falls jedoch die `imagePullPolicy` Eigenschaft der Containers auf `IfNotPresent`
Wenn Sie sich auf im Voraus heruntergeladene Images als Ersatz für eine Registry - Authentifizierung verlassen möchten, müssen sie sicherstellen, dass alle Knoten die gleichen, im Voraus heruntergeladenen Images aufweisen.
Diese Medthode kann dazu genutzt werden, bestimmte Images aus Geschwindigkeitsgründen im Voraus zu laden, oder als Alternative zur Authentifizierung an einer eigenen Registry zu nutzen.
Diese Methode kann dazu genutzt werden, bestimmte Images aus Geschwindigkeitsgründen im Voraus zu laden, oder als Alternative zur Authentifizierung an einer eigenen Registry zu nutzen.
Alle Pods haben Leserechte auf alle im Voraus geladenen Images.
@ -277,7 +277,7 @@ Pods können nur eigene Image Pull Secret in ihrem eigenen Namespace referenzier
#### Referenzierung eines imagePullSecrets bei einem Pod
Nun können Sie Pods erstellen, die dieses Secret referenzieren, indem Sie einen Aschnitt `imagePullSecrets` zu ihrer Pod - Definition hinzufügen.
Nun können Sie Pods erstellen, die dieses Secret referenzieren, indem Sie einen Abschnitt `imagePullSecrets` zu ihrer Pod - Definition hinzufügen.
```shell
cat <<EOF > pod.yaml

View File

@ -2,6 +2,9 @@
title: Konzept Dokumentations-Vorlage
content_type: concept
toc_hide: true
description: >
Wenn Sie eine Konzeptseite schreiben möchten, lesen Sie [Seitenvorlagen verwenden](/docs/home/contribute/page-templates/)
für Informationen zum Konzeptseitentyp und zur Dokumentations-Vorlage.
---
<!-- overview -->

View File

@ -1,4 +1,10 @@
---
title: "Kubernetes erweitern"
weight: 110
feature:
title: Für Erweiterungen entworfen
description: >
Kubernetes kann ohne Änderungen am Upstream-Quelltext erweitert werden.
description: >
Verschiedene Wege, um die Funktionalität von Kubernetes zu erweitern.
---

View File

@ -1,5 +1,9 @@
---
title: "Überblick"
weight: 20
description: >
Kubernetes ist eine portable, erweiterbare und quelloffene Plattform, um containerisierte Arbeitslasten und Dienste zu verwalten.
Dies wird mithilfe von Automatisierungen und deklarativen Konfigurationen erreicht. Kubernetes hat ein großes, schnell wachsendes Ökosystem.
Dienstleistungen, Hilfestellungen und Tools für Kubernetes sind weit verbreitet.
---

View File

@ -54,7 +54,7 @@ die Entwicklern und Anwendern zur Verfügung stehen. Benutzer können ihre eigen
ihren [eigenen APIs](/docs/concepts/api-extension/custom-resources/) schreiben, die von einem
universellen [Kommandozeilen-Tool](/docs/user-guide/kubectl-overview/) angesprochen werden können.
Dieses [Design](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md) hat es einer Reihe anderer Systeme ermöglicht, auf Kubernetes aufzubauen.
Dieses [Design](https://git.k8s.io/design-proposals-archive/architecture/architecture.md) hat es einer Reihe anderer Systeme ermöglicht, auf Kubernetes aufzubauen.
## Was Kubernetes nicht ist

View File

@ -1,5 +1,7 @@
---
title: "Richtlinien"
weight: 90
description: >
Sie können Richtlinien erstellen, die Ressourcen-Gruppen zugewiesen werden können.
---

View File

@ -1,5 +1,7 @@
---
title: "Dienste, Lastverteilung und Netzwerkfunktionen"
weight: 60
description: >
Konzepte und Ressourcen bezüglich Netzwerktechnik in Kubernetes
---

View File

@ -0,0 +1,18 @@
---
title: IPv4/IPv6 dual-stack
description: >-
Kubernetes erlaubt Netzwerkkonfigurationen mit IPv4 oder IPv6 (Single Stack).
Im Dual-Stack-Betrieb kann IPv4 im Verbund mit IPv6 verwendet werden.
feature:
title: IPv4/IPv6 Dual-Stack
description: >
Pods und Dienste können gleichzeitig IPv4- und IPv6-Adressen verwenden.
content_type: concept
reviewers:
- lachie83
- khenidak
- aramase
- bridgetkromhout
weight: 90
---

View File

@ -0,0 +1,12 @@
---
title: Services
feature:
title: Service-Discovery und Load Balancing
description: >
Anwendungen müssen keinen komplizierten Mechanismus für Service-Discovery verwenden. Kubernetes verteilt IP-Adressen und DNS-Einträge automatisch an Pods und übernimmt auch das Load Balancing.
description: >-
Veröffentliche deine Applikation über einen einzelnen, nach außen sichtbaren Endpunkt,
auch wenn die Workload über mehrere Backends verteilt ist.
content_type: concept
weight: 10
---

View File

@ -1,5 +1,7 @@
---
title: "Speicher"
weight: 70
description: >
Methoden, um volatilen oder persistenten Speicher für Pods im Cluster zur Verfügung zu stellen.
---

View File

@ -0,0 +1,10 @@
---
title: Persistente Volumes
feature:
title: Speicher-Orchestrierung
description: >
Binde automatisch deinen gewünschten Speicher ein. Egal, ob lokaler Speicher, Speicher eines Cloud Providers (z.B. AWS oder GCP) oder ein Netzwerkspeicher (z.B. NFS, iSCSI, Ceph oder Cinder).
content_type: concept
weight: 10
---

View File

@ -1,5 +1,8 @@
---
title: "Workloads"
weight: 50
description: >
Informationen über Pods, die kleinsten Einheiten, die in Kubernetes bereitgestellt werden können und
über Abstraktionen, die hierbei behilflich sind.
---

View File

@ -0,0 +1,10 @@
---
title: Deployments
feature:
title: Automatisierte Rollouts und Rollbacks
description: >
Kubernetes wendet Änderungen an deinen Anwendungen oder seiner eigenen Konfiguration stufenweise an. Währenddessen achtet es darauf, dass nicht alle Instanzen der Anwendung zur gleichen Zeit beeinträchtigt werden. Falls etwas schief geht, macht Kubernetes die Änderungen rückgängig.
content_type: concept
weight: 10
---

View File

@ -0,0 +1,9 @@
---
title: Jobs
content_type: concept
feature:
title: Stapelweise Ausführung
description: >
Neben Diensten kann Kubernetes auch die stapelweise Ausführung von Workloads verwalten. Im Falle eines Fehlers können Container ausgetauscht werden.
weight: 50
---

View File

@ -0,0 +1,10 @@
---
title: ReplicaSet
feature:
title: Selbstheilung
anchor: Funktionsweise eines ReplicaSets
description: >
Container werden mithilfe von Health-Checks überwacht und im Falle eines Fehlers neu gestartet. Sie werden erst wieder verwendet, wenn sie komplett einsatzbereit sind.
content_type: concept
weight: 20
---

View File

@ -130,13 +130,13 @@ enthaltenen Container bereit:
Du wirst selten einzelne Pods direkt in Kubernetes erstellen, selbst
Singleton-Pods. Das liegt daran, dass Pods als relativ kurzlebige
Einweg-Einheiten konzipiert sind. Wann Ein Pod erstellt wird (entweder direkt
Einweg-Einheiten konzipiert sind. Wenn ein Pod erstellt wird (entweder direkt
von Ihnen oder indirekt von einem
{{<glossary_tooltip text="Controller" term_id="controller">}}), wird die
Ausführung auf einem {{<glossary_tooltip term_id="node">}} in Ihrem Cluster
geplant. Der Pod bleibt auf diesem (virtuellen) Server, bis entweder der Pod die
Ausführung beendet hat, das Pod-Objekt gelöscht wird, der Pod aufgrund
mangelnder Ressourcen *evakuiert* wird oder oder der Node ausfällt.
mangelnder Ressourcen *evakuiert* wird oder der Node ausfällt.
{{< note >}}
Das Neustarten eines Containers in einem Pod sollte nicht mit dem Neustarten
@ -163,20 +163,20 @@ verwalten:
* {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}}
* {{< glossary_tooltip text="DaemonSet" term_id="daemonset" >}}
### Pod Vorlagen
### Pod-Vorlagen
Controller für
{{<glossary_tooltip text="Workload" term_id="workload">}}-Ressourcen
erstellen Pods von einer _Pod Vorlage_ und verwalten diese Pods für dich.
erstellen Pods von einer _Pod-Vorlage_ und verwalten diese Pods für dich.
Pod Vorlagen sind Spezifikationen zum Erstellen von Pods und sind in
Pod-Vorlagen sind Spezifikationen zum Erstellen von Pods und sind in
Workload-Ressourcen enthalten wie z. B.
[Deployments](/docs/concepts/workloads/controllers/deployment/),
[Jobs](/docs/concepts/workloads/controllers/job/), and
[DaemonSets](/docs/concepts/workloads/controllers/daemonset/).
Jeder Controller für eine Workload-Ressource verwendet die Pod Vorlage innerhalb
des Workload-Objektes, um Pods zu erzeugen. Die Pod Vorlage ist Teil des
Jeder Controller für eine Workload-Ressource verwendet die Pod-Vorlage innerhalb
des Workload-Objektes, um Pods zu erzeugen. Die Pod-Vorlage ist Teil des
gewünschten Zustands der Workload-Ressource, mit der du deine Anwendung
ausgeführt hast.
@ -191,29 +191,29 @@ metadata:
name: hello
spec:
template:
# Dies is the Pod Vorlage
# Dies ist die Pod-Vorlage
spec:
containers:
- name: hello
image: busybox
command: ['sh', '-c', 'echo "Hello, Kubernetes!" && sleep 3600']
restartPolicy: OnFailure
# Die Pod Vorlage endet hier
# Die Pod-Vorlage endet hier
```
Das Ändern der Pod Vorlage oder der Wechsel zu einer neuen Pod Vorlage hat keine
direkten Auswirkungen auf bereits existierende Pods. Wenn du die Pod Vorlage für
Das Ändern der Pod-Vorlage oder der Wechsel zu einer neuen Pod-Vorlage hat keine
direkten Auswirkungen auf bereits existierende Pods. Wenn du die Pod-Vorlage für
eine Workload-Ressource änderst, dann muss diese Ressource die Ersatz-Pods
erstellen, welche die aktualisierte Vorlage verwenden.
Beispielsweise stellt der StatefulSet-Controller sicher, dass für jedes
StatefulSet-Objekt die ausgeführten Pods mit der aktueller Pod Vorlage
StatefulSet-Objekt die ausgeführten Pods mit der aktuellen Pod-Vorlage
übereinstimmen. Wenn du das StatefulSet bearbeitest und die Vorlage änderst,
beginnt das StatefulSet mit der Erstellung neuer Pods basierend auf der
aktualisierten Vorlage. Schließlich werden alle alten Pods durch neue Pods
ersetzt, und das Update ist abgeschlossen.
Jede Workload-Ressource implementiert eigene Regeln für die Umsetzung von
Änderungen der Pod Vorlage. Wenn du mehr über StatefulSet erfahren möchtest,
Änderungen der Pod-Vorlage. Wenn du mehr über StatefulSet erfahren möchtest,
dann lese die Seite
[Update-Strategien](/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets)
im Tutorial StatefulSet Basics.
@ -221,7 +221,7 @@ im Tutorial StatefulSet Basics.
Auf Nodes beobachtet oder verwaltet das
{{< glossary_tooltip term_id="kubelet" text="Kubelet" >}}
nicht direkt die Details zu Pod Vorlagen und Updates. Diese Details sind
nicht direkt die Details zu Pod-Vorlagen und Updates. Diese Details sind
abstrahiert. Die Abstraktion und Trennung von Aufgaben vereinfacht die
Systemsemantik und ermöglicht so das Verhalten des Clusters zu ändern ohne
vorhandenen Code zu ändern.
@ -229,7 +229,7 @@ vorhandenen Code zu ändern.
## Pod Update und Austausch
Wie im vorherigen Abschnitt erwähnt, erstellt der Controller neue Pods basierend
auf der aktualisierten Vorlage, wenn die Pod Vorlage für eine Workload-Ressource
auf der aktualisierten Vorlage, wenn die Pod-Vorlage für eine Workload-Ressource
geändert wird anstatt die vorhandenen Pods zu aktualisieren oder zu patchen.
Kubernetes hindert dich nicht daran, Pods direkt zu verwalten. Es ist möglich,
@ -366,4 +366,4 @@ kannst du Artikel zu früheren Technologien lesen, unter anderem:
* [Borg](https://research.google.com/pubs/pub43438.html)
* [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html)
* [Omega](https://research.google/pubs/pub41684/)
* [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/).
* [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/).

View File

@ -81,9 +81,9 @@ Du kannst auch einen Slack-Kanal für deine Lokalisierung im `kubernetes/communi
### Ändere die Website-Konfiguration
Die Kubernetes-Website verwendet Hugo als Web-Framework. Die Hugo-Konfiguration der Website befindet sich in der Datei [`config.toml`](https://github.com/kubernetes/website/tree/master/config.toml). Um eine neue Lokalisierung zu unterstützen, musst du die Datei `config.toml` modifizieren.
Die Kubernetes-Website verwendet Hugo als Web-Framework. Die Hugo-Konfiguration der Website befindet sich in der Datei [`hugo.toml`](https://github.com/kubernetes/website/tree/master/hugo.toml). Um eine neue Lokalisierung zu unterstützen, musst du die Datei `hugo.toml` modifizieren.
Dazu fügst du einen neuen Block für die neue Sprache unter den bereits existierenden `[languages]` Block in das `config.toml` ein, wie folgendes Beispiel zeigt:
Dazu fügst du einen neuen Block für die neue Sprache unter den bereits existierenden `[languages]` Block in das `hugo.toml` ein, wie folgendes Beispiel zeigt:
```toml
[languages.de]
@ -111,7 +111,7 @@ mkdir content/de
### Lokalisierungs README Datei hinzufügen
Um andere Lokalisierungsmitwirkende anzuleiten, füge eine neue [`README-**.md`](https://help.github.com/articles/about-readmes/) auf der obersten Ebene von k/website hinzu, wobei `**` der aus zwei Buchstaben bestehende Sprachcode ist. Eine deutsche README-Datei wäre zum Beispiel `README-de.md`.
Um andere Lokalisierungsmitwirkende anzuleiten, füge eine neue [`README-**.md`](https://help.github.com/articles/about-readmes/) auf der obersten Ebene von kubernetes/website hinzu, wobei `**` der aus zwei Buchstaben bestehende Sprachcode ist. Eine deutsche README-Datei wäre zum Beispiel `README-de.md`.
Gib den Lokalisierungsmitwirkenden in der lokalisierten `README-**.md`-Datei eine Anleitung zum Mitwirken. Füge dieselben Informationen ein, die auch in `README.md` enthalten sind, sowie:
@ -262,7 +262,7 @@ In einem Entwicklungszweig zusammenzuarbeiten:
`dev-<Quellversion>-<Sprachcode>.<Team-Meilenstein>`
Beispielsweise öffnet ein Genehmigender in einem deutschen Lokalisierungsteam den Entwicklungszweig `dev-1.12-de.1` direkt gegen das k/website-Repository, basierend auf dem Quellzweig für Kubernetes v1.12.
Beispielsweise öffnet ein Genehmigender in einem deutschen Lokalisierungsteam den Entwicklungszweig `dev-1.12-de.1` direkt gegen das kubernetes/website-Repository, basierend auf dem Quellzweig für Kubernetes v1.12.
2. Einzelne Mitwirkende öffnen Feature-Zweige, die auf dem Entwicklungszweig basieren.

View File

@ -41,7 +41,7 @@ GitHub-Teams und OWNERS-Dateien.
Es gibt zwei Kategorien von SIG Docs [Teams](https://github.com/orgs/kubernetes/teams?query=sig-docs) auf GitHub:
- `@sig-docs-{language}-owners` sind Genehmiger und Verantwortliche
- `@sig-docs-{language}-reviewers` sind Reviewer
- `@sig-docs-{language}-reviews` sind Reviewer
Jede Gruppe kann in GitHub-Kommentaren mit ihrem `@name` referenziert werden, um mit allen Mitgliedern dieser Gruppe zu kommunizieren.

View File

@ -56,6 +56,6 @@ Offiziell unterstützte Clientbibliotheken:
## Design Dokumentation
Ein Archiv der Designdokumente für Kubernetes-Funktionalität. Gute Ansatzpunkte sind [Kubernetes Architektur](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md) und [Kubernetes Design Übersicht](https://git.k8s.io/community/contributors/design-proposals).
Ein Archiv der Designdokumente für Kubernetes-Funktionalität. Gute Ansatzpunkte sind [Kubernetes Architektur](https://git.k8s.io/design-proposals-archive/architecture/architecture.md) und [Kubernetes Design Übersicht](https://git.k8s.io/community/contributors/design-proposals).

View File

@ -34,7 +34,7 @@ Benutzen Sie eine Docker-basierende Lösung, wenn Sie Kubernetes erlernen wollen
| | [IBM Cloud Private-CE (Community Edition)](https://github.com/IBM/deploy-ibm-cloud-private) |
| | [IBM Cloud Private-CE (Community Edition) on Linux Containers](https://github.com/HSBawa/icp-ce-on-linux-containers)|
| | [k3s](https://k3s.io)|
{{< /table >}}
## Produktionsumgebung
@ -98,5 +98,6 @@ Die folgende Tabelle für Produktionsumgebungs-Lösungen listet Anbieter und der
| [VEXXHOST](https://vexxhost.com/) | &#x2714; | &#x2714; | | | |
| [VMware](https://cloud.vmware.com/) | [VMware Cloud PKS](https://cloud.vmware.com/vmware-cloud-pks) |[VMware Enterprise PKS](https://cloud.vmware.com/vmware-enterprise-pks) | [VMware Enterprise PKS](https://cloud.vmware.com/vmware-enterprise-pks) | [VMware Essential PKS](https://cloud.vmware.com/vmware-essential-pks) | |[VMware Essential PKS](https://cloud.vmware.com/vmware-essential-pks)
| [Z.A.R.V.I.S.](https://zarvis.ai/) | &#x2714; | | | | | |
{{< /table >}}

View File

@ -52,7 +52,7 @@ Creating machine...
Starting local Kubernetes cluster...
```
```shell
kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.10
kubectl create deployment hello-minikube --image=registry.k8s.io/echoserver:1.10
```
```
deployment.apps/hello-minikube created
@ -424,7 +424,7 @@ export no_proxy=$no_proxy,$(minikube ip)
Minikube verwendet [libmachine](https://github.com/docker/machine/tree/master/libmachine) zur Bereitstellung von VMs, und [kubeadm](https://github.com/kubernetes/kubeadm) um einen Kubernetes-Cluster in Betrieb zu nehmen.
Weitere Informationen zu Minikube finden Sie im [Vorschlag](https://git.k8s.io/community/contributors/design-proposals/cluster-lifecycle/local-cluster-ux.md).
Weitere Informationen zu Minikube finden Sie im [Vorschlag](https://git.k8s.io/design-proposals-archive/cluster-lifecycle/local-cluster-ux.md).
## Zusätzliche Links

View File

@ -27,6 +27,6 @@ cd kubernetes
make release
```
Mehr Informationen zum Release-Prozess finden Sie im kubernetes/kubernetes [`build`](http://releases.k8s.io/{{< param "githubbranch" >}}/build/) Verzeichnis.
Mehr Informationen zum Release-Prozess finden Sie im kubernetes/kubernetes [`build`](http://releases.k8s.io/master/build/) Verzeichnis.

View File

@ -11,7 +11,7 @@ weight: 90
<!-- overview -->
Der Horizontal Pod Autoscaler skaliert automatisch die Anzahl der Pods eines Replication Controller, Deployment oder Replikat Set basierend auf der beobachteten CPU-Auslastung (oder, mit Unterstützung von [benutzerdefinierter Metriken](https://git.k8s.io/community/contributors/design-proposals/instrumentation/custom-metrics-api.md), von der Anwendung bereitgestellten Metriken). Beachte, dass die horizontale Pod Autoskalierung nicht für Objekte gilt, die nicht skaliert werden können, z. B. DaemonSets.
Der Horizontal Pod Autoscaler skaliert automatisch die Anzahl der Pods eines Replication Controller, Deployment oder Replikat Set basierend auf der beobachteten CPU-Auslastung (oder, mit Unterstützung von [benutzerdefinierter Metriken](https://git.k8s.io/design-proposals-archive/instrumentation/custom-metrics-api.md), von der Anwendung bereitgestellten Metriken). Beachte, dass die horizontale Pod Autoskalierung nicht für Objekte gilt, die nicht skaliert werden können, z. B. DaemonSets.
Der Horizontal Pod Autoscaler ist als Kubernetes API-Ressource und als Controller implementiert.
Die Ressource bestimmt das Verhalten des Controllers.
@ -46,7 +46,7 @@ Das Verwenden von Metriken aus Heapster ist seit der Kubernetes Version 1.11 ver
Siehe [Unterstützung der Metrik APIs](#unterstützung-der-metrik-apis) für weitere Details.
Der Autoscaler greift über die Scale Sub-Ressource auf die entsprechenden skalierbaren Controller (z.B. Replication Controller, Deployments und Replika Sets) zu. Scale ist eine Schnittstelle, mit der Sie die Anzahl der Replikate dynamisch einstellen und jeden ihrer aktuellen Zustände untersuchen können. Weitere Details zu der Scale Sub-Ressource findest du [hier](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#scale-subresource).
Der Autoscaler greift über die Scale Sub-Ressource auf die entsprechenden skalierbaren Controller (z.B. Replication Controller, Deployments und Replika Sets) zu. Scale ist eine Schnittstelle, mit der Sie die Anzahl der Replikate dynamisch einstellen und jeden ihrer aktuellen Zustände untersuchen können. Weitere Details zu der Scale Sub-Ressource findest du [hier](https://git.k8s.io/design-proposals-archive/autoscaling/horizontal-pod-autoscaler.md#scale-subresource).
### Details zum Algorithmus
@ -90,7 +90,7 @@ Die aktuelle stabile Version, die nur die Unterstützung für die automatische S
Die Beta-Version, welche die Skalierung des Speichers und benutzerdefinierte Metriken unterstützt, befindet sich unter `autoscaling/v2beta2`. Die in `autoscaling/v2beta2` neu eingeführten Felder bleiben bei der Arbeit mit `autoscaling/v1` als Anmerkungen erhalten.
Weitere Details über das API Objekt kann unter dem [HorizontalPodAutoscaler Objekt](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object) gefunden werden.
Weitere Details über das API Objekt können unter dem [HorizontalPodAutoscaler Objekt](https://git.k8s.io/design-proposals-archive/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object) gefunden werden.
## Unterstützung des Horizontal Pod Autoscaler in kubectl
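Zur Veranschaulichung eine kurze Skizze (der Deployment-Name `php-apache` ist nur ein Beispiel): ein Horizontal Pod Autoscaler lässt sich direkt mit `kubectl autoscale` anlegen und anschließend beobachten.

```shell
# HPA anlegen: hält das Deployment zwischen 1 und 10 Replikaten
# und strebt dabei eine durchschnittliche CPU-Auslastung von 50 % an
kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10

# Aktuellen Zustand des Autoscalers anzeigen
kubectl get hpa
```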
@ -154,7 +154,7 @@ Standardmäßig ruft der HorizontalPodAutoscaler Controller Metriken aus einer R
Es kann als Cluster-Addon gestartet werden.
* Für benutzerdefinierte Metriken ist dies die API `custom.metrics.k8s.io`. Diese wird vom "Adapter" API Servern bereitgestellt, welches von Anbietern von Metrik Lösungen beliefert wird.
Überprüfe dies mit deiner Metrik Pipeline oder der [Liste bekannter Lösungen](https://github.com/kubernetes/metrics/blob/master/IMPLEMENTATIONS.md#custom-metrics-api).
Überprüfe dies mit deiner Metrik Pipeline.
Falls du deinen eigenen schreiben möchtest hilft dir folgender [boilerplate](https://github.com/kubernetes-incubator/custom-metrics-apiserver) um zu starten.
* Für externe Metriken ist dies die `external.metrics.k8s.io` API. Es kann sein, dass dies durch den benutzerdefinierten Metrik Adapter bereitgestellt wird.
@ -166,7 +166,7 @@ Standardmäßig ruft der HorizontalPodAutoscaler Controller Metriken aus einer R
## {{% heading "whatsnext" %}}
* Design Dokument [Horizontal Pod Autoscaling](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md).
* Design Dokument [Horizontal Pod Autoscaling](https://git.k8s.io/design-proposals-archive/autoscaling/horizontal-pod-autoscaler.md).
* kubectl autoscale Befehl: [kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands/#autoscale).
* Verwenden des [Horizontal Pod Autoscaler](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/).

View File

@ -1,5 +0,0 @@
---
title: "Service Catalog installieren"
weight: 150
---

View File

@ -3,3 +3,25 @@ title: "Werkzeuge installieren"
weight: 10
---
## kubectl
Das Kubernetes Befehlszeilenprogramm [kubectl](/docs/user-guide/kubectl/) ermöglicht es Ihnen, Befehle auf einem Kubernetes-Cluster auszuführen. Sie können mit kubectl Anwendungen bereitstellen, Cluster-Ressourcen überwachen und verwalten sowie Logs einsehen.
Weitere Informationen über alle verfügbaren `kubectl`-Befehle finden Sie in der [Kommandoreferenz von kubectl](/docs/reference/kubectl/).
`kubectl` kann unter Linux, macOS und Windows installiert werden. [Hier](install-kubectl) finden Sie Anleitungen zur Installation von `kubectl`.
## kind
Mit [`kind`](https://kind.sigs.k8s.io/) können Sie Kubernetes lokal auf Ihrem Computer ausführen. Voraussetzung hierfür ist eine konfigurierte und funktionierende [Docker](https://docs.docker.com/get-docker/)-Installation.
Die `kind` [Schnellstart](https://kind.sigs.k8s.io/docs/user/quick-start/)-Seite gibt Informationen darüber, was für den schnellen Einstieg mit `kind` benötigt wird.
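Als kleine Skizze: ein lokales Cluster lässt sich mit `kind` typischerweise mit wenigen Befehlen anlegen und wieder entfernen (der Clustername `test` ist frei gewählt):

```shell
# Lokales Kubernetes-Cluster in Docker-Containern erstellen
kind create cluster --name test

# Cluster wieder löschen
kind delete cluster --name test
```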
## minikube
Ähnlich wie `kind` ist [`minikube`](https://minikube.sigs.k8s.io/) ein Tool, mit dem man Kubernetes lokal auf dem Computer ausführen kann. Minikube erstellt Cluster mit einem oder mehreren Nodes. Somit ist es ein praktisches Tool für tägliche Entwicklungsaktivitäten mit Kubernetes oder um Kubernetes einfach einmal lokal auszuprobieren.
[Hier](/install-minikube) erfahren Sie, wie Sie `minikube` auf Ihrem Computer installieren können.
Falls Sie `minikube` bereits installiert haben, können Sie es verwenden, um eine [Beispiel-Anwendung bereitzustellen](/docs/tutorials/hello-minikube/).
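Auch hier eine kurze Skizze der typischen Befehle:

```shell
# Lokales Cluster starten
minikube start

# Status prüfen und das Cluster wieder stoppen
minikube status
minikube stop
```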
## kubeadm
Mit `kubeadm` können Sie Kubernetes-Cluster erstellen und verwalten. `kubeadm` führt alle notwendigen Schritte aus, um ein minimales, aber sicheres Cluster auf benutzerfreundliche Art und Weise aufzusetzen.
[Auf dieser Seite](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) finden Sie Anleitungen zur Installation von `kubeadm`.
Sobald Sie `kubeadm` installiert haben, erfahren Sie [hier](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) wie man ein Cluster mit `kubeadm` erstellt.
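Eine stark verkürzte Skizze des Ablaufs auf der ersten Control-Plane-Node (Voraussetzungen und Details stehen in der verlinkten Anleitung):

```shell
# Control Plane initialisieren
sudo kubeadm init

# kubeconfig für den aktuellen Benutzer einrichten
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
```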

View File

@ -0,0 +1,10 @@
---
title: "Enthaltene Tools"
description: "Snippets, die in die Hauptseiten von kubectl-installs-*.md aufgenommen werden."
headless: true
toc_hide: true
_build:
list: never
render: never
publishResources: false
---

View File

@ -0,0 +1,17 @@
---
title: "kubectl-convert Übersicht"
description: >-
Ein kubectl Plugin welches es ermöglicht, Manifeste von einer Version
der Kubernetes API zu einer anderen zu konvertieren.
headless: true
_build:
list: never
render: never
publishResources: false
---
Ein Plugin für das Kubernetes Kommandozeilentool `kubectl`, welches es ermöglicht, Manifeste von einer Version der
Kubernetes API zu einer anderen zu konvertieren. Das kann zum Beispiel hilfreich sein, um Manifeste zu einer nicht als veraltet (deprecated)
markierten API-Version mit einem neueren Kubernetes Release zu migrieren.
Weitere Infos finden sich unter: [zu nicht veralteten APIs migrieren](/docs/reference/using-api/deprecation-guide/#migrate-to-non-deprecated-apis)
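Eine kurze Anwendungsskizze (Dateiname `deployment.yaml` und Zielversion `apps/v1` sind nur Beispiele):

```shell
# Manifest in eine andere API-Version konvertieren und auf der Standardausgabe ausgeben
kubectl convert -f deployment.yaml --output-version apps/v1
```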

View File

@ -0,0 +1,16 @@
---
title: "Wie geht es weiter?"
description: "Wie geht es nach der Installation von Kubectl weiter."
headless: true
_build:
list: never
render: never
publishResources: false
---
* [Minikube installieren](https://minikube.sigs.k8s.io/docs/start/)
* [Installations Guides](/docs/setup/) ansehen um mehr über die Clustererstellung zu erfahren.
* [Lernen wie man Applikationen startet und erreichbar macht.](/docs/tasks/access-application-cluster/service-access-application-cluster/)
* Falls Zugriff auf ein Cluster benötigt wird, welches nicht von einem selbst erstellt wurde, könnte
[Clusterzugang teilen](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) interessant sein.
* Lies die [kubectl Referenzdokumentation](/docs/reference/kubectl/kubectl/)

View File

@ -0,0 +1,23 @@
---
title: "fish Autovervollständigung"
description: "Optionale Konfiguration um die fish shell Autovervollständigung einzurichten."
headless: true
_build:
list: never
render: never
publishResources: false
---
{{< note >}}
Autovervollständigung für Fish benötigt kubectl 1.23 oder neuer.
{{< /note >}}
Das kubectl Autovervollständigungsskript für Fish kann mit folgendem Befehl `kubectl completion fish` generiert werden. Mit dem Befehl `kubectl completion fish | source` wird die Autovervollständigung in der aktuellen Sitzung aktiviert.
Um die Autovervollständigung in allen Sitzungen einzurichten, muss folgender Befehl in die `~/.config/fish/config.fish` Datei eingetragen werden:
```shell
kubectl completion fish | source
```
Nach dem Neuladen der Shell, sollte die kubectl Autovervollständigung funktionieren.

View File

@ -0,0 +1,28 @@
---
title: "zsh Autovervollständigung"
description: "Optionale Konfiguration der zsh Autovervollständigung."
headless: true
_build:
list: never
render: never
publishResources: false
---
Das kubectl Autovervollständigungsskript für Zsh kann mit folgendem Befehl `kubectl completion zsh` generiert werden. Mit dem Befehl `kubectl completion zsh | source` wird die Autovervollständigung in der aktuellen Sitzung aktiviert.
Um die Autovervollständigung in allen Sitzungen einzurichten, muss folgender Befehl in die `~/.zshrc` Datei eingetragen werden:
```zsh
source <(kubectl completion zsh)
```
Falls ein Alias für kubectl eingerichtet wurde, funktioniert die kubectl Autovervollständigung automatisch.
Nach dem Neuladen der Shell, sollte die kubectl Autovervollständigung funktionieren.
Sollte ein Fehler auftreten wie dieser: `2: command not found: compdef`, muss Folgendes am Anfang der `~/.zshrc` Datei eingefügt werden:
```zsh
autoload -Uz compinit
compinit
```

View File

@ -0,0 +1,38 @@
---
title: "kubectl installation verifizieren"
description: "Wie die kubectl Installation verifiziert wird."
headless: true
_build:
list: never
render: never
publishResources: false
---
Um mithilfe von kubectl ein Cluster zu finden und darauf zuzugreifen, wird eine
[kubeconfig Datei](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) benötigt.
Diese wird automatisch angelegt, wenn ein Cluster mit dem Skript
[kube-up.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/kube-up.sh)
oder erfolgreich mit Minikube erstellt wurde.
Standardmäßig liegt die kubectl Konfigurationsdatei unter folgendem Pfad: `~/.kube/config`.
Um zu überprüfen, ob kubectl korrekt konfiguriert ist, kann der Cluster-Status abgefragt werden:
```shell
kubectl cluster-info
```
Wenn als Antwort eine URL ausgegeben wird, ist kubectl korrekt konfiguriert und kann auf das Cluster zugreifen.
Falls eine Nachricht ähnlich wie die Folgende zu sehen ist, ist kubectl nicht korrekt konfiguriert oder nicht in der Lage das Cluster zu erreichen.
```
The connection to the server <server-name:port> was refused - did you specify the right host or port?
```
Wenn zum Beispiel versucht wird, ein Kubernetes Cluster lokal auf dem Laptop zu starten, muss zuerst ein Tool wie Minikube installiert werden. Danach können die oben erwähnten Befehle erneut ausgeführt werden.
Falls `kubectl cluster-info` eine URL zurückgibt, aber kein Zugriff auf das Cluster möglich ist, prüfe, ob kubectl korrekt konfiguriert wurde:
```shell
kubectl cluster-info dump
```

View File

@ -0,0 +1,287 @@
---
title: Kubectl installieren und konfigurieren auf Linux
content_type: task
weight: 10
card:
name: tasks
weight: 20
title: Kubectl auf Linux installieren
---
## {{% heading "prerequisites" %}}
Um kubectl zu verwenden, darf die kubectl-Version nicht mehr als eine Minor-Version Unterschied zu dem Cluster aufweisen. Zum Beispiel: eine Client-Version v{{< skew currentVersion >}} kann mit den Versionen v{{< skew currentVersionAddMinor -1 >}}, v{{< skew currentVersionAddMinor 0 >}} und v{{< skew currentVersionAddMinor 1 >}} kommunizieren.
Die Verwendung der neuesten kompatiblen Version von kubectl hilft, unvorhergesehene Probleme zu vermeiden.
## Kubectl auf Linux installieren
Um kubectl auf Linux zu installieren, gibt es die folgenden Möglichkeiten:
- [{{% heading "prerequisites" %}}](#-heading-prerequisites-)
- [Kubectl auf Linux installieren](#kubectl-auf-linux-installieren)
- [Kubectl Binary mit curl auf Linux installieren](#kubectl-binary-mit-curl-auf-linux-installieren)
- [Installieren mit Hilfe des Linux eigenen Paketmanagers](#installieren-mit-hilfe-des-linux-eigenen-paketmanagers)
- [Installation mit anderen Paketmanagern](#installation-mit-anderen-paketmanagern)
- [Kubectl Konfiguration verifizieren](#kubectl-konfiguration-verifizieren)
- [Optionale kubectl Konfigurationen und Plugins](#optionale-kubectl-konfigurationen-und-plugins)
- [Shell Autovervollständigung einbinden](#shell-autovervollständigung-einbinden)
- [`kubectl-convert` Plugin installieren](#kubectl-convert-plugin-installieren)
- [{{% heading "whatsnext" %}}](#-heading-whatsnext-)
### Kubectl Binary mit curl auf Linux installieren
1. Das aktuellste Release downloaden:
```bash
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
```
{{< note >}}
Um eine spezifische Version herunterzuladen, ersetze `$(curl -L -s https://dl.k8s.io/release/stable.txt)` mit der spezifischen Version.
Um zum Beispiel Version {{< param "fullversion" >}} auf Linux herunterzuladen:
```bash
curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/linux/amd64/kubectl
```
{{< /note >}}
2. Binary validieren (optional)
Download der kubectl Checksum-Datei:
```bash
curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
```
Kubectl Binary mit der Checksum-Datei validieren:
```bash
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
```
Ist die Datei valide, sieht die Ausgabe wie folgt aus:
```console
kubectl: OK
```
Falls die Validierung fehlschlägt, beendet sich `sha256sum` mit einem Status ungleich null und gibt einen Fehler aus, der so aussehen könnte:
```bash
kubectl: FAILED
sha256sum: WARNING: 1 computed checksum did NOT match
```
{{< note >}}
Lade immer dieselbe Version von kubectl Binary und Checksum-Datei herunter.
{{< /note >}}
3. kubectl installieren
```bash
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
```
{{< note >}}
Wenn kein root Zugriff auf das Zielsystem möglich ist, kann kubectl in das Verzeichnis `~/.local/bin` installiert werden:
```bash
chmod +x kubectl
mkdir -p ~/.local/bin
mv ./kubectl ~/.local/bin/kubectl
# und ~/.local/bin zur Umgebungsvariable $PATH hinzufügen
```
{{< /note >}}
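Das Anpassen der `$PATH`-Variable könnte für Bash zum Beispiel so aussehen (die Konfigurationsdatei kann je nach Shell abweichen):

```bash
# ~/.local/bin dauerhaft zur PATH-Variable hinzufügen
echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc
source ~/.bashrc
```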
4. Prüfen, ob die installierte Version aktuell ist:
```bash
kubectl version --client
```
{{< note >}}
Der oben stehende Befehl wirft folgende Warnung:
```
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short.
```
Diese Warnung kann ignoriert werden. Prüfe lediglich die `kubectl` Version, welche installiert wurde.
{{< /note >}}
Oder benutze diesen Befehl für eine detailliertere Ansicht:
```cmd
kubectl version --client --output=yaml
```
### Installieren mit Hilfe des Linux eigenen Paketmanagers
{{< tabs name="kubectl_install" >}}
{{% tab name="Debian-basierte Distributionen" %}}
1. Update des `apt` Paketindex und Installation der benötigten Pakete, um das Kubernetes `apt` Repository zu nutzen:
```shell
sudo apt-get update
sudo apt-get install -y ca-certificates curl
```
Falls Debian 9 (stretch) oder älter genutzt wird, muss zusätzlich das Paket `apt-transport-https` installiert werden:
```shell
sudo apt-get install -y apt-transport-https
```
2. Den öffentlichen Google Cloud Signaturschlüssel herunterladen:
```shell
sudo curl -fsSLo /etc/apt/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
```
3. Das Kubernetes `apt` Repository hinzufügen:
```shell
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
```
4. Den `apt` Paketindex mit dem neuen Repository updaten und kubectl installieren:
```shell
sudo apt-get update
sudo apt-get install -y kubectl
```
{{< note >}}
In Releases älter als Debian 12 und Ubuntu 22.04 existiert das Verzeichnis `/etc/apt/keyrings` standardmäßig nicht.
Falls es benötigt wird, kann es angelegt werden. Es sollte danach von jedermann lesbar, aber nur von Admins schreibbar sein.
{{< /note >}}
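Falls das Verzeichnis fehlt, könnte das Anlegen zum Beispiel so aussehen (Skizze):

```shell
# Verzeichnis für Paket-Schlüssel anlegen: für alle lesbar, nur für root schreibbar
sudo mkdir -p -m 755 /etc/apt/keyrings
```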
{{% /tab %}}
{{% tab name="Red Hat-basierte Distributionen" %}}
```bash
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
sudo yum install -y kubectl
```
{{% /tab %}}
{{< /tabs >}}
### Installation mit anderen Paketmanagern
{{< tabs name="other_kubectl_install" >}}
{{% tab name="Snap" %}}
Falls Ubuntu oder eine andere Linux Distribution verwendet wird, die den [snap](https://snapcraft.io/docs/core/install) Paketmanager unterstützt, kann kubectl als [snap](https://snapcraft.io/) Anwendung installiert werden.
```shell
snap install kubectl --classic
kubectl version --client
```
{{% /tab %}}
{{% tab name="Homebrew" %}}
Falls in Linux [Homebrew](https://docs.brew.sh/Homebrew-on-Linux) als
Paketmanager genutzt wird, kann kubectl über diesen [installiert](https://docs.brew.sh/Homebrew-on-Linux#install) werden.
```shell
brew install kubectl
kubectl version --client
```
{{% /tab %}}
{{< /tabs >}}
## Kubectl Konfiguration verifizieren
{{< include "included/verify-kubectl.md" >}}
## Optionale kubectl Konfigurationen und Plugins
### Shell Autovervollständigung einbinden
kubectl stellt Autovervollständigungen für Bash, Zsh, Fish und PowerShell zur Verfügung, mit denen sich Kommandozeilenbefehle beschleunigen lassen.
Untenstehend ist beschrieben, wie die Autovervollständigungen für Fish und Zsh eingebunden werden; ein Beispiel für Bash folgt nach den Tabs.
{{< tabs name="kubectl_autocompletion" >}}
{{< tab name="Fish" include="included/optional-kubectl-configs-fish.md" />}}
{{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}}
{{< /tabs >}}
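Für Bash genügt, als Skizze und unter der Annahme, dass das Paket `bash-completion` installiert ist, ein Eintrag in der `~/.bashrc`:

```bash
# kubectl Autovervollständigung für Bash dauerhaft aktivieren
echo 'source <(kubectl completion bash)' >> ~/.bashrc
```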
### `kubectl-convert` Plugin installieren
{{< include "included/kubectl-convert-overview.md" >}}
1. Neueste Version des Kommandozeilenbefehls herunterladen:
```bash
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert"
```
2. Binär-Datei validieren (optional)
Download der kubectl-convert Checksum-Datei:
```bash
curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256"
```
Kubectl-convert Binary mit der Checksum-Datei validieren:
```bash
echo "$(cat kubectl-convert.sha256) kubectl-convert" | sha256sum --check
```
Ist die Datei valide, sieht die Ausgabe wie folgt aus:
```console
kubectl-convert: OK
```
Falls die Validierung fehlschlägt, beendet sich `sha256sum` mit einem Status ungleich null und gibt einen Fehler aus, der so aussehen könnte:
```bash
kubectl-convert: FAILED
sha256sum: WARNING: 1 computed checksum did NOT match
```
{{< note >}}
Lade immer dieselbe Version von kubectl-convert Binary und Checksum-Datei herunter.
{{< /note >}}
3. kubectl-convert installieren
```bash
sudo install -o root -g root -m 0755 kubectl-convert /usr/local/bin/kubectl-convert
```
4. Verifizieren, dass das Plugin erfolgreich installiert wurde:
```shell
kubectl convert --help
```
Wenn kein Fehler ausgegeben wird, ist das Plugin erfolgreich installiert worden.
5. Nach der Installation des Plugins die Installationsdateien aufräumen:
```bash
rm kubectl-convert kubectl-convert.sha256
```
## {{% heading "whatsnext" %}}
{{< include "included/kubectl-whats-next.md" >}}

View File

@ -0,0 +1,284 @@
---
title: Kubectl installieren und konfigurieren auf macOS
content_type: task
weight: 10
card:
name: tasks
weight: 20
title: Kubectl auf macOS installieren
---
## {{% heading "prerequisites" %}}
Um kubectl zu verwenden, darf die kubectl-Version nicht mehr als eine Minor-Version Unterschied zu deinem Cluster aufweisen. Zum Beispiel: eine Client-Version v{{< skew currentVersion >}} kann mit den Versionen v{{< skew currentVersionAddMinor -1 >}}, v{{< skew currentVersionAddMinor 0 >}} und v{{< skew currentVersionAddMinor 1 >}} kommunizieren.
Die Verwendung der neuesten kompatiblen Version von kubectl hilft, unvorhergesehene Probleme zu vermeiden.
## Kubectl auf macOS installieren
Um kubectl auf macOS zu installieren, gibt es die folgenden Möglichkeiten:
- [{{% heading "prerequisites" %}}](#-heading-prerequisites-)
- [Kubectl auf macOS installieren](#kubectl-auf-macos-installieren)
- [Kubectl Binary mit curl auf macOS installieren](#kubectl-binary-mit-curl-auf-macos-installieren)
- [Mit Homebrew auf macOS installieren](#mit-homebrew-auf-macos-installieren)
- [Mit Macports auf macOS installieren](#mit-macports-auf-macos-installieren)
- [Kubectl Konfiguration verifizieren](#kubectl-konfiguration-verifizieren)
- [Optionale kubectl Konfigurationen und Plugins](#optionale-kubectl-konfigurationen-und-plugins)
- [Shell Autovervollständigung einbinden](#shell-autovervollständigung-einbinden)
- [`kubectl-convert` Plugin installieren](#kubectl-convert-plugin-installieren)
- [{{% heading "whatsnext" %}}](#-heading-whatsnext-)
### Kubectl Binary mit curl auf macOS installieren
1. Das aktuellste Release downloaden:
{{< tabs name="download_binary_macos" >}}
{{< tab name="Intel" codelang="bash" >}}
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl"
{{< /tab >}}
{{< tab name="Apple Silicon" codelang="bash" >}}
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl"
{{< /tab >}}
{{< /tabs >}}
{{< note >}}
Um eine spezifische Version herunterzuladen, ersetze `$(curl -L -s https://dl.k8s.io/release/stable.txt)` mit der spezifischen Version
Um zum Beispiel Version {{< param "fullversion" >}} auf Intel macOS herunterzuladen:
```bash
curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl"
```
Für macOS auf Apple Silicon (z.B. M1/M2):
```bash
curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/arm64/kubectl"
```
{{< /note >}}
2. Binary validieren (optional)
Download der kubectl Checksum-Datei:
{{< tabs name="download_checksum_macos" >}}
{{< tab name="Intel" codelang="bash" >}}
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl.sha256"
{{< /tab >}}
{{< tab name="Apple Silicon" codelang="bash" >}}
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl.sha256"
{{< /tab >}}
{{< /tabs >}}
Kubectl Binary mit der Checksum-Datei validieren:
```bash
echo "$(cat kubectl.sha256) kubectl" | shasum -a 256 --check
```
Ist die Datei valide, sieht die Ausgabe wie folgt aus:
```console
kubectl: OK
```
Falls die Validierung fehlschlägt, beendet sich `shasum` mit einem "nonzero"-Status und gibt einen Fehler aus, welcher so aussehen könnte:
```bash
kubectl: FAILED
shasum: WARNING: 1 computed checksum did NOT match
```
{{< note >}}
Lade immer dieselbe Version von kubectl Binary und Checksum-Datei herunter.
{{< /note >}}
3. Kubectl Binary ausführbar machen.
```bash
chmod +x ./kubectl
```
4. Kubectl Binary zu einem Ordner in Ihrem `PATH` verschieben.
```bash
sudo mv ./kubectl /usr/local/bin/kubectl
sudo chown root: /usr/local/bin/kubectl
```
{{< note >}}
Stelle sicher, dass `/usr/local/bin` in deiner PATH Umgebungsvariable gesetzt ist.
{{< /note >}}
5. Prüfen, ob die installierte Version aktuell ist:
```bash
kubectl version --client
```
{{< note >}}
Der oben stehende Befehl wirft folgende Warnung:
```
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short.
```
Diese Warnung kann ignoriert werden. Prüfe lediglich die `kubectl` Version, welche installiert wurde.
{{< /note >}}
Oder benutze diesen Befehl für eine detailliertere Ansicht:
```cmd
kubectl version --client --output=yaml
```
6. Nach der Installation die Installationsdateien aufräumen:
```bash
rm kubectl kubectl.sha256
```
### Mit Homebrew auf macOS installieren
Wenn macOS und [Homebrew](https://brew.sh/) als Paketmanager benutzt wird,
kann kubectl über diesen installiert werden.
1. Führe den Installationsbefehl aus:
```bash
brew install kubectl
```
oder
```bash
brew install kubernetes-cli
```
2. Prüfen, ob die installierte Version aktuell ist:
```bash
kubectl version --client
```
### Mit Macports auf macOS installieren
Wenn macOS und [Macports](https://macports.org/) als Paketmanager benutzt wird, kann kubectl über diesen installiert werden.
1. Führe den Installationsbefehl aus:
```bash
sudo port selfupdate
sudo port install kubectl
```
2. Prüfen, ob die installierte Version aktuell ist:
```bash
kubectl version --client
```
## Kubectl Konfiguration verifizieren
{{< include "included/verify-kubectl.md" >}}
## Optionale kubectl Konfigurationen und Plugins
### Shell Autovervollständigung einbinden
kubectl stellt Autovervollständigungen für Bash, Zsh, Fish und Powershell zur Verfügung, mit welchem Kommandozeilenbefehle beschleunigt werden können.
Untenstehend ist beschrieben, wie die Autovervollständigungen für Fish und Zsh eingebunden werden.
{{< tabs name="kubectl_autocompletion" >}}
{{< tab name="Fish" include="included/optional-kubectl-configs-fish.md" />}}
{{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}}
{{< /tabs >}}
### `kubectl-convert` Plugin installieren
{{< include "included/kubectl-convert-overview.md" >}}
1. Neueste Version des Kommandozeilenbefehls herunterladen:
{{< tabs name="download_convert_binary_macos" >}}
{{< tab name="Intel" codelang="bash" >}}
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert"
{{< /tab >}}
{{< tab name="Apple Silicon" codelang="bash" >}}
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert"
{{< /tab >}}
{{< /tabs >}}
2. Binär-Datei validieren (optional)
Download der kubectl-convert Checksum-Datei:
{{< tabs name="download_convert_checksum_macos" >}}
{{< tab name="Intel" codelang="bash" >}}
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert.sha256"
{{< /tab >}}
{{< tab name="Apple Silicon" codelang="bash" >}}
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert.sha256"
{{< /tab >}}
{{< /tabs >}}
Validierung der kubectl-convert Binary mit der Checksum-Datei:
```bash
echo "$(cat kubectl-convert.sha256) kubectl-convert" | shasum -a 256 --check
```
Ist die Datei valide, sieht die Ausgabe wie folgt aus:
```console
kubectl-convert: OK
```
Falls die Validierung fehlschlägt, beendet sich `shasum` mit einem "nonzero"-Status und gibt einen Fehler aus, welcher so aussehen könnte:
```bash
kubectl-convert: FAILED
shasum: WARNING: 1 computed checksum did NOT match
```
{{< note >}}
Lade immer dieselbe Version von kubectl-convert Binary und Checksum-Datei herunter.
{{< /note >}}
3. Kubectl-convert Binary ausführbar machen
```bash
chmod +x ./kubectl-convert
```
4. Kubectl-convert Binary in einen Ordner verschieben, der in der `PATH` Umgebungsvariable enthalten ist.
```bash
sudo mv ./kubectl-convert /usr/local/bin/kubectl-convert
sudo chown root: /usr/local/bin/kubectl-convert
```
{{< note >}}
Stelle sicher, dass `/usr/local/bin` in der PATH Umgebungsvariable gesetzt ist.
{{< /note >}}
5. Verifizieren, dass das Plugin erfolgreich installiert wurde:
```shell
kubectl convert --help
```
Wenn kein Fehler ausgegeben wird, ist das Plugin erfolgreich installiert worden.
6. Nach der Installation des Plugins die Installationsdateien aufräumen:
```bash
rm kubectl-convert kubectl-convert.sha256
```
## {{% heading "whatsnext" %}}
{{< include "included/kubectl-whats-next.md" >}}

View File

@ -30,8 +30,8 @@ Nachfolgend finden Sie einige Methoden zur Installation von kubectl.
{{< tabs name="kubectl_install" >}}
{{< tab name="Ubuntu, Debian oder HypriotOS" codelang="bash" >}}
sudo apt-get update && sudo apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmour -o /usr/share/keyrings/kubernetes.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/kubernetes.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl
{{< /tab >}}
@ -42,7 +42,7 @@ baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubectl
{{< /tab >}}
@ -192,15 +192,15 @@ Sie können kubectl als Teil des Google Cloud SDK installieren.
1. Laden Sie die neueste Version herunter:
```
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl
curl -LO https://dl.k8s.io/release/$(curl -LS https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl
```
Um eine bestimmte Version herunterzuladen, ersetzen Sie den Befehlsteil `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` mit der jeweiligen Version.
Um eine bestimmte Version herunterzuladen, ersetzen Sie den Befehlsteil `$(curl -LS https://dl.k8s.io/release/stable.txt)` mit der jeweiligen Version.
Um beispielsweise die Version {{< param "fullversion" >}} auf macOS herunterzuladen, verwenden Sie den folgenden Befehl:
```
curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl
curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl
```
2. Machen Sie die kubectl-Binärdatei ausführbar.
@ -220,15 +220,15 @@ Sie können kubectl als Teil des Google Cloud SDK installieren.
1. Laden Sie die neueste Version mit dem Befehl herunter:
```
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
curl -LO https://dl.k8s.io/release/$(curl -LS https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl
```
Um eine bestimmte Version herunterzuladen, ersetzen Sie den Befehlsteil `$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)` mit der jeweiligen Version.
Um eine bestimmte Version herunterzuladen, ersetzen Sie den Befehlsteil `$(curl -LS https://dl.k8s.io/release/stable.txt)` mit der jeweiligen Version.
Um beispielsweise die Version {{< param "fullversion" >}} auf Linux herunterzuladen, verwenden Sie den folgenden Befehl:
```
curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/linux/amd64/kubectl
curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/linux/amd64/kubectl
```
2. Machen Sie die kubectl-Binärdatei ausführbar.
@ -244,15 +244,15 @@ Sie können kubectl als Teil des Google Cloud SDK installieren.
```
{{% /tab %}}
{{% tab name="Windows" %}}
1. Laden Sie das aktuellste Release {{< param "fullversion" >}} von [diesem link](https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe) herunter.
1. Laden Sie das aktuellste Release {{< param "fullversion" >}} von [diesem link](https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe) herunter.
Oder, sofern Sie `curl` installiert haben, verwenden Sie den folgenden Befehl:
```
curl -LO https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe
curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe
```
Informationen zur aktuellen stabilen Version (z. B. für scripting) finden Sie unter [https://storage.googleapis.com/kubernetes-release/release/stable.txt](https://storage.googleapis.com/kubernetes-release/release/stable.txt).
Informationen zur aktuellen stabilen Version (z. B. für scripting) finden Sie unter [https://dl.k8s.io/release/stable.txt](https://dl.k8s.io/release/stable.txt).
2. Verschieben Sie die Binärdatei in Ihren PATH.
{{% /tab %}}

View File

@ -77,7 +77,7 @@ Deployments sind die empfohlene Methode zum Verwalten der Erstellung und Skalier
Der Pod führt einen Container basierend auf dem bereitgestellten Docker-Image aus.
```shell
kubectl create deployment hello-node --image=k8s.gcr.io/echoserver:1.4
kubectl create deployment hello-node --image=registry.k8s.io/echoserver:1.4
```
2. Anzeigen des Deployments:

View File

@ -8,7 +8,7 @@ sitemap:
{{< blocks/section id="oceanNodes" >}}
{{% blocks/feature image="flower" %}}
[Kubernetes]({{< relref "/docs/concepts/overview/what-is-kubernetes" >}}), also known as K8s, is an open-source system for automating deployment, scaling, and management of containerized applications.
[Kubernetes]({{< relref "/docs/concepts/overview/" >}}), also known as K8s, is an open-source system for automating deployment, scaling, and management of containerized applications.
It groups containers that make up an application into logical units for easy management and discovery. Kubernetes builds upon [15 years of experience of running production workloads at Google](http://queue.acm.org/detail.cfm?id=2898444), combined with best-of-breed ideas and practices from the community.
{{% /blocks/feature %}}
@ -16,7 +16,7 @@ It groups containers that make up an application into logical units for easy man
{{% blocks/feature image="scalable" %}}
#### Planet Scale
Designed on the same principles that allows Google to run billions of containers a week, Kubernetes can scale without increasing your ops team.
Designed on the same principles that allow Google to run billions of containers a week, Kubernetes can scale without increasing your operations team.
{{% /blocks/feature %}}
@ -30,7 +30,9 @@ Whether testing locally or running a global enterprise, Kubernetes flexibility g
{{% blocks/feature image="suitcase" %}}
#### Run K8s Anywhere
Kubernetes is open source giving you the freedom to take advantage of on-premises, hybrid, or public cloud infrastructure, letting you effortlessly move workloads to where it matters to you.
Kubernetes is open source giving you the freedom to take advantage of on-premises, hybrid, or public cloud infrastructure, letting you effortlessly move workloads to where it matters to you.
To download Kubernetes, visit the [download](/releases/download/) section.
{{% /blocks/feature %}}
@ -43,12 +45,12 @@ Kubernetes is open source giving you the freedom to take advantage of on-premise
<button id="desktopShowVideoButton" onclick="kub.showVideo()">Watch Video</button>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccncna22" button id="desktopKCButton">Attend KubeCon North America on October 24-28, 2022</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/" button id="desktopKCButton">Attend KubeCon + CloudNativeCon Europe on April 18-21, 2023</a>
<br>
<br>
<br>
<br>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe-2023/?utm_source=kubernetes.io&utm_medium=nav&utm_campaign=kccnceu23" button id="desktopKCButton">Attend KubeCon Europe on April 17-21, 2023</a>
<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/" button id="desktopKCButton">Attend KubeCon + CloudNativeCon North America on November 6-9, 2023</a>
</div>
<div id="videoPlayer">
<iframe data-url="https://www.youtube.com/embed/H06qrNmGqyE?autoplay=1" frameborder="0" allowfullscreen></iframe>

View File

@ -16,7 +16,7 @@ To give you a flavor, here are four Kubernetes features that came from our exper
1) [Pods](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/pods.md). A pod is the unit of scheduling in Kubernetes. It is a resource envelope in which one or more containers run. Containers that are part of the same pod are guaranteed to be scheduled together onto the same machine, and can share state via local volumes.
1) [Pods](/docs/concepts/workloads/pods/). A pod is the unit of scheduling in Kubernetes. It is a resource envelope in which one or more containers run. Containers that are part of the same pod are guaranteed to be scheduled together onto the same machine, and can share state via local volumes.
@ -24,15 +24,15 @@ Borg has a similar abstraction, called an alloc (short for “resource allocatio
2) [Services](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md). Although Borgs primary role is to manage the lifecycles of tasks and machines, the applications that run on Borg benefit from many other cluster services, including naming and load balancing. Kubernetes supports naming and load balancing using the service abstraction: a service has a name and maps to a dynamic set of pods defined by a label selector (see next section). Any container in the cluster can connect to the service using the service name. Under the covers, Kubernetes automatically load-balances connections to the service among the pods that match the label selector, and keeps track of where the pods are running as they get rescheduled over time due to failures.
2) [Services](/docs/concepts/services-networking/service/). Although Borgs primary role is to manage the lifecycles of tasks and machines, the applications that run on Borg benefit from many other cluster services, including naming and load balancing. Kubernetes supports naming and load balancing using the service abstraction: a service has a name and maps to a dynamic set of pods defined by a label selector (see next section). Any container in the cluster can connect to the service using the service name. Under the covers, Kubernetes automatically load-balances connections to the service among the pods that match the label selector, and keeps track of where the pods are running as they get rescheduled over time due to failures.
3) [Labels](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/labels.md). A container in Borg is usually one replica in a collection of identical or nearly identical containers that correspond to one tier of an Internet service (e.g. the front-ends for Google Maps) or to the workers of a batch job (e.g. a MapReduce). The collection is called a Job, and each replica is called a Task. While the Job is a very useful abstraction, it can be limiting. For example, users often want to manage their entire service (composed of many Jobs) as a single entity, or to uniformly manage several related instances of their service, for example separate canary and stable release tracks. At the other end of the spectrum, users frequently want to reason about and control subsets of tasks within a Job -- the most common example is during rolling updates, when different subsets of the Job need to have different configurations.
3) [Labels](/docs/concepts/overview/working-with-objects/labels/). A container in Borg is usually one replica in a collection of identical or nearly identical containers that correspond to one tier of an Internet service (e.g. the front-ends for Google Maps) or to the workers of a batch job (e.g. a MapReduce). The collection is called a Job, and each replica is called a Task. While the Job is a very useful abstraction, it can be limiting. For example, users often want to manage their entire service (composed of many Jobs) as a single entity, or to uniformly manage several related instances of their service, for example separate canary and stable release tracks. At the other end of the spectrum, users frequently want to reason about and control subsets of tasks within a Job -- the most common example is during rolling updates, when different subsets of the Job need to have different configurations.
Kubernetes supports more flexible collections than Borg by organizing pods using labels, which are arbitrary key/value pairs that users attach to pods (and in fact to any object in the system). Users can create groupings equivalent to Borg Jobs by using a “job:\<jobname\>” label on their pods, but they can also use additional labels to tag the service name, service instance (production, staging, test), and in general, any subset of their pods. A label query (called a “label selector”) is used to select which set of pods an operation should be applied to. Taken together, labels and [replication controllers](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/replication-controller.md) allow for very flexible update semantics, as well as for operations that span the equivalent of Borg Jobs.
Kubernetes supports more flexible collections than Borg by organizing pods using labels, which are arbitrary key/value pairs that users attach to pods (and in fact to any object in the system). Users can create groupings equivalent to Borg Jobs by using a “job:\<jobname\>” label on their pods, but they can also use additional labels to tag the service name, service instance (production, staging, test), and in general, any subset of their pods. A label query (called a “label selector”) is used to select which set of pods an operation should be applied to. Taken together, labels and [replication controllers](/docs/concepts/workloads/controllers/replicationcontroller/) allow for very flexible update semantics, as well as for operations that span the equivalent of Borg Jobs.
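As a small illustration of labels and label selectors in practice (the label keys, values, and pod names here are invented for the example):
```console
# tag two pods as replicas of the same logical job, on separate release tracks
kubectl label pods frontend-1 job=frontend track=stable
kubectl label pods frontend-2 job=frontend track=canary

# operate on the whole group with a label selector...
kubectl get pods -l job=frontend

# ...or narrow the selection to a subset, e.g. only the canary track
kubectl get pods -l job=frontend,track=canary
```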

View File

@ -1,87 +1,91 @@
---
title: " Some things you didnt know about kubectl "
title: "Some things you didnt know about kubectl"
date: 2015-10-28
slug: some-things-you-didnt-know-about-kubectl_28
url: /blog/2015/10/Some-Things-You-Didnt-Know-About-Kubectl_28
---
[kubectl](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/kubectl-overview.md) is the command line tool for interacting with Kubernetes clusters. Many people use it every day to deploy their container workloads into production clusters. But theres more to kubectl than just `kubectl create -f or kubectl rolling-update`. kubectl is a veritable multi-tool of container orchestration and management. Below we describe some of the features of kubectl that you may not have seen.
**Important Note** : Most of these features are part of the upcoming 1.1 release of Kubernetes. They are not present in the current stable 1.0.x release series.
**Author:** Brendan Burns (Google)
[kubectl](/docs/reference/kubectl/) is the command line tool for interacting with Kubernetes clusters. Many people use it every day to deploy their container workloads into production clusters. But theres more to kubectl than just `kubectl create -f` or `kubectl rolling-update`. kubectl is a veritable multi-tool of container orchestration and management. Below we describe some of the features of kubectl that you may not have seen.
##### Run interactive commands
## Run interactive commands
`kubectl run` has been in kubectl since the 1.0 release, but recently we added the ability to run interactive containers in your cluster. That means that an interactive shell in your Kubernetes cluster is as close as:
```
$> kubectl run -i --tty busybox --image=busybox --restart=Never -- sh 
Waiting for pod default/busybox-tv9rm to be running, status is Pending, pod ready: false 
Waiting for pod default/busybox-tv9rm to be running, status is Running, pod ready: false 
```console
$> kubectl run -i --tty busybox --image=busybox --restart=Never -- sh
Waiting for pod default/busybox-tv9rm to be running, status is Pending, pod ready: false
Waiting for pod default/busybox-tv9rm to be running, status is Running, pod ready: false
$> # ls 
bin dev etc home proc root sys tmp usr var 
$> # exit
$> # exit
```
The above `kubectl` command is equivalent to `docker run -i -t busybox sh.` Sadly we mistakenly used `-t` for template in kubectl 1.0, so we need to retain backwards compatibility with existing CLI user. But the existing use of `-t` is deprecated and well eventually shorten `--tty` to `-t`.
In this example, `-i` indicates that you want an allocated `stdin` for your container and indicates that you want an interactive session, `--restart=Never` indicates that the container shouldnt be restarted after you exit the terminal and `--tty` requests that you allocate a TTY for that session.
The above `kubectl` command is equivalent to `docker run -i -t busybox sh`. Sadly we mistakenly used `-t` for template in kubectl 1.0, so we need to retain backwards compatibility with existing CLI users. But the existing use of `-t` is deprecated and well eventually shorten `--tty` to `-t`.
In this example, `-i` indicates that you want an allocated `stdin` for your container and signals an interactive session, `--restart=Never` indicates that the container shouldnt be restarted after you exit the terminal, and `--tty` requests that you allocate a TTY for that session.
##### View your Pods logs
## View your Pods logs
Sometimes you just want to watch whats going on in your server. For this, `kubectl logs` is the subcommand to use. Adding the -f flag lets you live stream new logs to your terminal, just like tail -f.
$> kubectl logs -f redis-izl09
Sometimes you just want to watch whats going on in your server. For this, `kubectl logs` is the subcommand to use. Adding the `-f` flag lets you live stream new logs to your terminal, just like `tail -f`.
##### Attach to existing containers
```console
$> kubectl logs -f redis-izl09
```
In addition to interactive execution of commands, you can now also attach to any running process. Like kubectl logs, youll get stderr and stdout data, but with attach, youll also be able to send stdin from your terminal to the program. Awesome for interactive debugging, or even just sending ctrl-c to a misbehaving application.
## Attach to existing containers
$> kubectl attach redis -i
In addition to interactive execution of commands, you can now also attach to any running process. Like `kubectl logs`, youll get stderr and stdout data, but with attach, youll also be able to send stdin from your terminal to the program. Awesome for interactive debugging, or even just sending ctrl-c to a misbehaving application.
```console
$> kubectl attach redis -i
1:C 12 Oct 23:05:11.848 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
```
_._
_.-``__''-._
_._
_.-``__''-._
_.-`` `. `_. ''-._ Redis 3.0.3 (00000000/0) 64 bit
.-`` .-```. ```\/ _.,_ ''-._
.-`` .-```. ```\/ _.,_ ''-._
( ' , .-` | `, ) Running in standalone mode
|`-._`-...-` __...-.``-._|'` _.-'| Port: 6379
| `-._ `._ / _.-' | PID: 1
`-._ `-._ `-./ _.-' _.-'
|`-._`-._ `-.__.-' _.-'_.-'|
`-._ `-._ `-./ _.-' _.-'
|`-._`-._ `-.__.-' _.-'_.-'|
| `-._`-._ _.-'_.-' | http://redis.io
`-._ `-._`-.__.-'_.-' _.-'
|`-._`-._ `-.__.-' _.-'_.-'|
| `-._`-._ _.-'_.-' |
`-._ `-._`-.__.-'_.-' _.-'
`-._ `-.__.-' _.-'
`-._ _.-'
`-.__.-'
`-._ `-._`-.__.-'_.-' _.-'
|`-._`-._ `-.__.-' _.-'_.-'|
| `-._`-._ _.-'_.-' |
`-._ `-._`-.__.-'_.-' _.-'
`-._ `-.__.-' _.-'
`-._ _.-'
`-.__.-'
1:M 12 Oct 23:05:11.849 # Server started, Redis version 3.0.3
```
##### Forward ports from Pods to your local machine
## Forward ports from Pods to your local machine
Oftentimes you want to be able to temporarily communicate with applications in your cluster without exposing them to the public internet for security reasons. To achieve this, the `port-forward` command allows you to securely forward a port on your local machine through the Kubernetes API server to a Pod running in your cluster. For example:
`$> kubectl port-forward redis-izl09 6379`
Opens port 6379 on your local machine and forwards communication to that port to the Pod or Service in your cluster. For example, you can use the telnet command to poke at a Redis service in your cluster:
```console
$> kubectl port-forward redis-izl09 6379
```
$> telnet localhost 6379 
INCR foo 
:1 
This opens port 6379 on your local machine and forwards communication on that port to the Pod or Service in your cluster. For example, you can use the `telnet` command to poke at a Redis service in your cluster:
```console
$> telnet localhost 6379 
INCR foo
:1
INCR foo 
:2
:2
```
### Execute commands inside an existing container
In addition to being able to attach to existing processes inside a container, the “exec” command allows you to spawn new processes inside existing containers. This can be useful for debugging, or examining your pods to see whats going on inside without interrupting a running service. `kubectl exec` is different from `kubectl run`, because it runs a command inside of an _existing_ container, rather than spawning a new container for execution.
## Execute commands inside an existing container
In addition to being able to attach to existing processes inside a container, the `exec` command allows you to spawn new processes inside existing containers. This can be useful for debugging, or examining your pods to see whats going on inside without interrupting a running service. `kubectl exec` is different from `kubectl run`, because it runs a command inside of an _existing_ container, rather than spawning a new container for execution.
```
```console
$> kubectl exec redis-izl09 -- ls /
bin
boot
@ -93,59 +97,66 @@ home
```
##### Add or remove Labels
## Add or remove Labels
Sometimes you want to dynamically add or remove labels from a Pod, Service or Replication controller. Maybe you want to add an existing Pod to a Service, or you want to remove a Pod from a Service. No matter what you want, you can easily and dynamically add or remove labels using the `kubectl label` subcommand:
Sometimes you want to dynamically add or remove labels from a Pod, Service or Replication controller. Maybe you want to add an existing Pod to a Service, or you want to remove a Pod from a Service. No matter what you want, you can easily and dynamically add or remove labels using the `kubectl label` subcommand:
`$> kubectl label pods redis-izl09 mylabel=awesome `
`pod "redis-izl09" labeled`
##### Add annotations to your objects
Just like labels, you can add or remove annotations from API objects using the kubectl annotate subcommand. Unlike labels, annotations are there to help describe your object, but arent used to identify pods via label queries ([more details on annotations](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/annotations.md#annotations)). For example, you might add an annotation of an icon for a GUI to use for displaying your pods.
`$> kubectl annotate pods redis-izl09 icon-url=http://goo.gl/XXBTWq `
`pod "redis-izl09" annotated`
##### Output custom format
Sometimes, you want to customize the fields displayed when kubectl summarizes an object from your cluster. To do this, you can use the `custom-columns-file` format. `custom-columns-file` takes in a template file for rendering the output. Again, JSONPath expressions are used in the template to specify fields in the API object. For example, the following template first shows the number of restarts, and then the name of the object:
```
$> cat cols.tmpl 
RESTARTS                                   NAME 
.status.containerStatuses[0].restartCount .metadata.name
```console
$> kubectl label pods redis-izl09 mylabel=awesome
pod "redis-izl09" labeled
```
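The same subcommand also removes labels when you suffix the key with a minus sign; a quick sketch reusing the label from the example above:
```console
# remove the label again by suffixing the key with "-"
$> kubectl label pods redis-izl09 mylabel-
```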
If you pass this template to the `kubectl get pods` command you get a list of pods with the specified fields displayed.
## Add annotations to your objects
Just like labels, you can add or remove annotations from API objects using the `kubectl annotate` subcommand. Unlike labels, annotations are there to help describe your object, but arent used to identify pods via label queries ([more details on annotations](/docs/concepts/overview/working-with-objects/annotations/)). For example, you might add an annotation of an icon for a GUI to use for displaying your pods.
```console
$> kubectl annotate pods redis-izl09 icon-url=http://goo.gl/XXBTWq
pod "redis-izl09" annotated
```
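Annotations can be removed in the same way, by suffixing the key with a minus sign:
```console
# remove the annotation added above
$> kubectl annotate pods redis-izl09 icon-url-
```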
## Output custom format
Sometimes, you want to customize the fields displayed when kubectl summarizes an object from your cluster. To do this, you can use the `custom-columns-file` format. `custom-columns-file` takes in a template file for rendering the output. Again, JSONPath expressions are used in the template to specify fields in the API object. For example, the following template first shows the number of restarts, and then the name of the object:
```console
$> cat cols.tmpl
RESTARTS                                   NAME
.status.containerStatuses[0].restartCount .metadata.name
```
If you pass this template to the `kubectl get pods` command you get a list of pods with the specified fields displayed.
```console
$> kubectl get pods redis-izl09 -o=custom-columns-file --template=cols.tmpl
RESTARTS           NAME
0                  redis-izl09
1                  redis-abl42
```
```
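Newer kubectl releases also accept the columns inline, without a separate template file; a rough equivalent of the example above (exact flag behavior may vary between versions):
```console
# same columns, specified directly on the command line
$> kubectl get pods -o custom-columns=RESTARTS:.status.containerStatuses[0].restartCount,NAME:.metadata.name
```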
##### Easily manage multiple Kubernetes clusters
## Easily manage multiple Kubernetes clusters
If youre running multiple Kubernetes clusters, you know it can be tricky to manage all of the credentials for the different clusters. Using the `kubectl config` subcommands, switching between different clusters is as easy as:
If youre running multiple Kubernetes clusters, you know it can be tricky to manage all of the credentials for the different clusters. Using the `kubectl config` subcommands, switching between different clusters is as easy as:
$> kubectl config use-context
```console
$> kubectl config use-context
```
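The command takes the name of a context from your kubeconfig; for instance (the context name here is hypothetical):
```console
# switch kubectl to the named context
$> kubectl config use-context my-production-cluster
```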
Not sure what clusters are available? You can view currently configured clusters with:
Not sure what clusters are available? You can view currently configured clusters with:
$> kubectl config view
```console
$> kubectl config view
```
Phew, that outputs a lot of text. To restrict it down to only the things were interested in, we can use a JSONPath template:
Phew, that outputs a lot of text. To restrict it down to only the things were interested in, we can use a JSONPath template:
$> kubectl config view -o jsonpath="{.context[*].name}"
```console
$> kubectl config view -o jsonpath="{.context[*].name}"
```
Ahh, thats better.
##### Conclusion
## Conclusion
So there you have it, nine new and exciting things you can do with your Kubernetes cluster and the kubectl command line. If youre just getting started with Kubernetes, check out [Google Container Engine](https://cloud.google.com/container-engine/) or other ways to [get started with Kubernetes](/docs/tutorials/kubernetes-basics/).
So there you have it, nine new and exciting things you can do with your Kubernetes cluster and the kubectl command line. If youre just getting started with Kubernetes, check out [Google Container Engine](https://cloud.google.com/container-engine/) or other ways to [get started with Kubernetes](/docs/tutorials/kubernetes-basics/).
- Brendan Burns, Google Software Engineer

View File

@ -143,7 +143,7 @@ When a default StorageClass exists and a user creates a PersistentVolumeClaim wi
Kubernetes 1.4 maintains backwards compatibility with the alpha version of the dynamic provisioning feature to allow for a smoother transition to the beta version. The alpha behavior is triggered by the existance of the alpha dynamic provisioning annotation (volume. **alpha**.kubernetes.io/storage-class). Keep in mind that if the beta annotation (volume. **beta**.kubernetes.io/storage-class) is present, it takes precedence, and triggers the beta behavior.
Kubernetes 1.4 maintains backwards compatibility with the alpha version of the dynamic provisioning feature to allow for a smoother transition to the beta version. The alpha behavior is triggered by the existence of the alpha dynamic provisioning annotation (volume.**alpha**.kubernetes.io/storage-class). Keep in mind that if the beta annotation (volume.**beta**.kubernetes.io/storage-class) is present, it takes precedence, and triggers the beta behavior.
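As a rough sketch of how the beta annotation was used on a claim at the time (the storage class name and requested size are invented for the example):
```console
kubectl create -f - <<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: claim1
  annotations:
    # beta annotation takes precedence over the alpha one if both are present
    volume.beta.kubernetes.io/storage-class: "slow"
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
EOF
```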

View File

@ -30,7 +30,7 @@ This then points to the other benefit of next generation PaaS being built on top
Kubernetes is infrastructure for next generation applications, PaaS and more. Given this, Im really excited by our [announcement](https://azure.microsoft.com/en-us/blog/kubernetes-now-generally-available-on-azure-container-service/) today that Kubernetes on Azure Container Service has reached general availability. When you deploy your next generation application to Azure, whether on a PaaS or deployed directly onto Kubernetes itself (or both) you can deploy it onto a managed, supported Kubernetes cluster.
Furthermore, because we know that the world of PaaS and software development in general is a hybrid one, were excited to announce the preview availability of [Windows clusters in Azure Container Service](https://docs.microsoft.com/en-us/azure/container-service/container-service-kubernetes-walkthrough). Were also working on [hybrid clusters](https://github.com/Azure/acs-engine/blob/master/docs/kubernetes/windows.md) in [ACS-Engine](https://github.com/Azure/acs-engine) and expect to roll those out to general availability in the coming months.
Furthermore, because we know that the world of PaaS and software development in general is a hybrid one, were excited to announce the preview availability of [Windows clusters in Azure Container Service](https://learn.microsoft.com/en-us/azure/container-service/container-service-kubernetes-walkthrough). Were also working on [hybrid clusters](https://github.com/Azure/acs-engine/blob/master/docs/kubernetes/windows.md) in [ACS-Engine](https://github.com/Azure/acs-engine) and expect to roll those out to general availability in the coming months.
Im thrilled to see how containers and containers as a service are changing the world of compute, and Im confident that were only scratching the surface of the transformation well see in the coming months and years.

View File

@ -94,7 +94,7 @@ If youd like to try out Kubeflow, we have a number of options for you:
1. You can use sample walkthroughs hosted on [Katacoda](https://www.katacoda.com/kubeflow)
2. You can follow a guided tutorial with existing models from the [examples repository](https://github.com/kubeflow/examples). These include the [GitHub Issue Summarization](https://github.com/kubeflow/examples/tree/master/github_issue_summarization), [MNIST](https://github.com/kubeflow/examples/tree/master/mnist) and [Reinforcement Learning with Agents](https://github.com/kubeflow/examples/tree/v0.5.1/agents).
3. You can start a cluster on your own and try your own model. Any Kubernetes conformant cluster will support Kubeflow including those from contributors [Caicloud](https://www.prnewswire.com/news-releases/caicloud-releases-its-kubernetes-based-cluster-as-a-service-product-claas-20-and-the-first-tensorflow-as-a-service-taas-11-while-closing-6m-series-a-funding-300418071.html), [Canonical](https://jujucharms.com/canonical-kubernetes/), [Google](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-container-cluster), [Heptio](https://heptio.com/products/kubernetes-subscription/), [Mesosphere](https://github.com/mesosphere/dcos-kubernetes-quickstart), [Microsoft](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough), [IBM](https://cloud.ibm.com/docs/containers?topic=containers-cs_cluster_tutorial#cs_cluster_tutorial), [Red Hat/Openshift ](https://docs.openshift.com/container-platform/3.3/install_config/install/quick_install.html#install-config-install-quick-install)and [Weaveworks](https://www.weave.works/product/cloud/).
3) You can start a cluster on your own and try your own model. Any Kubernetes conformant cluster will support Kubeflow including those from contributors [Caicloud](https://www.prnewswire.com/news-releases/caicloud-releases-its-kubernetes-based-cluster-as-a-service-product-claas-20-and-the-first-tensorflow-as-a-service-taas-11-while-closing-6m-series-a-funding-300418071.html), [Canonical](https://jujucharms.com/canonical-kubernetes/), [Google](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-container-cluster), [Heptio](https://heptio.com/products/kubernetes-subscription/), [Mesosphere](https://github.com/mesosphere/dcos-kubernetes-quickstart), [Microsoft](https://learn.microsoft.com/en-us/azure/aks/kubernetes-walkthrough), [IBM](https://cloud.ibm.com/docs/containers?topic=containers-cs_cluster_tutorial#cs_cluster_tutorial), [Red Hat/Openshift](https://docs.openshift.com/container-platform/3.3/install_config/install/quick_install.html#install-config-install-quick-install) and [Weaveworks](https://www.weave.works/product/cloud/).
There were also a number of sessions at KubeCon + CloudNativeCon EU 2018 covering Kubeflow. The links to the talks are here; the associated videos will be posted in the coming days.

View File

@ -192,7 +192,7 @@ To modify/add your own DAGs, you can use `kubectl cp` to upload local files into
# Get Involved
This feature is just the beginning of multiple major efforts to improves Apache Airflow integration into Kubernetes. The Kubernetes Operator has been merged into the [1.10 release branch of Airflow](https://github.com/apache/incubator-airflow/tree/v1-10-test) (the executor in experimental mode), along with a fully k8s native scheduler called the Kubernetes Executor (article to come). These features are still in a stage where early adopters/contributers can have a huge influence on the future of these features.
This feature is just the beginning of multiple major efforts to improve Apache Airflow integration into Kubernetes. The Kubernetes Operator has been merged into the [1.10 release branch of Airflow](https://github.com/apache/incubator-airflow/tree/v1-10-test) (the executor in experimental mode), along with a fully k8s native scheduler called the Kubernetes Executor (article to come). These features are still in a stage where early adopters/contributors can have a huge influence on the future of these features.
For those interested in joining these efforts, I'd recommend checking out these steps:

View File

@ -460,7 +460,7 @@ Now you can configure your DHCP. Basically you should set the `next-server` and
I use ISC-DHCP server, and here is an example `dhcpd.conf`:
```
shared-network ltsp-netowrk {
shared-network ltsp-network {
subnet 10.9.0.0 netmask 255.255.0.0 {
authoritative;
default-lease-time -1;

View File

@ -10,11 +10,11 @@ date: 2018-10-08
With Kubernetes v1.12, Azure virtual machine scale sets (VMSS) and cluster-autoscaler have reached their General Availability (GA) and User Assigned Identity is available as a preview feature.
_Azure VMSS allow you to create and manage identical, load balanced VMs that automatically increase or decrease based on demand or a set schedule. This enables you to easily manage and scale multiple VMs to provide high availability and application resiliency, ideal for large-scale applications like container workloads [[1]](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview)._
_Azure VMSS allow you to create and manage identical, load balanced VMs that automatically increase or decrease based on demand or a set schedule. This enables you to easily manage and scale multiple VMs to provide high availability and application resiliency, ideal for large-scale applications like container workloads [[1]](https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview)._
Cluster autoscaler allows you to adjust the size of the Kubernetes clusters based on the load conditions automatically.
Another exciting feature which v1.12 brings to the table is the ability to use User Assigned Identities with Kubernetes clusters [[12]](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
Another exciting feature which v1.12 brings to the table is the ability to use User Assigned Identities with Kubernetes clusters [[12]](https://learn.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
In this article, we will do a brief overview of VMSS, cluster autoscaler and user assigned identity features on Azure.
@ -22,7 +22,7 @@ In this article, we will do a brief overview of VMSS, cluster autoscaler and use
Azures Virtual Machine Scale Sets (VMSS) feature offers users the ability to automatically create VMs from a single central configuration, provides load balancing via L4 and L7 load balancing, provides a path to use availability zones for high availability, supports large-scale VM instances, and more.
VMSS consists of a group of virtual machines, which are identical and can be managed and configured at a group level. More details of this feature in Azure itself can be found at the following link [[1]](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview).
VMSS consists of a group of virtual machines, which are identical and can be managed and configured at a group level. More details of this feature in Azure itself can be found at the following link [[1]](https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview).
With Kubernetes v1.12, customers can create Kubernetes clusters out of VMSS instances and utilize VMSS features.
@ -254,7 +254,7 @@ Cluster Autoscaler currently supports four VM types: standard (VMAS), VMSS, ACS
## User Assigned Identity
Inorder for the Kubernetes cluster components to securely talk to the cloud services, it needs to authenticate with the cloud provider. In Azure Kubernetes clusters, up until now this was done using two ways - Service Principals or Managed Identities. In case of service principal the credentials are stored within the cluster and there are password rotation and other challenges which user needs to incur to accommodate this model. Managed service identities takes out this burden from the user and manages the service instances directly [[12]](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
In order for the Kubernetes cluster components to securely talk to the cloud services, they need to authenticate with the cloud provider. In Azure Kubernetes clusters, up until now this was done in one of two ways - Service Principals or Managed Identities. In the case of service principals, the credentials are stored within the cluster, and there are password rotation and other challenges which the user needs to incur to accommodate this model. Managed service identities take this burden away from the user and manage the service instances directly [[12]](https://learn.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
There are two kinds of managed identities possible - one is system assigned and the other is user assigned. In the case of system assigned identity, each VM in the Kubernetes cluster is assigned a managed identity during creation. This identity is used by the various Kubernetes components needing access to Azure resources. Examples of these operations are getting/updating load balancer configuration, getting/updating VM information, etc. With the system assigned managed identity, the user has no control over which identity is assigned to the underlying VM. The system automatically assigns it, and this reduces the flexibility for the user.
@ -273,7 +273,7 @@ env.ServiceManagementEndpoint,
config.UserAssignedIdentityID)
```
This calls hits either the instance metadata service or the vm extension [[12]](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) to gather the token which is then used to access various resources.
This call hits either the instance metadata service or the VM extension [[12]](https://learn.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) to gather the token, which is then used to access various resources.
## Setting up a cluster with user assigned identity
@ -304,11 +304,11 @@ For azure specific discussions - please checkout the Azure SIG page at [[6]](htt
For CA, please checkout the Autoscaler project here [[7]](http://www.github.com/kubernetes/autoscaler) and join the [#sig-autoscaling](https://kubernetes.slack.com/messages/sig-autoscaling) Slack for more discussions.
For the acs-engine (the unmanaged variety) on Azure docs can be found here: [[9]](https://github.com/Azure/acs-engine). More details about the managed service from Azure Kubernetes Service (AKS) here [[5]](https://docs.microsoft.com/en-us/azure/aks/).
Docs for the acs-engine (the unmanaged variety) on Azure can be found here: [[9]](https://github.com/Azure/acs-engine). More details about the managed service from Azure Kubernetes Service (AKS) are here: [[5]](https://learn.microsoft.com/en-us/azure/aks/).
## References
1) https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview
1) https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview
2) /docs/concepts/architecture/cloud-controller/
@ -316,7 +316,7 @@ For the acs-engine (the unmanaged variety) on Azure docs can be found here: [[9]
4) https://github.com/Azure/acs-engine/blob/master/docs/kubernetes/deploy.md
5) https://docs.microsoft.com/en-us/azure/aks/
5) https://learn.microsoft.com/en-us/azure/aks/
6) https://github.com/kubernetes/community/tree/master/sig-azure
@ -330,7 +330,7 @@ For the acs-engine (the unmanaged variety) on Azure docs can be found here: [[9]
11) /docs/concepts/architecture/
12) https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
12) https://learn.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
13) https://github.com/Azure/acs-engine/tree/master/examples/kubernetes-msi-userassigned

View File

@ -33,7 +33,7 @@ General Availability means different things for different projects. For kubeadm,
We now consider kubeadm to have achieved GA-level maturity in each of these important domains:
* **Stable command-line UX** --- The kubeadm CLI conforms to [#5a GA rule of the Kubernetes Deprecation Policy](/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli), which states that a command or flag that exists in a GA version must be kept for at least 12 months after deprecation.
* **Stable underlying implementation** --- kubeadm now creates a new Kubernetes cluster using methods that shouldn't change any time soon. The control plane, for example, is run as a set of static Pods, bootstrap tokens are used for the [`kubeadm join`](/docs/reference/setup-tools/kubeadm/kubeadm-join/) flow, and [ComponentConfig](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/wgs/0014-20180707-componentconfig-api-types-to-staging.md) is used for configuring the [kubelet](/docs/reference/command-line-tools-reference/kubelet/).
* **Stable underlying implementation** --- kubeadm now creates a new Kubernetes cluster using methods that shouldn't change any time soon. The control plane, for example, is run as a set of static Pods, bootstrap tokens are used for the [`kubeadm join`](/docs/reference/setup-tools/kubeadm/kubeadm-join/) flow, and [ComponentConfig](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/wgs/115-componentconfig) is used for configuring the [kubelet](/docs/reference/command-line-tools-reference/kubelet/).
* **Configuration file schema** --- With the new **v1beta1** API version, you can now tune almost every part of the cluster declaratively and thus build a "GitOps" flow around kubeadm-built clusters. In future versions, we plan to graduate the API to version **v1** with minimal changes (and perhaps none).
* **The "toolbox" interface of kubeadm** --- Also known as **phases**. If you don't want to perform all [`kubeadm init`](/docs/reference/setup-tools/kubeadm/kubeadm-init/) tasks, you can instead apply more fine-grained actions using the `kubeadm init phase` command (for example generating certificates or control plane [Static Pod](/docs/tasks/administer-cluster/static-pod/) manifests).
* **Upgrades between minor versions** --- The [`kubeadm upgrade`](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade/) command is now fully GA. It handles control plane upgrades for you, which includes upgrades to [etcd](https://etcd.io), the [API Server](/docs/reference/using-api/api-overview/), the [Controller Manager](/docs/reference/command-line-tools-reference/kube-controller-manager/), and the [Scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/). You can seamlessly upgrade your cluster between minor or patch versions (e.g. v1.12.2 -> v1.13.1 or v1.13.1 -> v1.13.3).

View File

@ -8,7 +8,7 @@ date: 2018-12-12
Kubernetes provides great primitives for deploying applications to a cluster: it can be as simple as `kubectl create -f app.yaml`. Deploying apps across multiple clusters has never been that simple. How should app workloads be distributed? Should the app resources be replicated into all clusters, replicated into selected clusters, or partitioned into clusters? How is access to the clusters managed? What happens if some of the resources that a user wants to distribute pre-exist, in some or all of the clusters, in some form?
In SIG Multicluster, our journey has revealed that there are multiple possible models to solve these problems and there probably is no single best-fit, all-scenario solution. [Federation](/docs/concepts/cluster-administration/federation/), however, is the single biggest Kubernetes open source sub-project, and has seen the maximum interest and contribution from the community in this problem space. The project initially reused the Kubernetes API to do away with any added usage complexity for an existing Kubernetes user. This approach was not viable, because of the problems summarised below:
In SIG Multicluster, our journey has revealed that there are multiple possible models to solve these problems and there probably is no single best-fit, all-scenario solution. [Kubernetes Cluster Federation (KubeFed for short)](https://github.com/kubernetes-sigs/kubefed), however, is the single biggest Kubernetes open source sub-project, and has seen the maximum interest and contribution from the community in this problem space. The project initially reused the Kubernetes API to do away with any added usage complexity for an existing Kubernetes user. This approach was not viable, because of the problems summarised below:
* Difficulties in re-implementing the Kubernetes API at the cluster level, as federation-specific extensions were stored in annotations.
* Limited flexibility in federated types, placement and reconciliation, due to 1:1 emulation of the Kubernetes API.
@ -82,7 +82,7 @@ For external clients, automatic DNS expansion described is not currently possibl
That way, your clients can always use the short form on the left, and always be automatically routed to the closest healthy shard on their home continent. All of the required failover is handled for you automatically by Kubernetes cluster federation.
As further reading, a more elaborate example for users is available in the [Multi-Cluster Service DNS with ExternalDNS guide](https://github.com/kubernetes-sigs/federation-v2/blob/master/docs/servicedns-with-externaldns.md).
As further reading, a more elaborate example for users is available in the [Multi-Cluster Service DNS with ExternalDNS guide](https://github.com/kubernetes-retired/kubefed/blob/dbcd4da3823a7ba8ac29e80c9d5b968868638d28/docs/servicedns-with-externaldns.md).
# Try it yourself
To get started with Federation v2, please refer to the [user guide](https://github.com/kubernetes-sigs/federation-v2/blob/master/docs/userguide.md). Deployment can be accomplished with a [Helm chart](https://github.com/kubernetes-sigs/kubefed/blob/master/charts/kubefed/README.md), and once the control plane is available, the [user guides example](https://github.com/kubernetes-sigs/federation-v2/blob/master/docs/userguide.md#example) can be used to get some hands-on experience with using Federation v2.

View File

@ -129,7 +129,7 @@ spec:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox # updated after publication (previously used k8s.gcr.io/busybox)
command:
- "/bin/sh"
args:

View File

@ -47,7 +47,7 @@ The essential idea is that so-called **seed** clusters are used to host the
control planes of end-user clusters (botanically named **shoots**). \
Gardener provides vanilla Kubernetes clusters as a service independent of the
underlying infrastructure provider in a homogenous way, utilizing the upstream
provided `k8s.gcr.io/*` images as open distribution. The project is built
provided `k8s.gcr.io/*` images as open distribution _(update: `k8s.gcr.io` has been deprecated in favor of `registry.k8s.io`)._ The project is built
entirely on top of Kubernetes extension concepts, and as such adds a custom API
server, a controller-manager, and a scheduler to create and manage the lifecycle
of Kubernetes clusters. It extends the Kubernetes API with custom resources,

View File

@ -119,8 +119,8 @@ Here are some of the images we built
- `gcr.io/kubernetes-e2e-test-images/volume/iscsi:2.0`
- `gcr.io/kubernetes-e2e-test-images/volume/nfs:1.0`
- `gcr.io/kubernetes-e2e-test-images/volume/rbd:1.0.1`
- `k8s.gcr.io/etcd:3.3.15`
- `k8s.gcr.io/pause:3.1`
- `registry.k8s.io/etcd:3.3.15` (image changed since publication - previously used registry "k8s.gcr.io")
- `registry.k8s.io/pause:3.1` (image changed since publication - previously used registry "k8s.gcr.io")
Finally, we ran the tests and got the test result, include `e2e.log`, which showed that all test cases passed. Additionally, we submitted our test result to [k8s-conformance](https://github.com/cncf/k8s-conformance) as a [pull request](https://github.com/cncf/k8s-conformance/pull/779).

View File

@ -67,7 +67,7 @@ Let's see an example of a cluster to understand this API.
As the feature name "PodTopologySpread" implies, the basic usage of this feature
is to run your workload in an absolutely even manner (maxSkew=1), or a relatively
even manner (maxSkew>=2). See the [official
document](/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
document](/docs/concepts/scheduling-eviction/topology-spread-constraints/)
for more details.
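For instance, a constraint along these lines spreads a workload's replicas evenly across zones; a minimal sketch, with illustrative labels and image:
```console
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: web-1
  labels:
    app: web
spec:
  topologySpreadConstraints:
  - maxSkew: 1                                # at most 1 extra pod in any single zone
    topologyKey: topology.kubernetes.io/zone  # spread across zones
    whenUnsatisfiable: DoNotSchedule          # treat the constraint as a hard requirement
    labelSelector:
      matchLabels:
        app: web
  containers:
  - name: web
    image: registry.k8s.io/pause:3.1
EOF
```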
In addition to this basic usage, there are some advanced usage examples that

View File

@ -27,7 +27,7 @@ Our goal is for Kubernetes docs to be a trustworthy guide to Kubernetes features
### Re-homing content
Some content will be removed that readers may find helpful. To make sure readers have continous access to information, we're giving stakeholders until the [1.19 release deadline for docs](https://github.com/kubernetes/sig-release/tree/master/releases/release-1.19), **July 9th, 2020** to re-home any content slated for removal.
Some content will be removed that readers may find helpful. To make sure readers have continuous access to information, we're giving stakeholders until the [1.19 release deadline for docs](https://github.com/kubernetes/sig-release/tree/master/releases/release-1.19), **July 9th, 2020** to re-home any content slated for removal.
Over the next few months you'll see less third party content in the docs as contributors open PRs to remove content.

File diff suppressed because it is too large

View File

@ -55,7 +55,8 @@ The team has made progress in the last few months that is well worth celebrating
- The K8s-Infrastructure Working Group released an automated billing report that they start every meeting off by reviewing as a group.
- DNS for k8s.io and kubernetes.io are also fully [community-owned](https://groups.google.com/g/kubernetes-dev/c/LZTYJorGh7c/m/u-ydk-yNEgAJ), with community members able to [file issues](https://github.com/kubernetes/k8s.io/issues/new?assignees=&labels=wg%2Fk8s-infra&template=dns-request.md&title=DNS+REQUEST%3A+%3Cyour-dns-record%3E) to manage records.
- The container registry [k8s.gcr.io](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io) is also fully community-owned and available for all Kubernetes subprojects to use.
- The container registry [registry.k8s.io](https://github.com/kubernetes/k8s.io/tree/main/registry.k8s.io) is also fully community-owned and available for all Kubernetes subprojects to use.
_Note:_ The container registry has changed to registry.k8s.io. Updated on August 25, 2022.
- The Kubernetes [publishing-bot](https://github.com/kubernetes/publishing-bot) responsible for keeping k8s.io/kubernetes/staging repositories published to their own top-level repos (For example: [kubernetes/api](https://github.com/kubernetes/api)) runs on a community-owned cluster.
- The gcsweb.k8s.io service used to provide anonymous access to GCS buckets for kubernetes artifacts runs on a community-owned cluster.
- There is also an automated process of promoting all our container images. This includes a fully documented infrastructure, managed by the Kubernetes community, with automated processes for provisioning permissions.

View File

@ -198,7 +198,7 @@ GUINEVERE SAENGER: I would want Jorge to be really on top of making sure that ev
Greater communication of timelines and just giving people more time and space to be able to get in their changes, or at least, seemingly give them more time and space by sending early warnings, is going to be helpful. Of course, he's going to have a slightly longer release, too, than I did. This might be related to a unique Q4 challenge. Overall, I would encourage him to take more breaks, to rely more on his release shadows, and split out the work in a fashion that allows everyone to have a turn and everyone to have a break as well.
**ADAM GLICK: What would your advice be to someone who is hearing your experience and is inspired to get involved with the Kubernetes release or contributer process?**
**ADAM GLICK: What would your advice be to someone who is hearing your experience and is inspired to get involved with the Kubernetes release or contributor process?**
GUINEVERE SAENGER: Those are two separate questions. So let me tackle the Kubernetes release question first. Kubernetes [SIG Release](https://github.com/kubernetes/sig-release/#readme) has, in my opinion, a really excellent onboarding program for new members. We have what is called the [Release Team Shadow Program](https://github.com/kubernetes/sig-release/blob/master/release-team/shadows.md). We also have the Release Engineering Shadow Program, or the Release Management Shadow Program. Those are two separate subprojects within SIG Release. And each subproject has a team of roles, and each role can have two to four shadows that are basically people who are part of that role team, and they are learning that role as they are doing it.

View File

@ -59,7 +59,7 @@ toil as the organization grows.
# Introducing hierarchical namespaces
[Hierarchical
namespaces](https://github.com/kubernetes-sigs/multi-tenancy/blob/master/incubator/hnc/docs/user-guide/concepts.md#basic)
namespaces](https://github.com/kubernetes-sigs/hierarchical-namespaces/blob/master/docs/user-guide/concepts.md#basic-concepts)
are a new concept developed by the [Kubernetes Working Group for Multi-Tenancy
(wg-multitenancy)](https://github.com/kubernetes-sigs/multi-tenancy) in order to
solve these problems. In its simplest form, a hierarchical namespace is a
@ -71,10 +71,10 @@ This concept of ownership enables two additional types of behaviours:
* **Policy inheritance:** if one namespace is a child of another, policy objects
such as RBAC RoleBindings are [copied from the parent to the
child](https://github.com/kubernetes-sigs/multi-tenancy/blob/master/incubator/hnc/docs/user-guide/concepts.md#basic-propagation).
child](https://github.com/kubernetes-sigs/hierarchical-namespaces/blob/master/docs/user-guide/concepts.md#policy-inheritance-and-object-propagation).
* **Delegated creation:** you usually need cluster-level privileges to create a
namespace, but hierarchical namespaces adds an alternative:
[_subnamespaces_](https://github.com/kubernetes-sigs/multi-tenancy/blob/master/incubator/hnc/docs/user-guide/concepts.md#basic-subns),
[_subnamespaces_](https://github.com/kubernetes-sigs/hierarchical-namespaces/blob/master/docs/user-guide/concepts.md#subnamespaces-and-full-namespaces),
which can be manipulated using only limited permissions in the parent
namespace.
@ -88,7 +88,7 @@ without violating the policies that were imposed by the cluster administrators.
Hierarchical namespaces are provided by a Kubernetes extension known as the
[**Hierarchical Namespace
Controller**](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/incubator/hnc),
Controller**](https://github.com/kubernetes-sigs/hierarchical-namespaces#the-hierarchical-namespace-controller-hnc),
or **HNC**. The HNC consists of two components:
* The **manager** runs on your cluster, manages subnamespaces, propagates policy
@ -97,7 +97,7 @@ or **HNC**. The HNC consists of two components:
interact with the manager.
Both can be easily installed from the [releases page of our
repo](https://github.com/kubernetes-sigs/multi-tenancy/releases).
repo](https://github.com/kubernetes-sigs/hierarchical-namespaces/releases).
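Once both pieces are installed, the kubectl plugin is the way to interact with the hierarchy; a minimal sketch (namespace names are illustrative):
```console
# create a subnamespace of team-a, using only permissions granted in team-a
kubectl hns create team-a-service-1 -n team-a

# show the resulting namespace tree
kubectl hns tree team-a
```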
Lets see HNC in action. Imagine that I do not have namespace creation
privileges, but I can view the namespace `team-a` and create subnamespaces
@ -161,7 +161,7 @@ _also_ allow ingress traffic between all of those namespaces. The “tree” lab
can only be applied by HNC, and is guaranteed to reflect the latest hierarchy.
You can learn all about the features of HNC from the [user
guide](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/incubator/hnc/docs/user-guide).
guide](https://github.com/kubernetes-sigs/hierarchical-namespaces/tree/master/docs/user-guide).
# Next steps and getting involved
@ -178,7 +178,7 @@ or help prototype new features such as exceptions, improved monitoring,
hierarchical resource quotas or fine-grained configuration.
Please get in touch with us via our
[repo](https://github.com/kubernetes-sigs/multi-tenancy), [mailing
[repo](https://github.com/kubernetes-sigs/hierarchical-namespaces), [mailing
list](https://groups.google.com/g/kubernetes-wg-multitenancy) or on
[Slack](https://kubernetes.slack.com/messages/wg-multitenancy) - we look forward
to hearing from you!

View File

@ -70,7 +70,7 @@ To correct the latter issue, we now employ a "hunt and peck" approach to removin
### 1. Upgrade to kubernetes 1.18 and make use of Pod Topology Spread Constraints
While this seems like it could have been the perfect solution, at the time of writing Kubernetes 1.18 was unavailable on the two most common managed Kubernetes services in public cloud, EKS and GKE.
Furthermore, [pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/) were still a [beta feature in 1.18](https://v1-18.docs.kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) which meant that it [wasn't guaranteed to be available in managed clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#kubernetes_feature_choices) even when v1.18 became available.
Furthermore, [pod topology spread constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/) were still a beta feature in 1.18 which meant that it [wasn't guaranteed to be available in managed clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/types-of-clusters#kubernetes_feature_choices) even when v1.18 became available.
The entire endeavour was concerningly reminiscent of checking [caniuse.com](https://caniuse.com/) when Internet Explorer 8 was still around.
### 2. Deploy a statefulset _per zone_.

View File

@ -4,71 +4,159 @@ title: "PodSecurityPolicy Deprecation: Past, Present, and Future"
date: 2021-04-06
slug: podsecuritypolicy-deprecation-past-present-and-future
---
**Author:** Tabitha Sable (Kubernetes SIG Security)
PodSecurityPolicy (PSP) is being deprecated in Kubernetes 1.21, to be released later this week. This starts the countdown to its removal, but doesnt change anything else. PodSecurityPolicy will continue to be fully functional for several more releases before being removed completely. In the meantime, we are developing a replacement for PSP that covers key use cases more easily and sustainably.
{{% pageinfo color="primary" %}}
**Update:** *With the release of Kubernetes v1.25, PodSecurityPolicy has been removed.*
*You can read more information about the removal of PodSecurityPolicy in the
[Kubernetes 1.25 release notes](/blog/2022/08/23/kubernetes-v1-25-release/#pod-security-changes).*
{{% /pageinfo %}}
What are Pod Security Policies? Why did we need them? Why are they going away, and whats next? How does this affect you? These key questions come to mind as we prepare to say goodbye to PSP, so lets walk through them together. Well start with an overview of how features get removed from Kubernetes.
PodSecurityPolicy (PSP) is being deprecated in Kubernetes 1.21, to be released later this week.
This starts the countdown to its removal, but doesnt change anything else.
PodSecurityPolicy will continue to be fully functional for several more releases before being removed completely.
In the meantime, we are developing a replacement for PSP that covers key use cases more easily and sustainably.
What are Pod Security Policies? Why did we need them? Why are they going away, and whats next?
How does this affect you? These key questions come to mind as we prepare to say goodbye to PSP,
so lets walk through them together. Well start with an overview of how features get removed from Kubernetes.
## What does deprecation mean in Kubernetes?
Whenever a Kubernetes feature is set to go away, our [deprecation policy](/docs/reference/using-api/deprecation-policy/) is our guide. First the feature is marked as deprecated, then after enough time has passed, it can finally be removed.
Whenever a Kubernetes feature is set to go away, our [deprecation policy](/docs/reference/using-api/deprecation-policy/)
is our guide. First the feature is marked as deprecated, then after enough time has passed, it can finally be removed.
Kubernetes 1.21 starts the deprecation process for PodSecurityPolicy. As with all feature deprecations, PodSecurityPolicy will continue to be fully functional for several more releases. The current plan is to remove PSP from Kubernetes in the 1.25 release.
Kubernetes 1.21 starts the deprecation process for PodSecurityPolicy. As with all feature deprecations,
PodSecurityPolicy will continue to be fully functional for several more releases.
The current plan is to remove PSP from Kubernetes in the 1.25 release.
Until then, PSP is still PSP. There will be at least a year during which the newest Kubernetes releases will still support PSP, and nearly two years until PSP will pass fully out of all supported Kubernetes versions.
Until then, PSP is still PSP. There will be at least a year during which the newest Kubernetes releases will
still support PSP, and nearly two years until PSP will pass fully out of all supported Kubernetes versions.
## What is PodSecurityPolicy?
[PodSecurityPolicy](/docs/concepts/security/pod-security-policy/) is a built-in [admission controller](/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/) that allows a cluster administrator to control security-sensitive aspects of the Pod specification.
[PodSecurityPolicy](/docs/concepts/security/pod-security-policy/) is
a built-in [admission controller](/blog/2019/03/21/a-guide-to-kubernetes-admission-controllers/)
that allows a cluster administrator to control security-sensitive aspects of the Pod specification.
First, one or more PodSecurityPolicy resources are created in a cluster to define the requirements Pods must meet. Then, RBAC rules are created to control which PodSecurityPolicy applies to a given pod. If a pod meets the requirements of its PSP, it will be admitted to the cluster as usual. In some cases, PSP can also modify Pod fields, effectively creating new defaults for those fields. If a Pod does not meet the PSP requirements, it is rejected, and cannot run.
First, one or more PodSecurityPolicy resources are created in a cluster to define the requirements Pods must meet.
Then, RBAC rules are created to control which PodSecurityPolicy applies to a given pod.
If a pod meets the requirements of its PSP, it will be admitted to the cluster as usual.
In some cases, PSP can also modify Pod fields, effectively creating new defaults for those fields.
If a Pod does not meet the PSP requirements, it is rejected, and cannot run.
One more important thing to know about PodSecurityPolicy: its not the same as [PodSecurityContext](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context).
One more important thing to know about PodSecurityPolicy: its not the same as
[PodSecurityContext](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context).
A part of the Pod specification, PodSecurityContext (and its per-container counterpart `SecurityContext`) is the collection of fields that specify many of the security-relevant settings for a Pod. The security context dictates to the kubelet and container runtime how the Pod should actually be run. In contrast, the PodSecurityPolicy only constrains (or defaults) the values that may be set on the security context.
A part of the Pod specification, PodSecurityContext (and its per-container counterpart `SecurityContext`)
is the collection of fields that specify many of the security-relevant settings for a Pod.
The security context dictates to the kubelet and container runtime how the Pod should actually be run.
In contrast, the PodSecurityPolicy only constrains (or defaults) the values that may be set on the security context.
The deprecation of PSP does not affect PodSecurityContext in any way.
## Why did we need PodSecurityPolicy?
In Kubernetes, we define resources such as Deployments, StatefulSets, and Services that represent the building blocks of software applications. The various controllers inside a Kubernetes cluster react to these resources, creating further Kubernetes resources or configuring some software or hardware to accomplish our goals.
In Kubernetes, we define resources such as Deployments, StatefulSets, and Services that
represent the building blocks of software applications. The various controllers inside
a Kubernetes cluster react to these resources, creating further Kubernetes resources or
configuring some software or hardware to accomplish our goals.
In most Kubernetes clusters, RBAC (Role-Based Access Control) [rules](/docs/reference/access-authn-authz/rbac/#role-and-clusterrole) control access to these resources. `list`, `get`, `create`, `edit`, and `delete` are the sorts of API operations that RBAC cares about, but _RBAC does not consider what settings are being put into the resources it controls_. For example, a Pod can be almost anything from a simple webserver to a privileged command prompt offering full access to the underlying server node and all the data. Its all the same to RBAC: a Pod is a Pod is a Pod.
In most Kubernetes clusters,
RBAC (Role-Based Access Control) [rules](/docs/reference/access-authn-authz/rbac/#role-and-clusterrole)
control access to these resources. `list`, `get`, `create`, `edit`, and `delete` are
the sorts of API operations that RBAC cares about,
but _RBAC does not consider what settings are being put into the resources it controls_.
For example, a Pod can be almost anything from a simple webserver to
a privileged command prompt offering full access to the underlying server node and all the data.
Its all the same to RBAC: a Pod is a Pod is a Pod.
To control what sorts of settings are allowed in the resources defined in your cluster, you need Admission Control in addition to RBAC. Since Kubernetes 1.3, PodSecurityPolicy has been the built-in way to do that for security-related Pod fields. Using PodSecurityPolicy, you can prevent “create Pod” from automatically meaning “root on every cluster node,” without needing to deploy additional external admission controllers.
To control what sorts of settings are allowed in the resources defined in your cluster,
you need Admission Control in addition to RBAC. Since Kubernetes 1.3,
PodSecurityPolicy has been the built-in way to do that for security-related Pod fields.
Using PodSecurityPolicy, you can prevent “create Pod” from automatically meaning “root on every cluster node,”
without needing to deploy additional external admission controllers.
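To make that concrete, here is a minimal sketch of a PodSecurityPolicy that rejects privileged containers, loosely following the example in the PSP documentation; the name is made up and the other rules are left fully permissive for brevity:

```yaml
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: example-nonprivileged   # hypothetical name
spec:
  privileged: false             # Pods requesting privileged containers are rejected
  # The remaining required rule fields are left permissive for this sketch
  seLinux:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  volumes:
  - '*'
```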
## Why is PodSecurityPolicy going away?
In the years since PodSecurityPolicy was first introduced, we have realized that PSP has some serious usability problems that can't be addressed without making breaking changes.
The way PSPs are applied to Pods has proven confusing to nearly everyone who has attempted to use them. It is easy to accidentally grant broader permissions than intended, and difficult to inspect which PSP(s) apply in a given situation. The “changing Pod defaults” feature can be handy, but is only supported for certain Pod settings and it's not obvious when they will or will not apply to your Pod. Without a “dry run” or audit mode, it's impractical to retrofit PSP to existing clusters safely, and it's impossible for PSP to ever be enabled by default.
For more information about these and other PSP difficulties, check out SIG Auth's KubeCon NA 2019 Maintainer Track session video: {{< youtube "SFtHRmPuhEw?start=953" youtube-quote-sm >}}
Today, you're not limited only to deploying PSP or writing your own custom admission controller. Several external admission controllers are available that incorporate lessons learned from PSP to provide a better user experience. [K-Rail](https://github.com/cruise-automation/k-rail), [Kyverno](https://github.com/kyverno/kyverno/), and [OPA/Gatekeeper](https://github.com/open-policy-agent/gatekeeper/) are all well-known, and each has its fans.
Although there are other good options available now, we believe there is still value in having a built-in admission controller available as a choice for users. With this in mind, we turn toward building what's next, inspired by the lessons learned from PSP.
## Whats next?
Kubernetes SIG Security, SIG Auth, and a diverse collection of other community members have been working together for months to ensure that what's coming next is going to be awesome. We have developed a Kubernetes Enhancement Proposal ([KEP 2579](https://github.com/kubernetes/enhancements/issues/2579)) and a prototype for a new feature, currently being called by the temporary name "PSP Replacement Policy." We are targeting an Alpha release in Kubernetes 1.22.
PSP Replacement Policy starts with the realization that since there is a robust ecosystem of external admission controllers already available, PSP's replacement doesn't need to be all things to all people. Simplicity of deployment and adoption is the key advantage a built-in admission controller has compared to an external webhook, so we have focused on how to best utilize that advantage.
PSP Replacement Policy is designed to be as simple as practically possible
while providing enough flexibility to really be useful in production at scale.
It has soft rollout features to enable retrofitting it to existing clusters,
and is configurable enough that it can eventually be active by default.
It can be deactivated partially or entirely, to coexist with external admission controllers for advanced use cases.
## What does this mean for you?
What this all means for you depends on your current PSP situation. If you're already using PSP, there's plenty of time to plan your next move. Please review the PSP Replacement Policy KEP and think about how well it will suit your use case.
If you're making extensive use of the flexibility of PSP with numerous PSPs and complex binding rules, you will likely find the simplicity of PSP Replacement Policy too limiting. Use the next year to evaluate the other admission controller choices in the ecosystem. There are resources available to ease this transition, such as the [Gatekeeper Policy Library](https://github.com/open-policy-agent/gatekeeper-library).
If your use of PSP is relatively simple, with a few policies and straightforward binding to service accounts in each namespace, you will likely find PSP Replacement Policy to be a good match for your needs. Evaluate your PSPs compared to the Kubernetes [Pod Security Standards](/docs/concepts/security/pod-security-standards/) to get a feel for where you'll be able to use the Restricted, Baseline, and Privileged policies. Please follow along with or contribute to the KEP and subsequent development, and try out the Alpha release of PSP Replacement Policy when it becomes available.
If you're just beginning your PSP journey, you will save time and effort by keeping it simple. You can approximate the functionality of PSP Replacement Policy today by using the Pod Security Standards PSPs. If you set the cluster default by binding a Baseline or Restricted policy to the `system:serviceaccounts` group, and then make a more-permissive policy available as needed in certain Namespaces [using ServiceAccount bindings](/docs/concepts/policy/pod-security-policy/#run-another-pod), you will avoid many of the PSP pitfalls and have an easy migration to PSP Replacement Policy. If your needs are much more complex than this, your effort is probably better spent adopting one of the more fully-featured external admission controllers mentioned above.
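As a hedged sketch of that binding pattern, the manifests below grant every service account in the cluster the ability to `use` a PSP named `baseline`; this assumes you have already created a PSP with that name (for example, from the Pod Security Standards example policies), and the ClusterRole and ClusterRoleBinding names are made up:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp-baseline-user         # hypothetical name
rules:
- apiGroups: ["policy"]
  resources: ["podsecuritypolicies"]
  resourceNames: ["baseline"]     # assumes a PSP named "baseline" exists
  verbs: ["use"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: psp-baseline-default      # hypothetical name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp-baseline-user
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:serviceaccounts    # every ServiceAccount in every namespace
```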
We're dedicated to making Kubernetes the best container orchestration tool we can, and sometimes that means we need to remove longstanding features to make space for better things to come. When that happens, the Kubernetes deprecation policy ensures you have plenty of time to plan your next move. In the case of PodSecurityPolicy, several options are available to suit a range of needs and use cases. Start planning ahead now for PSP's eventual removal, and please consider contributing to its replacement! Happy securing!
**Acknowledgment:** It takes a wonderful group to make wonderful software. Thanks are due to everyone who has contributed to the PSP replacement effort, especially (in alphabetical order) Tim Allclair, Ian Coldwater, and Jordan Liggitt. It's been a joy to work with y'all on this.
@ -35,7 +35,7 @@ So without further ado, here is the list of more noteworthy user-facing breaking
* Metric labels that relate to Kubernetes were converted to snake_case.
* If you are importing kube-state-metrics as a library, we have updated our go module path to `k8s.io/kube-state-metrics/v2`
* All deprecated stable metrics were removed as per the [notice in the v1.9 release](https://github.com/kubernetes/kube-state-metrics/tree/release-1.9/docs#metrics-deprecation).
* `quay.io/coreos/kube-state-metrics` images will no longer be updated. `k8s.gcr.io/kube-state-metrics/kube-state-metrics` is the new canonical location.
* `quay.io/coreos/kube-state-metrics` images will no longer be updated. `k8s.gcr.io/kube-state-metrics/kube-state-metrics` is the new canonical location _(update: `k8s.gcr.io` has been deprecated in favor of `registry.k8s.io`)._
* The helm chart that is part of the kubernetes/kube-state-metrics repository is deprecated. https://github.com/prometheus-community/helm-charts will be its new location.
For the full list of v2.0 release changes includes features, bug fixes and other breaking changes see the full [CHANGELOG](https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md).
@ -83,6 +83,7 @@ Adopting a common convention for annotations ensures consistency and understanda
| `a8r.io/uptime` | Link to external uptime dashboard. |
| `a8r.io/performance` | Link to external performance dashboard. |
| `a8r.io/dependencies` | Unstructured text describing the service dependencies for humans. |
{{< /table >}}
## Visualizing annotations: Service Catalogs
@ -1,269 +1,269 @@
---
layout: blog
title: 'Using Finalizers to Control Deletion'
date: 2021-05-14
slug: using-finalizers-to-control-deletion
---
**Authors:** Aaron Alpar (Kasten)
Deleting objects in Kubernetes can be challenging. You may think you've deleted something, only to find it still persists. While issuing a `kubectl delete` command and hoping for the best might work for day-to-day operations, understanding how Kubernetes `delete` commands operate will help you understand why some objects linger after deletion.
In this post, I'll look at:
- What properties of a resource govern deletion
- How finalizers and owner references impact object deletion
- How the propagation policy can be used to change the order of deletions
- How deletion works, with examples
For simplicity, all examples will use ConfigMaps and basic shell commands to demonstrate the process. We'll explore how the commands work and discuss repercussions and results from using them in practice.
## The basic `delete`
Kubernetes has several commands that allow you to create, read, update, and delete objects. For the purpose of this blog post, we'll focus on four `kubectl` commands: `create`, `get`, `patch`, and `delete`.
Here are examples of the basic `kubectl delete` command:
```
kubectl create configmap mymap
configmap/mymap created
```
```
kubectl get configmap/mymap
NAME DATA AGE
mymap 0 12s
```
```
kubectl delete configmap/mymap
configmap "mymap" deleted
```
```
kubectl get configmap/mymap
Error from server (NotFound): configmaps "mymap" not found
```
Each shell command above is shown followed by its output. You can see that we begin with a `kubectl create configmap mymap`, which will create the empty configmap `mymap`. Next, we need to `get` the configmap to prove it exists. We can then delete that configmap. Attempting to `get` it again produces an HTTP 404 error, which means the configmap is not found.
The state diagram for the basic `delete` command is very simple:
{{<figure width="495" src="/images/blog/2021-05-14-using-finalizers-to-control-deletion/state-diagram-delete.png" caption="State diagram for delete">}}
Although this operation is straightforward, other factors may interfere with the deletion, including finalizers and owner references.
## Understanding Finalizers
When it comes to understanding resource deletion in Kubernetes, knowledge of how finalizers work is helpful and can help you understand why some objects don't get deleted.
Finalizers are keys on resources that signal pre-delete operations. They control the garbage collection on resources, and are designed to alert controllers what cleanup operations to perform prior to removing a resource. However, they don't necessarily name code that should be executed; finalizers on resources are basically just lists of keys much like annotations. Like annotations, they can be manipulated.
Some common finalizers you've likely encountered are:
- `kubernetes.io/pv-protection`
- `kubernetes.io/pvc-protection`
The finalizers above are used on volumes to prevent accidental deletion. Similarly, some finalizers can be used to prevent deletion of any resource but are not managed by any controller.
Below, we create a custom configmap, which has no properties but contains a finalizer:
```
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: mymap
finalizers:
- kubernetes
EOF
```
The configmap resource controller doesn't understand what to do with the `kubernetes` finalizer key. I term these “dead” finalizers for configmaps, as that finalizer is normally used on namespaces. Here's what happens upon attempting to delete the configmap:
```
kubectl delete configmap/mymap &
configmap "mymap" deleted
jobs
[1]+ Running kubectl delete configmap/mymap
```
Kubernetes will report back that the object has been deleted; however, it hasn't been deleted in a traditional sense. Rather, it's in the process of deletion. When we attempt to `get` that object again, we discover the object has been modified to include the deletion timestamp.
```
kubectl get configmap/mymap -o yaml
apiVersion: v1
kind: ConfigMap
metadata:
creationTimestamp: "2020-10-22T21:30:18Z"
deletionGracePeriodSeconds: 0
deletionTimestamp: "2020-10-22T21:30:34Z"
finalizers:
- kubernetes
name: mymap
namespace: default
resourceVersion: "311456"
selfLink: /api/v1/namespaces/default/configmaps/mymap
uid: 93a37fed-23e3-45e8-b6ee-b2521db81638
```
In short, what's happened is that the object was updated, not deleted. That's because Kubernetes saw that the object contained finalizers and blocked removal of the object from etcd. The deletion timestamp signals that deletion was requested, but the deletion will not be complete until we edit the object and remove the finalizer.
Here's a demonstration of using the `patch` command to remove finalizers. If we want to delete an object, we can simply patch it on the command line to remove the finalizers. In this way, the deletion that was running in the background will complete and the object will be deleted. When we attempt to `get` that configmap, it will be gone.
```
kubectl patch configmap/mymap \
--type json \
--patch='[ { "op": "remove", "path": "/metadata/finalizers" } ]'
configmap/mymap patched
[1]+ Done kubectl delete configmap/mymap
kubectl get configmap/mymap -o yaml
Error from server (NotFound): configmaps "mymap" not found
```
Here's a state diagram for finalization:
{{<figure width="617" src="/images/blog/2021-05-14-using-finalizers-to-control-deletion/state-diagram-finalize.png" caption="State diagram for finalize">}}
So, if you attempt to delete an object that has a finalizer on it, it will remain in finalization until the controller has removed the finalizer keys or the finalizers are removed using kubectl. Once that finalizer list is empty, the object can actually be reclaimed by Kubernetes and put into a queue to be deleted from the registry.
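If you are unsure whether any finalizers are still holding an object, one quick way to check (using the `mymap` configmap from the examples above) is to print just the finalizer list:
```
kubectl get configmap/mymap -o jsonpath='{.metadata.finalizers}'
```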
## Owner References
Owner references describe how groups of objects are related. They are properties on resources that specify the relationship to one another, so entire trees of resources can be deleted.
Finalizer rules are processed when there are owner references. An owner reference consists of a name and a UID. Owner references link resources within the same namespace, and the reference also needs the UID of the owner in order to work. Pods typically have owner references to the owning replica set. So, when deployments or stateful sets are deleted, then the child replica sets and pods are deleted in the process.
Here are some examples of owner references and how they work. In the first example, we create a parent object first, then the child. The result is a very simple configmap that contains an owner reference to its parent:
```
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: mymap-parent
EOF
CM_UID=$(kubectl get configmap mymap-parent -o jsonpath="{.metadata.uid}")
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: mymap-child
ownerReferences:
- apiVersion: v1
kind: ConfigMap
name: mymap-parent
uid: $CM_UID
EOF
```
Deleting the child object when an owner reference is involved does not delete the parent:
```
kubectl get configmap
NAME DATA AGE
mymap-child 0 12m4s
mymap-parent 0 12m4s
kubectl delete configmap/mymap-child
configmap "mymap-child" deleted
kubectl get configmap
NAME DATA AGE
mymap-parent 0 12m10s
```
In this example, we re-created the parent-child configmaps from above. This time, we delete the parent instead of the child. Because the child carries an owner reference to the parent, when we later `get` the configmaps, none are left in the namespace:
```
kubectl get configmap
NAME DATA AGE
mymap-child 0 10m2s
mymap-parent 0 10m2s
kubectl delete configmap/mymap-parent
configmap "mymap-parent" deleted
kubectl get configmap
No resources found in default namespace.
```
To sum things up, when there's an owner reference from a child to a parent, deleting the parent deletes the children automatically. This is called `cascade`. The default for cascade is `true`; however, you can use the `--cascade=orphan` option for `kubectl delete` to delete an object and orphan its children. *Update: starting with kubectl v1.20, the default for cascade is `background`.*
In the following example, there is a parent and a child. Notice the owner references are still included. If I delete the parent using `--cascade=orphan`, the parent is deleted but the child still exists:
```
kubectl get configmap
NAME DATA AGE
mymap-child 0 13m8s
mymap-parent 0 13m8s
kubectl delete --cascade=orphan configmap/mymap-parent
configmap "mymap-parent" deleted
kubectl get configmap
NAME DATA AGE
mymap-child 0 13m21s
```
The `--cascade` option maps to the propagation policy in the API, which allows you to change the order in which objects are deleted within a tree. The following example uses API access to craft a custom delete API call with the `Background` propagation policy:
```
kubectl proxy --port=8080 &
Starting to serve on 127.0.0.1:8080
curl -X DELETE \
localhost:8080/api/v1/namespaces/default/configmaps/mymap-parent \
-d '{ "kind":"DeleteOptions", "apiVersion":"v1", "propagationPolicy":"Background" }' \
-H "Content-Type: application/json"
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {},
"status": "Success",
"details": { ... }
}
```
Note that the propagation policy cannot be specified on the command line using kubectl; you have to specify it using a custom API call. Create a proxy so you have access to the API server from the client, and execute a `curl` command against the object's URL to issue that `delete` request. An example of a `Foreground` delete is shown after the list of options below.
There are three different options for the propagation policy:
- `Foreground`: Children are deleted before the parent (post-order)
- `Background`: Parent is deleted before the children (pre-order)
- `Orphan`: Owner references are ignored
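As a sketch of what another policy looks like on the wire, here is the same style of call using `Foreground` propagation; this assumes the `kubectl proxy` from the earlier example is still running and that a `mymap-parent` object exists:
```
curl -X DELETE \
  localhost:8080/api/v1/namespaces/default/configmaps/mymap-parent \
  -d '{ "kind":"DeleteOptions", "apiVersion":"v1", "propagationPolicy":"Foreground" }' \
  -H "Content-Type: application/json"
```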
Keep in mind that when you delete an object and owner references have been specified, finalizers will be honored in the process. This can result in trees of objects persisting, and you end up with a partial deletion. At that point, you have to look at any existing owner references on your objects, as well as any finalizers, to understand whats happening.
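One hedged way to inspect both fields at once (assuming the `mymap-child` configmap from the earlier examples still exists) is with JSONPath output:
```
kubectl get configmap/mymap-child \
  -o jsonpath='{.metadata.ownerReferences}{"\n"}{.metadata.finalizers}{"\n"}'
```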
## Forcing a Deletion of a Namespace
There's one situation that may require forcing finalization for a namespace. If you've deleted a namespace and you've cleaned out all of the objects under it, but the namespace still exists, deletion can be forced by updating the namespace subresource, `finalize`. This informs the namespace controller that it needs to remove the finalizer from the namespace and perform any cleanup:
```
cat <<EOF | curl -X PUT \
localhost:8080/api/v1/namespaces/test/finalize \
-H "Content-Type: application/json" \
--data-binary @-
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "test"
},
"spec": {
"finalizers": null
}
}
EOF
```
This should be done with caution as it may delete the namespace only and leave orphan objects within the now non-existent namespace, which is a confusing state for Kubernetes. If this happens, the namespace can be re-created manually; sometimes the orphaned objects will re-appear under the just-created namespace, allowing manual cleanup and recovery.
## Key Takeaways
As these examples demonstrate, finalizers can get in the way of deleting resources in Kubernetes, especially when there are parent-child relationships between objects. Often, there is a reason for adding a finalizer into the code, so you should always investigate before manually deleting it. Owner references allow you to specify and remove trees of resources, although finalizers will be honored in the process. Finally, the propagation policy can be used to specify the order of deletion via a custom API call, giving you control over how objects are deleted. Now that you know a little more about how deletions work in Kubernetes, we recommend you try it out on your own, using a test cluster.
{{< youtube class="youtube-quote-sm" id="F7-ZxWwf4sY" title="Clean Up Your Room! What Does It Mean to Delete Something in K8s">}}
@ -32,7 +32,7 @@ Kubernetes releases now generate provenance attestation files describing the sta
### HorizontalPodAutoscaler v2 graduates to GA
The HorizontalPodAutscaler `autoscaling/v2` stable API moved to GA in 1.23. The HorizontalPodAutoscaler `autoscaling/v2beta2` API has been deprecated.
The HorizontalPodAutoscaler `autoscaling/v2` stable API moved to GA in 1.23. The HorizontalPodAutoscaler `autoscaling/v2beta2` API has been deprecated.
### Generic Ephemeral Volume feature graduates to GA
@ -81,7 +81,7 @@ If the `ServerSideFieldValidation` feature gate is enabled starting 1.23, users
With the feature gate enabled, we also introduce the `fieldValidation` query parameter so that users can specify the desired behavior of the server on a per request basis. Valid values for the `fieldValidation` query parameter are:
- Ignore (default when feature gate is disabled, same as pre-1.23 behavior of dropping/ignoring unkonwn fields)
- Ignore (default when feature gate is disabled, same as pre-1.23 behavior of dropping/ignoring unknown fields)
- Warn (default when feature gate is enabled).
- Strict (this will fail the request with an Invalid Request error)
@ -105,7 +105,7 @@ validations are done here.
### AppArmor support
This version introduces the initial support for AppArmor, allowing users to load and
unload AppArmor profiles into cluster nodes by using the new [AppArmorProfile](https://github.com/kubernetes-sigs/security-profiles-operator/blob/main/deploy/base/crds/apparmorprofile.yaml) CRD.
unload AppArmor profiles into cluster nodes by using the new [AppArmorProfile](https://github.com/kubernetes-sigs/security-profiles-operator/blob/main/deploy/base-crds/crds/apparmorprofile.yaml) CRD.
To enable AppArmor support use the [enableAppArmor feature gate](https://github.com/kubernetes-sigs/security-profiles-operator/blob/main/examples/config.yaml#L10) switch of your SPO configuration.
Then use our [apparmor example](https://github.com/kubernetes-sigs/security-profiles-operator/blob/main/examples/apparmorprofile.yaml) to deploy your first profile across your cluster.
@ -5,6 +5,7 @@ linkTitle: "Dockershim Removal FAQ"
date: 2022-02-17
slug: dockershim-faq
aliases: [ '/dockershim' ]
evergreen: true
---
**This supersedes the original
@ -32,7 +32,7 @@ Caleb is also a co-organizer of the [CloudNative NZ](https://www.meetup.com/clou
## [Dylan Graham](https://github.com/DylanGraham)
Dylan Graham is a cloud engineer from Adeliade, Australia. He has been contributing to the upstream Kubernetes project since 2018.
Dylan Graham is a cloud engineer from Adelaide, Australia. He has been contributing to the upstream Kubernetes project since 2018.
He stated that being a part of such a large-scale project was initially overwhelming, but that the community's friendliness and openness assisted him in getting through it.
@ -68,8 +68,7 @@ been deprecated. These removals have been superseded by newer, stable/generally
## API removals, deprecations, and other changes for Kubernetes 1.24
* [Dynamic kubelet configuration](https://github.com/kubernetes/enhancements/issues/281): `DynamicKubeletConfig` is used to enable the dynamic configuration of the kubelet. The `DynamicKubeletConfig` flag was deprecated in Kubernetes 1.22. In v1.24, this feature gate will be removed from the kubelet. See [Reconfigure kubelet](/docs/tasks/administer-cluster/reconfigure-kubelet/). Refer to the ["Dynamic kubelet config is removed" KEP](https://github.com/kubernetes/enhancements/issues/281) for more information.
* [Dynamic log sanitization](https://github.com/kubernetes/kubernetes/pull/107207): The experimental dynamic log sanitization feature is deprecated and will be removed in v1.24. This feature introduced a logging filter that could be applied to all Kubernetes system components logs to prevent various types of sensitive information from leaking via logs. Refer to [KEP-1753: Kubernetes system components logs sanitization](https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1753-logs-sanitization#deprecation) for more information and an [alternative approach](https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1753-logs-sanitization#alternatives=).
* In-tree provisioner to CSI driver migration: This applies to a number of in-tree plugins, including [Portworx](https://github.com/kubernetes/enhancements/issues/2589). Refer to the [In-tree Storage Plugin to CSI Migration Design Doc](https://git.k8s.io/design-proposals-archive/storage/csi-migration.md#background-and-motivations) for more information.
* [Dynamic log sanitization](https://github.com/kubernetes/kubernetes/pull/107207): The experimental dynamic log sanitization feature is deprecated and will be removed in v1.24. This feature introduced a logging filter that could be applied to all Kubernetes system components logs to prevent various types of sensitive information from leaking via logs. Refer to [KEP-1753: Kubernetes system components logs sanitization](https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1753-logs-sanitization#deprecation) for more information and an [alternative approach](https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1753-logs-sanitization#alternatives=).
* [Removing Dockershim from kubelet](https://github.com/kubernetes/enhancements/issues/2221): the Container Runtime Interface (CRI) for Docker (i.e. Dockershim) is currently a built-in container runtime in the kubelet code base. It was deprecated in v1.20. As of v1.24, the kubelet will no longer have dockershim. Check out this blog on [what you need to do be ready for v1.24](/blog/2022/03/31/ready-for-dockershim-removal/).
* [Storage capacity tracking for pod scheduling](https://github.com/kubernetes/enhancements/issues/1472): The CSIStorageCapacity API supports exposing currently available storage capacity via CSIStorageCapacity objects and enhances scheduling of pods that use CSI volumes with late binding. In v1.24, the CSIStorageCapacity API will be stable. The API graduating to stable initiates the deprecation of the v1beta1 CSIStorageCapacity API. Refer to the [Storage Capacity Constraints for Pod Scheduling KEP](https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1472-storage-capacity-tracking) for more information.
* [The `master` label is no longer present on kubeadm control plane nodes](https://github.com/kubernetes/kubernetes/pull/107533). For new clusters, the label 'node-role.kubernetes.io/master' will no longer be added to control plane nodes, only the label 'node-role.kubernetes.io/control-plane' will be added. For more information, refer to [KEP-2067: Rename the kubeadm "master" label and taint](https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint).
@ -84,7 +83,7 @@ As stated earlier, there are several guides about
You can start with [Finding what container runtime are on your nodes](/docs/tasks/administer-cluster/migrating-from-dockershim/find-out-runtime-you-use/).
If your nodes are using dockershim, there are other possible Docker Engine dependencies such as
Pods or third-party tools executing Docker commands or private registries in the Docker configuration file. You can follow the
[Check whether Dockershim deprecation affects you](/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-deprecation-affects-you/) guide to review possible
[Check whether Dockershim removal affects you](/docs/tasks/administer-cluster/migrating-from-dockershim/check-if-dockershim-removal-affects-you/) guide to review possible
Docker Engine dependencies. Before upgrading to v1.24, you decide to either remain using Docker Engine and
[Migrate Docker Engine nodes from dockershim to cri-dockerd](/docs/tasks/administer-cluster/migrating-from-dockershim/migrate-dockershim-dockerd/) or migrate to a CRI-compatible runtime. Here's a guide to
[change the container runtime on a node from Docker Engine to containerd](/docs/tasks/administer-cluster/migrating-from-dockershim/change-runtime-containerd/).
@ -32,7 +32,7 @@ Existing beta APIs and new versions of existing beta APIs will continue to be en
Release artifacts are [signed](https://github.com/kubernetes/enhancements/issues/3031) using [cosign](https://github.com/sigstore/cosign)
signatures,
and there is experimental support for [verifying image signatures](/docs/tasks/administer-cluster/verify-signed-images/).
and there is experimental support for [verifying image signatures](/docs/tasks/administer-cluster/verify-signed-artifacts/).
Signing and verification of release artifacts is part of [increasing software supply chain security for the Kubernetes release process](https://github.com/kubernetes/enhancements/issues/3027).
### OpenAPI v3
@ -84,8 +84,7 @@ that enables the caller of a function to control all aspects of logging (output
### Avoiding Collisions in IP allocation to Services
Kubernetes 1.24 introduces a new opt-in feature that allows you to
[soft-reserve a range for static IP address assignments](/docs/concepts/services-networking/service/#service-ip-static-sub-range)
to Services.
soft-reserve a range for static IP address assignments to Services.
With the manual enablement of this feature, the cluster will prefer automatic assignment from
the pool of Service IP addresses, thereby reducing the risk of collision.
@ -7,10 +7,11 @@ slug: grpc-probes-now-in-beta
**Author**: Sergey Kanzhelev (Google)
_Update: Since this article was posted, the feature was graduated to GA in v1.27 and doesn't require any feature gates to be enabled._
With Kubernetes 1.24 the gRPC probes functionality entered beta and is available by default.
Now you can configure startup, liveness, and readiness probes for your gRPC app
without exposing any HTTP endpoint, nor do you need an executable. Kubernetes can natively connect to your your workload via gRPC and query its status.
without exposing any HTTP endpoint, nor do you need an executable. Kubernetes can natively connect to your workload via gRPC and query its status.
## Some history
@ -32,7 +33,7 @@ the first release at [Sep 19, 2018](https://github.com/grpc-ecosystem/grpc-healt
This approach for gRPC apps health checking is very popular. There are [3,626 Dockerfiles](https://github.com/search?l=Dockerfile&q=grpc_health_probe&type=code)
with the `grpc_health_probe` and [6,621 yaml](https://github.com/search?l=YAML&q=grpc_health_probe&type=Code) files that are discovered with the
basic search on GitHub (at the moment of writing). This is good indication of the tool popularity
basic search on GitHub (at the moment of writing). This is a good indication of the tool popularity
and the need to support this natively.
Kubernetes v1.23 introduced an alpha-quality implementation of native support for
@ -106,7 +107,7 @@ another - http port to react on commands `make-serving` and `make-not-serving`.
Here is an example pod definition. It starts the `grpc-health-checking` module,
exposes ports `5000` and `8080`, and configures gRPC readiness probe:
``` yaml
```yaml
---
apiVersion: v1
kind: Pod
@ -115,7 +116,8 @@ metadata:
spec:
containers:
- name: agnhost
image: k8s.gcr.io/e2e-test-images/agnhost:2.35
# image changed since publication (previously used registry "k8s.gcr.io")
image: registry.k8s.io/e2e-test-images/agnhost:2.35
command: ["/agnhost", "grpc-health-checking"]
ports:
- containerPort: 5000
@ -125,7 +127,7 @@ spec:
port: 5000
```
If the file called `test.yaml`, you can create the pod and check it's status.
In the manifest file called `test.yaml`, you can create the pod and check its status.
The pod will be in ready state as indicated by the snippet of the output.
```shell
@ -180,7 +182,7 @@ Conditions:
Once it is switched back, in about one second the Pod will get back to ready status:
``` bsh
```bash
curl http://localhost:8080/make-serving
kubectl describe test-grpc
```
@ -18,7 +18,7 @@ case where you're using the `OrderedReady` Pod management policy for a StatefulS
Here are some examples:
- I am using a StatefulSet to orchestrate a multi-instance, cache based application where the size of the cache is large. The cache
starts cold and requires some siginificant amount of time before the container can start. There could be more initial startup tasks
starts cold and requires some significant amount of time before the container can start. There could be more initial startup tasks
that are required. A RollingUpdate on this StatefulSet would take a lot of time before the application is fully updated. If the
StatefulSet supported updating more than one pod at a time, it would result in a much faster update.
@ -50,7 +50,8 @@ spec:
app: nginx
spec:
containers:
- image: k8s.gcr.io/nginx-slim:0.8
# image changed since publication (previously used registry "k8s.gcr.io")
- image: registry.k8s.io/nginx-slim:0.8
imagePullPolicy: IfNotPresent
name: nginx
updateStrategy:
@ -66,10 +67,10 @@ If you enable the new feature and you don't specify a value for `maxUnavailable`
I'll run through a scenario based on that example manifest to demonstrate how this feature works. I will deploy a StatefulSet that
has 5 replicas, with `maxUnavailable` set to 2 and `partition` set to 0.
I can trigger a rolling update by changing the image to `k8s.gcr.io/nginx-slim:0.9`. Once I initiate the rolling update, I can
I can trigger a rolling update by changing the image to `registry.k8s.io/nginx-slim:0.9`. Once I initiate the rolling update, I can
watch the pods update 2 at a time as the current value of maxUnavailable is 2. The below output shows a span of time and is not
complete. The maxUnavailable can be an absolute number (for example, 2) or a percentage of desired Pods (for example, 10%). The
absolute number is calculated from percentage by rounding down.
absolute number is calculated from percentage by rounding up to the nearest integer.
```
kubectl get pods --watch
```
@ -145,7 +145,7 @@ workstream within the Gateway API subproject focused on Gateway API for Mesh
Management and Administration.
This group will deliver [enhancement
proposals](https://gateway-api.sigs.k8s.io/v1beta1/contributing/gep/) consisting
proposals](https://gateway-api.sigs.k8s.io/geps/overview/) consisting
of resources, additions, and modifications to the Gateway API specification for
mesh and mesh-adjacent use-cases.