Merge remote-tracking branch 'upstream/master' into dev-1.19
commit 220d754880

Makefile (16 changes)
@@ -4,7 +4,7 @@ NETLIFY_FUNC = $(NODE_BIN)/netlify-lambda
# The CONTAINER_ENGINE variable is used for specifying the container engine. By default 'docker' is used
# but this can be overridden when calling make, e.g.
# CONTAINER_ENGINE=podman make container-image
# CONTAINER_ENGINE=podman make container-image
CONTAINER_ENGINE ?= docker
CONTAINER_IMAGE = kubernetes-hugo
CONTAINER_RUN = $(CONTAINER_ENGINE) run --rm --interactive --tty --volume $(CURDIR):/src

@@ -17,12 +17,15 @@ CCEND=\033[0m
help: ## Show this help.
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

+module-check:
+	@git submodule status --recursive | awk '/^[+-]/ {printf "\033[31mWARNING\033[0m Submodule not initialized: \033[34m%s\033[0m\n",$$2}' 1>&2
+
all: build ## Build site with production settings and put deliverables in ./public

-build: ## Build site with production settings and put deliverables in ./public
+build: module-check ## Build site with production settings and put deliverables in ./public
	hugo --minify

-build-preview: ## Build site with drafts and future posts enabled
+build-preview: module-check ## Build site with drafts and future posts enabled
	hugo --buildDrafts --buildFuture

deploy-preview: ## Deploy preview site via netlify

@@ -39,7 +42,7 @@ production-build: build check-headers-file ## Build the production site and ensu
non-production-build: ## Build the non-production site, which adds noindex headers to prevent indexing
	hugo --enableGitInfo

-serve: ## Boot the development server.
+serve: module-check ## Boot the development server.
	hugo server --buildFuture

docker-image:

@@ -60,10 +63,10 @@ container-image:
	--tag $(CONTAINER_IMAGE) \
	--build-arg HUGO_VERSION=$(HUGO_VERSION)

-container-build:
+container-build: module-check
	$(CONTAINER_RUN) $(CONTAINER_IMAGE) hugo

-container-serve:
+container-serve: module-check
	$(CONTAINER_RUN) --mount type=tmpfs,destination=/src/resources,tmpfs-mode=0755 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0

test-examples:

@@ -81,4 +84,3 @@ docker-internal-linkcheck:
container-internal-linkcheck: link-checker-image-pull
	$(CONTAINER_RUN) $(CONTAINER_IMAGE) hugo --config config.toml,linkcheck-config.toml --buildFuture
	$(CONTAINER_ENGINE) run --mount type=bind,source=$(CURDIR),target=/test --rm wjdp/htmltest htmltest

@@ -15,7 +15,7 @@ For more information about contributing to the Kubernetes documentation, see:

* [Start contributing](https://kubernetes.io/docs/contribute/start/)
* [Staging your documentation changes](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally)
-* [Using page templates](http://kubernetes.io/docs/contribute/style/page-templates/)
+* [Using page templates](http://kubernetes.io/docs/contribute/style/page-content-types/)
* [Documentation style guide](http://kubernetes.io/docs/contribute/style/style-guide/)
* [Localizing Kubernetes documentation](https://kubernetes.io/docs/contribute/localization/)

@@ -18,7 +18,7 @@ For more information about contributing to the Kubernetes documentation, see:

* [Getting started contributing](https://kubernetes.io/docs/contribute/start/)
* [Viewing your changes locally](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally)
-* [Using page templates](http://kubernetes.io/docs/contribute/style/page-templates/)
+* [Using page templates](http://kubernetes.io/docs/contribute/style/page-content-types/)
* [Documentation style guide](http://kubernetes.io/docs/contribute/style/style-guide/)
* [Translating the Kubernetes documentation](https://kubernetes.io/docs/contribute/localization/)

@@ -23,7 +23,7 @@ For more information about contributing to the Kubernetes documentation, see:

* [Start contributing](https://kubernetes.io/docs/contribute/start/)
* [Viewing your documentation changes](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally)
-* [Using page templates](http://kubernetes.io/docs/contribute/style/page-templates/)
+* [Using page templates](https://kubernetes.io/docs/contribute/style/page-content-types/)
* [Documentation Style Guide](http://kubernetes.io/docs/contribute/style/style-guide/)
* [Translating the Kubernetes documentation](https://kubernetes.io/docs/contribute/localization/)

README-ja.md (88 changes)

@@ -3,7 +3,31 @@
[![Build Status](https://api.travis-ci.org/kubernetes/website.svg?branch=master)](https://travis-ci.org/kubernetes/website)
[![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest)

-Welcome! This repository contains all of the assets required to build the [Kubernetes website and documentation](https://kubernetes.io/). Thank you for your interest in contributing!
+This repository contains all of the assets required to build the [Kubernetes website and documentation](https://kubernetes.io/). Thank you for your interest in contributing!

## Running the website locally using Hugo

See the [official Hugo documentation](https://gohugo.io/getting-started/installing/) for Hugo installation instructions. Make sure to install the same Hugo version as the `HUGO_VERSION` specified in the [`netlify.toml`](netlify.toml#L10) file.

Once Hugo is installed, you can run the website locally with the following commands:

```bash
git clone https://github.com/kubernetes/website.git
cd website
git submodule update --init --recursive
hugo server --buildFuture
```

This starts the local Hugo server on port 1313. Open your browser to http://localhost:1313 to view the website. As you make changes to the source files in the repository, Hugo updates the website and forces a browser refresh.

## Get involved with SIG Docs

Learn how to engage with the SIG Docs Kubernetes community on the [community page](https://github.com/kubernetes/community/tree/master/sig-docs#meetings).

You can reach the maintainers of this project at:

- [Slack](https://kubernetes.slack.com/messages/kubernetes-docs-ja)
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)

## Contributing to the docs

@@ -12,63 +36,31 @@ When you click the **Fork** button in the upper-right corner of the GitHub screen, a copy is created in your
Once a pull request is created, a reviewer takes responsibility for providing clear, actionable feedback.
As the owner of your pull request, **it is your responsibility to edit your pull request and address the feedback.**
Also note that, depending on the situation, you may receive feedback from more than one reviewer, or from a reviewer who was not originally assigned.
-Furthermore, in some cases, one of your reviewers may ask a [Kubernetes tech reviewer](https://github.com/kubernetes/website/wiki/Tech-reviewers) for a technical review.
+Furthermore, in some cases, one of your reviewers may ask a Kubernetes technical reviewer for a technical review.
Reviewers do their best to provide feedback in a timely fashion, but response time can vary based on circumstances.

For more information about contributing to the Kubernetes documentation, see:

* [Getting started with contributing](https://kubernetes.io/docs/contribute/start/)
* [Staging your documentation changes](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally)
* [Using page templates](http://kubernetes.io/docs/contribute/style/page-templates/)
* [Documentation style guide](http://kubernetes.io/docs/contribute/style/style-guide/)
* [Contributing to the Kubernetes documentation](https://kubernetes.io/ja/docs/contribute/)
* [Page content types](https://kubernetes.io/docs/contribute/style/page-content-types/)
* [Documentation style guide](https://kubernetes.io/docs/contribute/style/style-guide/)
* [Localizing Kubernetes documentation](https://kubernetes.io/docs/contribute/localization/)

## Running the website locally using Docker
## Localized `README.md` files

The recommended way to run the site locally is to use a specialized [Docker](https://docker.com) image that includes the [Hugo](https://gohugo.io) static site generator.

> If you are running on Windows, you'll need a few more tools which you can install with [Chocolatey](https://chocolatey.org): `choco install make`

> If you'd prefer to run the website without Docker, see [Running the site locally using Hugo](#hugoをローカル環境で動かす) below.

If you already have [Docker up and running](https://www.docker.com/get-started), build the local `kubernetes-hugo` image with:

```bash
make docker-image
```

Once the image has been built, you can run the website locally with:

```bash
make docker-serve
```

Open your browser to http://localhost:1313 to view the website. As you make changes to the source files in the repository, Hugo updates the website and forces a browser refresh.

## Running the site locally using Hugo

See the [official Hugo documentation](https://gohugo.io/getting-started/installing/) for Hugo installation instructions. Make sure to install the same Hugo version as the `HUGO_VERSION` specified in the [`netlify.toml`](netlify.toml#L9) file.

Once Hugo is installed, you can run the website locally with:

```bash
make serve
```

This starts the local Hugo server on port 1313. Open your browser to http://localhost:1313 to view the website. As you make changes to the source files in the repository, Hugo updates the website and forces a browser refresh.

## Community, discussion, contribution, and support

Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).

You can reach the maintainers of this project at:

- [Slack](https://kubernetes.slack.com/messages/kubernetes-docs-ja)
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)

| Language | Language |
|---|---|
|[Chinese](README-zh.md)|[Korean](README-ko.md)|
|[French](README-fr.md)|[Polish](README-pl.md)|
|[German](README-de.md)|[Portuguese](README-pt.md)|
|[Hindi](README-hi.md)|[Russian](README-ru.md)|
|[Indonesian](README-id.md)|[Spanish](README-es.md)|
|[Italian](README-it.md)|[Ukrainian](README-uk.md)|
|[Japanese](README-ja.md)|[Vietnamese](README-vi.md)|

### Code of conduct

-Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
+Participation in the Kubernetes community is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

## Thank you!

@@ -41,7 +41,7 @@ Furthermore, in some cases, one of your reviewers might ask for a technical revi
For more information about contributing to the Kubernetes documentation, see:

* [Contribute to Kubernetes docs](https://kubernetes.io/docs/contribute/)
-* [Using Page Templates](https://kubernetes.io/docs/contribute/style/page-templates/)
+* [Page Content Types](https://kubernetes.io/docs/contribute/style/page-content-types/)
* [Documentation Style Guide](https://kubernetes.io/docs/contribute/style/style-guide/)
* [Localizing Kubernetes Documentation](https://kubernetes.io/docs/contribute/localization/)

@@ -65,8 +65,8 @@ footer {
.button {
  display: inline-block;
  border-radius: 6px;
-  padding: 0 20px;
-  line-height: 40px;
+  padding: 6px 20px;
+  line-height: 1.3rem;
  color: white;
  background-color: $blue;
  text-decoration: none;

@@ -299,12 +299,19 @@ blockquote {
  }
}

+.td-sidebar-nav {
+  & > .td-sidebar-nav__section {
+    padding-top: .5rem;
+    padding-left: 1.5rem;
+  }
+}
+
.td-sidebar__inner {
  form.td-sidebar__search {

    button.td-sidebar__toggle {
      &:hover {
-        color: $white;
+        color: #000000;
      }

      color: $blue;

@@ -238,7 +238,7 @@ no = 'Sorry to hear that. Please <a href="https://github.com/USERNAME/REPOSITORY

[[params.links.user]]
name = "Youtube"
-url = "https://youtbue.com/kubernetescommunity"
+url = "https://youtube.com/kubernetescommunity"
icon = "fab fa-youtube"
desc = "Youtube community videos"

@@ -38,7 +38,7 @@ Minikube supports the following drivers:
* kvm ([driver installation](https://minikube.sigs.k8s.io/docs/drivers/#kvm-driver))
* hyperkit ([driver installation](https://minikube.sigs.k8s.io/docs/drivers/#hyperkit-driver))
* xhyve ([driver installation](https://minikube.sigs.k8s.io/docs/drivers/#xhyve-driver)) (deprecated)
-* hyperv ([driver installation](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#hyperv-driver))
+* hyperv ([driver installation](https://minikube.sigs.k8s.io/docs/drivers/#hyperv-driver))
Note that the IP address given below is dynamic and can change. It can be retrieved with `minikube ip`.
* none (Runs the Kubernetes components on the host and not in a VM. Using this driver requires Docker ([install Docker](https://docs.docker.com/install/linux/docker-ce/ubuntu/)) and a Linux environment)

@@ -18,7 +18,7 @@ This is post #1 in a series about the local deployment options on Linux, and it

[Minikube](https://github.com/kubernetes/minikube) is a cross-platform, community-driven [Kubernetes](https://kubernetes.io/) distribution, which is targeted to be used primarily in local environments. It deploys a single-node cluster, which is an excellent option for having a simple Kubernetes cluster up and running on localhost.

-Minikube is designed to be used as a virtual machine (VM), and the default VM runtime is [VirtualBox](https://www.virtualbox.org/). At the same time, extensibility is one of the critical benefits of Minikube, so it's possible to use it with [drivers](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md) outside of VirtualBox.
+Minikube is designed to be used as a virtual machine (VM), and the default VM runtime is [VirtualBox](https://www.virtualbox.org/). At the same time, extensibility is one of the critical benefits of Minikube, so it's possible to use it with [drivers](https://minikube.sigs.k8s.io/docs/drivers/) outside of VirtualBox.

By default, Minikube uses Virtualbox as a runtime for running the virtual machine. Virtualbox is a cross-platform solution, which can be used on a variety of operating systems, including GNU/Linux, Windows, and macOS.

@@ -12,61 +12,3 @@ The Concepts section helps you learn about the parts of the Kubernetes system an

<!-- body -->

## Overview

To work with Kubernetes, you use *Kubernetes API objects* to describe your cluster's *desired state*: what applications or other workloads you want to run, what container images they use, the number of replicas, what network and disk resources you want to make available, and more. You set your desired state by creating objects using the Kubernetes API, typically via the command-line interface, `kubectl`. You can also use the Kubernetes API directly to interact with the cluster and set or modify your desired state.

Once you've set your desired state, the *Kubernetes Control Plane* makes the cluster's current state match the desired state via the Pod Lifecycle Event Generator ([PLEG](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/pod-lifecycle-event-generator.md)). To do so, Kubernetes performs a variety of tasks automatically--such as starting or restarting containers, scaling the number of replicas of a given application, and more. The Kubernetes Control Plane consists of a collection of processes running on your cluster:

* The **Kubernetes Master** is a collection of three processes that run on a single node in your cluster, which is designated as the master node. Those processes are: [kube-apiserver](/docs/admin/kube-apiserver/), [kube-controller-manager](/docs/admin/kube-controller-manager/) and [kube-scheduler](/docs/admin/kube-scheduler/).
* Each individual non-master node in your cluster runs two processes:
  * **[kubelet](/docs/admin/kubelet/)**, which communicates with the Kubernetes Master.
  * **[kube-proxy](/docs/admin/kube-proxy/)**, a network proxy which reflects Kubernetes networking services on each node.

## Kubernetes objects

Kubernetes contains a number of abstractions that represent the state of your system: deployed containerized applications and workloads, their associated network and disk resources, and other information about what your cluster is doing. These abstractions are represented by objects in the Kubernetes API. See [Understanding Kubernetes objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/#kubernetes-objects) for more details.

The basic Kubernetes objects include:

* [Pod](/docs/concepts/workloads/pods/pod-overview/)
* [Service](/docs/concepts/services-networking/service/)
* [Volume](/docs/concepts/storage/volumes/)
* [Namespace](/docs/concepts/overview/working-with-objects/namespaces/)

Kubernetes also contains higher-level abstractions that rely on [controllers](/docs/concepts/architecture/controller/) to build upon the basic objects, and provide additional functionality and convenience features. These include:

* [Deployment](/docs/concepts/workloads/controllers/deployment/)
* [DaemonSet](/docs/concepts/workloads/controllers/daemonset/)
* [StatefulSet](/docs/concepts/workloads/controllers/statefulset/)
* [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/)
* [Job](/docs/concepts/workloads/controllers/job/)

## Kubernetes Control Plane

The various parts of the Kubernetes Control Plane, such as the Kubernetes Master and kubelet processes, govern how Kubernetes communicates with your cluster. The Control Plane maintains a record of all of the Kubernetes Objects in the system, and runs continuous control loops to manage those objects' state. At any given time, the Control Plane's control loops will respond to changes in the cluster and work to make the actual state of all the objects in the system match the desired state that you provided.

For example, when you use the Kubernetes API to create a Deployment, you provide a new desired state for the system. The Kubernetes Control Plane records that object creation, and carries out your instructions by starting the required applications and scheduling them to cluster nodes--thus making the cluster's actual state match the desired state.

### Kubernetes Master

The Kubernetes master is responsible for maintaining the desired state for your cluster. When you interact with Kubernetes, such as by using the `kubectl` command-line interface, you're communicating with your cluster's Kubernetes master.

> The "master" refers to a collection of processes managing the cluster state. Typically all these processes run on a single node in the cluster, and this node is also referred to as the master. The master can also be replicated for availability and redundancy.

### Kubernetes Nodes

The nodes in a cluster are the machines (VMs, physical servers, etc) that run your applications and cloud workflows. The Kubernetes master controls each node; you'll rarely interact with nodes directly.


## {{% heading "whatsnext" %}}

If you would like to write a concept page, see
[Page Content Types](/docs/home/contribute/style/page-content-types/#concept)
for information about the concept page types.

@@ -1,5 +1,7 @@
---
title: "Cluster Architecture"
weight: 30
+description: >
+  The architectural concepts behind Kubernetes.
---

@@ -1,5 +1,74 @@
---
-title: "Cluster Administration"
+title: Cluster Administration
reviewers:
- davidopp
- lavalamp
weight: 100
content_type: concept
description: >
  Lower-level detail relevant to creating or administering a Kubernetes cluster.
---

<!-- overview -->
The cluster administration overview is for anyone creating or administering a Kubernetes cluster.
It assumes some familiarity with core Kubernetes [concepts](/docs/concepts/).


<!-- body -->
## Planning a cluster

See the guides in [Setup](/docs/setup/) for examples of how to plan, set up, and configure Kubernetes clusters. The solutions listed in this article are called *distros*.

{{< note >}}
Not all distros are actively maintained. Choose distros which have been tested with a recent version of Kubernetes.
{{< /note >}}

Before choosing a guide, here are some considerations:

- Do you just want to try out Kubernetes on your computer, or do you want to build a high-availability, multi-node cluster? Choose distros best suited for your needs.
- Will you be using **a hosted Kubernetes cluster**, such as [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/), or **hosting your own cluster**?
- Will your cluster be **on-premises**, or **in the cloud (IaaS)**? Kubernetes does not directly support hybrid clusters. Instead, you can set up multiple clusters.
- **If you are configuring Kubernetes on-premises**, consider which [networking model](/docs/concepts/cluster-administration/networking/) fits best.
- Will you be running Kubernetes on **"bare metal" hardware** or on **virtual machines (VMs)**?
- Do you **just want to run a cluster**, or do you expect to do **active development of Kubernetes project code**? If the
  latter, choose an actively-developed distro. Some distros only use binary releases, but
  offer a greater variety of choices.
- Familiarize yourself with the [components](/docs/admin/cluster-components/) needed to run a cluster.


## Managing a cluster

* [Managing a cluster](/docs/tasks/administer-cluster/cluster-management/) describes several topics related to the lifecycle of a cluster: creating a new cluster, upgrading your cluster’s master and worker nodes, performing node maintenance (e.g. kernel upgrades), and upgrading the Kubernetes API version of a running cluster.

* Learn how to [manage nodes](/docs/concepts/nodes/node/).

* Learn how to set up and manage the [resource quota](/docs/concepts/policy/resource-quotas/) for shared clusters.

## Securing a cluster

* [Certificates](/docs/concepts/cluster-administration/certificates/) describes the steps to generate certificates using different tool chains.

* [Kubernetes Container Environment](/docs/concepts/containers/container-environment/) describes the environment for Kubelet managed containers on a Kubernetes node.

* [Controlling Access to the Kubernetes API](/docs/reference/access-authn-authz/controlling-access/) describes how to set up permissions for users and service accounts.

* [Authenticating](/docs/reference/access-authn-authz/authentication/) explains authentication in Kubernetes, including the various authentication options.

* [Authorization](/docs/reference/access-authn-authz/authorization/) is separate from authentication, and controls how HTTP calls are handled.

* [Using Admission Controllers](/docs/reference/access-authn-authz/admission-controllers/) explains plug-ins which intercept requests to the Kubernetes API server after authentication and authorization.

* [Using Sysctls in a Kubernetes Cluster](/docs/concepts/cluster-administration/sysctl-cluster/) describes how an administrator can use the `sysctl` command-line tool to set kernel parameters.

* [Auditing](/docs/tasks/debug-application-cluster/audit/) describes how to interact with Kubernetes' audit logs.

### Securing the kubelet
* [Master-Node communication](/docs/concepts/architecture/master-node-communication/)
* [TLS bootstrapping](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/)
* [Kubelet authentication/authorization](/docs/admin/kubelet-authentication-authorization/)

## Optional Cluster Services

* [DNS Integration](/docs/concepts/services-networking/dns-pod-service/) describes how to resolve a DNS name directly to a Kubernetes service.

* [Logging and Monitoring Cluster Activity](/docs/concepts/cluster-administration/logging/) explains how logging in Kubernetes works and how to implement it.

@@ -99,7 +99,7 @@ Different settings can be applied to a load balancer service in AWS using _annot
* `service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout`: Used on the service to specify a connection draining timeout.
* `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout`: Used on the service to specify the idle connection timeout.
* `service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled`: Used on the service to enable or disable cross-zone load balancing.
-* `service.beta.kubernetes.io/aws-load-balancer-security-groups`: Used to specify the security groups to be added to ELB created. This replaces all other security groups previously assigned to the ELB.
+* `service.beta.kubernetes.io/aws-load-balancer-security-groups`: Used to specify the security groups to be added to ELB created. This replaces all other security groups previously assigned to the ELB. Security groups defined here should not be shared between services.
* `service.beta.kubernetes.io/aws-load-balancer-extra-security-groups`: Used on the service to specify additional security groups to be added to ELB created
* `service.beta.kubernetes.io/aws-load-balancer-internal`: Used on the service to indicate that we want an internal ELB.
* `service.beta.kubernetes.io/aws-load-balancer-proxy-protocol`: Used on the service to enable the proxy protocol on an ELB. Right now we only accept the value `*` which means enabling the proxy protocol on all ELB backends. In the future we could adjust this to allow setting the proxy protocol only on certain backends.
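
As a sketch of how such annotations are applied, the following Service manifest attaches a few of them; the Service name, selector, ports, and the annotation values are placeholder examples rather than defaults:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: example-service           # placeholder name
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60"
    service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
    service.beta.kubernetes.io/aws-load-balancer-internal: "true"
spec:
  type: LoadBalancer
  selector:
    app: example                  # assumes Pods labeled app=example
  ports:
  - port: 80
    targetPort: 8080
```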

@@ -433,4 +433,4 @@ Alibaba Cloud does not require the format of node name, but the kubelet needs to

### Load Balancers

You can set up external load balancers to use specific features in Alibaba Cloud by configuring the [annotations](https://www.alibabacloud.com/help/en/doc-detail/86531.htm).

@@ -1,73 +0,0 @@
---
reviewers:
- davidopp
- lavalamp
title: Cluster Administration Overview
content_type: concept
weight: 10
---

<!-- overview -->
The cluster administration overview is for anyone creating or administering a Kubernetes cluster.
It assumes some familiarity with core Kubernetes [concepts](/docs/concepts/).


<!-- body -->
## Planning a cluster

See the guides in [Setup](/docs/setup/) for examples of how to plan, set up, and configure Kubernetes clusters. The solutions listed in this article are called *distros*.

Before choosing a guide, here are some considerations:

- Do you just want to try out Kubernetes on your computer, or do you want to build a high-availability, multi-node cluster? Choose distros best suited for your needs.
- Will you be using **a hosted Kubernetes cluster**, such as [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/), or **hosting your own cluster**?
- Will your cluster be **on-premises**, or **in the cloud (IaaS)**? Kubernetes does not directly support hybrid clusters. Instead, you can set up multiple clusters.
- **If you are configuring Kubernetes on-premises**, consider which [networking model](/docs/concepts/cluster-administration/networking/) fits best.
- Will you be running Kubernetes on **"bare metal" hardware** or on **virtual machines (VMs)**?
- Do you **just want to run a cluster**, or do you expect to do **active development of Kubernetes project code**? If the
  latter, choose an actively-developed distro. Some distros only use binary releases, but
  offer a greater variety of choices.
- Familiarize yourself with the [components](/docs/admin/cluster-components/) needed to run a cluster.

Note: Not all distros are actively maintained. Choose distros which have been tested with a recent version of Kubernetes.

## Managing a cluster

* [Managing a cluster](/docs/tasks/administer-cluster/cluster-management/) describes several topics related to the lifecycle of a cluster: creating a new cluster, upgrading your cluster’s master and worker nodes, performing node maintenance (e.g. kernel upgrades), and upgrading the Kubernetes API version of a running cluster.

* Learn how to [manage nodes](/docs/concepts/nodes/node/).

* Learn how to set up and manage the [resource quota](/docs/concepts/policy/resource-quotas/) for shared clusters.

## Securing a cluster

* [Certificates](/docs/concepts/cluster-administration/certificates/) describes the steps to generate certificates using different tool chains.

* [Kubernetes Container Environment](/docs/concepts/containers/container-environment/) describes the environment for Kubelet managed containers on a Kubernetes node.

* [Controlling Access to the Kubernetes API](/docs/reference/access-authn-authz/controlling-access/) describes how to set up permissions for users and service accounts.

* [Authenticating](/docs/reference/access-authn-authz/authentication/) explains authentication in Kubernetes, including the various authentication options.

* [Authorization](/docs/reference/access-authn-authz/authorization/) is separate from authentication, and controls how HTTP calls are handled.

* [Using Admission Controllers](/docs/reference/access-authn-authz/admission-controllers/) explains plug-ins which intercept requests to the Kubernetes API server after authentication and authorization.

* [Using Sysctls in a Kubernetes Cluster](/docs/concepts/cluster-administration/sysctl-cluster/) describes how an administrator can use the `sysctl` command-line tool to set kernel parameters.

* [Auditing](/docs/tasks/debug-application-cluster/audit/) describes how to interact with Kubernetes' audit logs.

### Securing the kubelet
* [Master-Node communication](/docs/concepts/architecture/master-node-communication/)
* [TLS bootstrapping](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/)
* [Kubelet authentication/authorization](/docs/admin/kubelet-authentication-authorization/)

## Optional Cluster Services

* [DNS Integration](/docs/concepts/services-networking/dns-pod-service/) describes how to resolve a DNS name directly to a Kubernetes service.

* [Logging and Monitoring Cluster Activity](/docs/concepts/cluster-administration/logging/) explains how logging in Kubernetes works and how to implement it.

@@ -1,5 +1,7 @@
---
title: "Configuration"
weight: 80
+description: >
+  Resources that Kubernetes provides for configuring Pods.
---

@@ -60,7 +60,7 @@ metadata:
  name: game-demo
data:
  # property-like keys; each key maps to a simple value
-  player_initial_lives: 3
+  player_initial_lives: "3"
  ui_properties_file_name: "user-interface.properties"
  #
  # file-like keys

@@ -131,7 +131,7 @@ spec:

A ConfigMap doesn't differentiate between single line property values and
multi-line file-like values.
-What matters how Pods and other objects consume those values.
+What matters is how Pods and other objects consume those values.
For this example, defining a volume and mounting it inside the `demo`
container as `/config` creates four files:

@@ -168,7 +168,7 @@ ConfigMaps can hold data that other parts of the system should use for configura
To consume a ConfigMap in a volume in a Pod:

1. Create a config map or use an existing one. Multiple Pods can reference the same config map.
-1. Modify your Pod definition to add a volume under `.spec.volumes[]`. Name the volume anything, and have a `.spec.volumes[].configmap.localObjectReference` field set to reference your ConfigMap object.
+1. Modify your Pod definition to add a volume under `.spec.volumes[]`. Name the volume anything, and have a `.spec.volumes[].configMap.name` field set to reference your ConfigMap object.
1. Add a `.spec.containers[].volumeMounts[]` to each container that needs the config map. Specify `.spec.containers[].volumeMounts[].readOnly = true` and `.spec.containers[].volumeMounts[].mountPath` to an unused directory name where you would like the config map to appear.
1. Modify your image or command line so that the program looks for files in that directory. Each key in the config map `data` map becomes the filename under `mountPath` (see the sketch after this list).
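
A minimal Pod sketch that follows these steps; the ConfigMap name `game-demo` matches the example above, while the Pod name, image, and mount path are placeholders:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: configmap-demo-pod         # placeholder name
spec:
  containers:
  - name: demo
    image: alpine                  # placeholder image
    command: ["sleep", "3600"]
    volumeMounts:
    - name: config                 # must match the volume name below
      mountPath: "/config"         # each ConfigMap key appears here as a file
      readOnly: true
  volumes:
  - name: config
    configMap:
      name: game-demo              # .spec.volumes[].configMap.name references the ConfigMap
```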
@@ -103,7 +103,7 @@ The caching semantics of the underlying image provider make even `imagePullPolic

- Use label selectors for `get` and `delete` operations instead of specific object names. See the sections on [label selectors](/docs/concepts/overview/working-with-objects/labels/#label-selectors) and [using labels effectively](/docs/concepts/cluster-administration/manage-deployment/#using-labels-effectively).

-- Use `kubectl run` and `kubectl expose` to quickly create single-container Deployments and Services. See [Use a Service to Access an Application in a Cluster](/docs/tasks/access-application-cluster/service-access-application-cluster/) for an example.
+- Use `kubectl create deployment` and `kubectl expose` to quickly create single-container Deployments and Services. See [Use a Service to Access an Application in a Cluster](/docs/tasks/access-application-cluster/service-access-application-cluster/) for an example.

@@ -1,5 +1,44 @@
---
-title: "Containers"
+title: Containers
weight: 40
description: Technology for packaging an application along with its runtime dependencies.
reviewers:
- erictune
- thockin
content_type: concept
---

<!-- overview -->

Each container that you run is repeatable; the standardization from having
dependencies included means that you get the same behavior wherever you
run it.

Containers decouple applications from underlying host infrastructure.
This makes deployment easier in different cloud or OS environments.


<!-- body -->

## Container images
A [container image](/docs/concepts/containers/images/) is a ready-to-run
software package, containing everything needed to run an application:
the code and any runtime it requires, application and system libraries,
and default values for any essential settings.

By design, a container is immutable: you cannot change the code of a
container that is already running. If you have a containerized application
and want to make changes, you need to build a new container that includes
the change, then recreate the container to start from the updated image.

## Container runtimes

{{< glossary_definition term_id="container-runtime" length="all" >}}

## {{% heading "whatsnext" %}}

* Read about [container images](/docs/concepts/containers/images/)
* Read about [Pods](/docs/concepts/workloads/pods/)

@@ -10,7 +10,7 @@ weight: 10
<!-- overview -->

A container image represents binary data that encapsulates an application and all its
-software depencies. Container images are executable software bundles that can run
+software dependencies. Container images are executable software bundles that can run
standalone and that make very well defined assumptions about their runtime environment.

You typically create a container image of your application and push it to a registry

@@ -61,6 +61,8 @@ you can do one of the following:
- omit the `imagePullPolicy` and the tag for the image to use.
- enable the [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) admission controller.

+When `imagePullPolicy` is defined without a specific value, it is also set to `Always`.
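
For illustration, a minimal container spec with the pull policy set explicitly; the Pod name and image reference are placeholders:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pull-policy-demo                       # placeholder name
spec:
  containers:
  - name: app
    image: registry.example.com/app:latest     # placeholder image reference
    imagePullPolicy: Always                    # the kubelet pulls the image every time the Pod starts
```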

## Multi-architecture Images with Manifests

As well as providing binary images, a container registry can also serve a [container image manifest](https://github.com/opencontainers/image-spec/blob/master/manifest.md). A manifest can reference image manifests for architecture-specific versions of a container. The idea is that you can have a name for an image (for example: `pause`, `example/mycontainer`, `kube-apiserver`) and allow different systems to fetch the right binary image for the machine architecture they are using.

@@ -89,7 +91,7 @@ These options are explained in more detail below.
### Configuring Nodes to authenticate to a Private Registry

If you run Docker on your nodes, you can configure the Docker container
-runtuime to authenticate to a private container registry.
+runtime to authenticate to a private container registry.

This approach is suitable if you can control node configuration.

@@ -1,46 +0,0 @@
---
reviewers:
- erictune
- thockin
title: Containers overview
content_type: concept
weight: 1
---

<!-- overview -->

Containers are a technology for packaging the (compiled) code for an
application along with the dependencies it needs at run time. Each
container that you run is repeatable; the standardization from having
dependencies included means that you get the same behavior wherever you
run it.

Containers decouple applications from underlying host infrastructure.
This makes deployment easier in different cloud or OS environments.


<!-- body -->

## Container images
A [container image](/docs/concepts/containers/images/) is a ready-to-run
software package, containing everything needed to run an application:
the code and any runtime it requires, application and system libraries,
and default values for any essential settings.

By design, a container is immutable: you cannot change the code of a
container that is already running. If you have a containerized application
and want to make changes, you need to build a new container that includes
the change, then recreate the container to start from the updated image.

## Container runtimes

{{< glossary_definition term_id="container-runtime" length="all" >}}

## {{% heading "whatsnext" %}}

* Read about [container images](/docs/concepts/containers/images/)
* Read about [Pods](/docs/concepts/workloads/pods/)

@@ -1,4 +1,212 @@
---
title: Extending Kubernetes
weight: 110
description: Different ways to change the behavior of your Kubernetes cluster.
reviewers:
- erictune
- lavalamp
- cheftako
- chenopis
content_type: concept
---

<!-- overview -->

Kubernetes is highly configurable and extensible. As a result,
there is rarely a need to fork or submit patches to the Kubernetes
project code.

This guide describes the options for customizing a Kubernetes
cluster. It is aimed at {{< glossary_tooltip text="cluster operators" term_id="cluster-operator" >}} who want to
understand how to adapt their Kubernetes cluster to the needs of
their work environment. Developers who are prospective {{< glossary_tooltip text="Platform Developers" term_id="platform-developer" >}} or Kubernetes Project {{< glossary_tooltip text="Contributors" term_id="contributor" >}} will also find it
useful as an introduction to what extension points and patterns
exist, and their trade-offs and limitations.


<!-- body -->

## Overview

Customization approaches can be broadly divided into *configuration*, which only involves changing flags, local configuration files, or API resources; and *extensions*, which involve running additional programs or services. This document is primarily about extensions.

## Configuration

*Configuration files* and *flags* are documented in the Reference section of the online documentation, under each binary:

* [kubelet](/docs/admin/kubelet/)
* [kube-apiserver](/docs/admin/kube-apiserver/)
* [kube-controller-manager](/docs/admin/kube-controller-manager/)
* [kube-scheduler](/docs/admin/kube-scheduler/).

Flags and configuration files may not always be changeable in a hosted Kubernetes service or a distribution with managed installation. When they are changeable, they are usually only changeable by the cluster administrator. Also, they are subject to change in future Kubernetes versions, and setting them may require restarting processes. For those reasons, they should be used only when there are no other options.

*Built-in Policy APIs*, such as [ResourceQuota](/docs/concepts/policy/resource-quotas/), [PodSecurityPolicies](/docs/concepts/policy/pod-security-policy/), [NetworkPolicy](/docs/concepts/services-networking/network-policies/) and Role-based Access Control ([RBAC](/docs/reference/access-authn-authz/rbac/)), are built-in Kubernetes APIs. APIs are typically used with hosted Kubernetes services and with managed Kubernetes installations. They are declarative and use the same conventions as other Kubernetes resources like pods, so new cluster configuration can be repeatable and be managed the same way as applications. And, where they are stable, they enjoy a [defined support policy](/docs/reference/deprecation-policy/) like other Kubernetes APIs. For these reasons, they are preferred over *configuration files* and *flags* where suitable.
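
As one hedged illustration of these built-in policy APIs, a minimal ResourceQuota might look like the sketch below; the name, namespace, and limits are placeholders chosen for the example:

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: demo-quota            # placeholder name
  namespace: team-a           # placeholder namespace
spec:
  hard:
    pods: "10"                # at most 10 Pods in the namespace
    requests.cpu: "4"         # total CPU requests across all Pods
    requests.memory: 8Gi      # total memory requests across all Pods
```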

## Extensions

Extensions are software components that extend and deeply integrate with Kubernetes.
They adapt it to support new types and new kinds of hardware.

Most cluster administrators will use a hosted or distribution
instance of Kubernetes. As a result, most Kubernetes users will not need to
install extensions and fewer will need to author new ones.

## Extension Patterns

Kubernetes is designed to be automated by writing client programs. Any
program that reads and/or writes to the Kubernetes API can provide useful
automation. *Automation* can run on the cluster or off it. By following
the guidance in this doc you can write highly available and robust automation.
Automation generally works with any Kubernetes cluster, including hosted
clusters and managed installations.

There is a specific pattern for writing client programs that work well with
Kubernetes called the *Controller* pattern. Controllers typically read an
object's `.spec`, possibly do things, and then update the object's `.status`.

A controller is a client of Kubernetes. When Kubernetes is the client and
calls out to a remote service, it is called a *Webhook*. The remote service
is called a *Webhook Backend*. Like Controllers, Webhooks do add a point of
failure.

In the webhook model, Kubernetes makes a network request to a remote service.
In the *Binary Plugin* model, Kubernetes executes a binary (program).
Binary plugins are used by the kubelet (e.g. [Flex Volume
Plugins](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-storage/flexvolume.md)
and [Network
Plugins](/docs/concepts/cluster-administration/network-plugins/))
and by kubectl.

Below is a diagram showing how the extension points interact with the
Kubernetes control plane.

<img src="https://docs.google.com/drawings/d/e/2PACX-1vQBRWyXLVUlQPlp7BvxvV9S1mxyXSM6rAc_cbLANvKlu6kCCf-kGTporTMIeG5GZtUdxXz1xowN7RmL/pub?w=960&h=720">

<!-- image source drawing https://docs.google.com/drawings/d/1muJ7Oxuj_7Gtv7HV9-2zJbOnkQJnjxq-v1ym_kZfB-4/edit?ts=5a01e054 -->

## Extension Points

This diagram shows the extension points in a Kubernetes system.

<img src="https://docs.google.com/drawings/d/e/2PACX-1vSH5ZWUO2jH9f34YHenhnCd14baEb4vT-pzfxeFC7NzdNqRDgdz4DDAVqArtH4onOGqh0bhwMX0zGBb/pub?w=425&h=809">

<!-- image source diagrams: https://docs.google.com/drawings/d/1k2YdJgNTtNfW7_A8moIIkij-DmVgEhNrn3y2OODwqQQ/view -->

1. Users often interact with the Kubernetes API using `kubectl`. [Kubectl plugins](/docs/tasks/extend-kubectl/kubectl-plugins/) extend the kubectl binary. They only affect the individual user's local environment, and so cannot enforce site-wide policies.
2. The apiserver handles all requests. Several types of extension points in the apiserver allow authenticating requests, or blocking them based on their content, editing content, and handling deletion. These are described in the [API Access Extensions](/docs/concepts/overview/extending#api-access-extensions) section.
3. The apiserver serves various kinds of *resources*. *Built-in resource kinds*, like `pods`, are defined by the Kubernetes project and can't be changed. You can also add resources that you define, or that other projects have defined, called *Custom Resources*, as explained in the [Custom Resources](/docs/concepts/overview/extending#user-defined-types) section. Custom Resources are often used with API Access Extensions.
4. The Kubernetes scheduler decides which nodes to place pods on. There are several ways to extend scheduling. These are described in the [Scheduler Extensions](/docs/concepts/overview/extending#scheduler-extensions) section.
5. Much of the behavior of Kubernetes is implemented by programs called Controllers which are clients of the API-Server. Controllers are often used in conjunction with Custom Resources.
6. The kubelet runs on servers, and helps pods appear like virtual servers with their own IPs on the cluster network. [Network Plugins](/docs/concepts/overview/extending#network-plugins) allow for different implementations of pod networking.
7. The kubelet also mounts and unmounts volumes for containers. New types of storage can be supported via [Storage Plugins](/docs/concepts/overview/extending#storage-plugins).

If you are unsure where to start, this flowchart can help. Note that some solutions may involve several types of extensions.

<img src="https://docs.google.com/drawings/d/e/2PACX-1vRWXNNIVWFDqzDY0CsKZJY3AR8sDeFDXItdc5awYxVH8s0OLherMlEPVUpxPIB1CSUu7GPk7B2fEnzM/pub?w=1440&h=1080">

<!-- image source drawing: https://docs.google.com/drawings/d/1sdviU6lDz4BpnzJNHfNpQrqI9F19QZ07KnhnxVrp2yg/edit -->

## API Extensions
### User-Defined Types

Consider adding a Custom Resource to Kubernetes if you want to define new controllers, application configuration objects or other declarative APIs, and to manage them using Kubernetes tools, such as `kubectl`.

Do not use a Custom Resource as data storage for application, user, or monitoring data.

For more about Custom Resources, see the [Custom Resources concept guide](/docs/concepts/api-extension/custom-resources/).
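
As a sketch of what declaring such a type involves, a minimal CustomResourceDefinition is shown below; the `example.com` group, the `CronTab` kind, and the schema are hypothetical placeholders rather than an API defined by the project:

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  # name must be <plural>.<group>
  name: crontabs.example.com
spec:
  group: example.com              # hypothetical API group
  scope: Namespaced
  names:
    plural: crontabs
    singular: crontab
    kind: CronTab
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            type: object
            properties:
              cronSpec:
                type: string      # example field validated by the schema
```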

### Combining New APIs with Automation

The combination of a custom resource API and a control loop is called the [Operator pattern](/docs/concepts/extend-kubernetes/operator/). The Operator pattern is used to manage specific, usually stateful, applications. These custom APIs and control loops can also be used to control other resources, such as storage or policies.

### Changing Built-in Resources

When you extend the Kubernetes API by adding custom resources, the added resources always fall into new API Groups. You cannot replace or change existing API groups.
Adding an API does not directly let you affect the behavior of existing APIs (e.g. Pods), but API Access Extensions do.

### API Access Extensions

When a request reaches the Kubernetes API Server, it is first Authenticated, then Authorized, then subject to various types of Admission Control. See [Controlling Access to the Kubernetes API](/docs/reference/access-authn-authz/controlling-access/) for more on this flow.

Each of these steps offers extension points.

Kubernetes has several built-in authentication methods that it supports. It can also sit behind an authenticating proxy, and it can send a token from an Authorization header to a remote service for verification (a webhook). All of these methods are covered in the [Authentication documentation](/docs/reference/access-authn-authz/authentication/).

### Authentication

[Authentication](/docs/reference/access-authn-authz/authentication/) maps headers or certificates in all requests to a username for the client making the request.

Kubernetes provides several built-in authentication methods, and an [Authentication webhook](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication) method if those don't meet your needs.

### Authorization

[Authorization](/docs/reference/access-authn-authz/webhook/) determines whether specific users can read, write, and do other operations on API resources. It just works at the level of whole resources -- it doesn't discriminate based on arbitrary object fields. If the built-in authorization options don't meet your needs, an [Authorization webhook](/docs/reference/access-authn-authz/webhook/) allows calling out to user-provided code to make an authorization decision.

### Dynamic Admission Control

After a request is authorized, if it is a write operation, it also goes through [Admission Control](/docs/reference/access-authn-authz/admission-controllers/) steps. In addition to the built-in steps, there are several extensions:

* The [Image Policy webhook](/docs/reference/access-authn-authz/admission-controllers/#imagepolicywebhook) restricts what images can be run in containers.
* To make arbitrary admission control decisions, a general [Admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) can be used. Admission Webhooks can reject creations or updates.
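
A hedged sketch of how such an admission webhook is registered; the webhook name, target Service, and rules are placeholders, and a real configuration also needs the CA bundle for the webhook server's certificate:

```yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: demo-policy.example.com        # placeholder name
webhooks:
- name: demo-policy.example.com        # placeholder webhook name
  admissionReviewVersions: ["v1"]
  sideEffects: None
  rules:
  - apiGroups: [""]
    apiVersions: ["v1"]
    operations: ["CREATE", "UPDATE"]
    resources: ["pods"]
  clientConfig:
    service:
      namespace: default               # placeholder: where the webhook server runs
      name: demo-webhook                # placeholder Service name
      path: /validate
    # caBundle: <base64-encoded CA certificate>
```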

## Infrastructure Extensions

### Storage Plugins

[Flex Volumes](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/flexvolume-deployment.md) allow users to mount volume types without built-in support by having the
Kubelet call a Binary Plugin to mount the volume.

### Device Plugins

Device plugins allow a node to discover new Node resources (in addition to the
builtin ones like cpu and memory) via a [Device
Plugin](/docs/concepts/cluster-administration/device-plugins/).

### Network Plugins

Different networking fabrics can be supported via node-level [Network Plugins](/docs/admin/network-plugins/).

### Scheduler Extensions

The scheduler is a special type of controller that watches pods, and assigns
pods to nodes. The default scheduler can be replaced entirely, while
continuing to use other Kubernetes components, or [multiple
schedulers](/docs/tasks/administer-cluster/configure-multiple-schedulers/)
can run at the same time.

This is a significant undertaking, and almost all Kubernetes users find they
do not need to modify the scheduler.

The scheduler also supports a
[webhook](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/scheduler_extender.md)
that permits a webhook backend (scheduler extension) to filter and prioritize
the nodes chosen for a pod.


## {{% heading "whatsnext" %}}

* Learn more about [Custom Resources](/docs/concepts/api-extension/custom-resources/)
* Learn about [Dynamic admission control](/docs/reference/access-authn-authz/extensible-admission-controllers/)
* Learn more about Infrastructure extensions
  * [Network Plugins](/docs/concepts/cluster-administration/network-plugins/)
  * [Device Plugins](/docs/concepts/cluster-administration/device-plugins/)
* Learn about [kubectl plugins](/docs/tasks/extend-kubectl/kubectl-plugins/)
* Learn about the [Operator pattern](/docs/concepts/extend-kubernetes/operator/)

@@ -178,7 +178,7 @@ Aggregated APIs offer more advanced API features and customization of other feat

| Feature | Description | CRDs | Aggregated API |
| ------- | ----------- | ---- | -------------- |
-| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/extend-api-custom-resource-definitions/#validation). Any other validations supported by addition of a [Validating Webhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). | Yes, arbitrary validation checks |
+| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation). Any other validations supported by addition of a [Validating Webhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). | Yes, arbitrary validation checks |
| Defaulting | See above | Yes, either via [OpenAPI v3.0 validation](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#defaulting) `default` keyword (GA in 1.17), or via a [Mutating Webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) (though this will not be run when reading from etcd for old objects). | Yes |
| Multi-versioning | Allows serving the same object through two API versions. Can help ease API changes like renaming fields. Less important if you control your client versions. | [Yes](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning) | Yes |
| Custom Storage | If you need storage with a different performance mode (for example, a time-series database instead of key-value store) or isolation for security (for example, encryption of sensitive information, etc.) | No | Yes |

@ -223,7 +223,7 @@ Here are some examples of device plugin implementations:
|
|||
* The [RDMA device plugin](https://github.com/hustcat/k8s-rdma-device-plugin)
|
||||
* The [Solarflare device plugin](https://github.com/vikaschoudhary16/sfc-device-plugin)
|
||||
* The [SR-IOV Network device plugin](https://github.com/intel/sriov-network-device-plugin)
|
||||
* The [Xilinx FPGA device plugins](https://github.com/Xilinx/FPGA_as_a_Service/tree/master/k8s-fpga-device-plugin/trunk) for Xilinx FPGA devices
|
||||
* The [Xilinx FPGA device plugins](https://github.com/Xilinx/FPGA_as_a_Service/tree/master/k8s-fpga-device-plugin) for Xilinx FPGA devices
|
||||
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
|
|
@ -50,7 +50,7 @@ Extensions are software components that extend and deeply integrate with Kuberne
|
|||
They adapt it to support new types and new kinds of hardware.
|
||||
|
||||
Most cluster administrators will use a hosted or distribution
|
||||
instance of Kubernetes. As a result, most Kubernetes users will need to
|
||||
instance of Kubernetes. As a result, most Kubernetes users will not need to
|
||||
install extensions and fewer will need to author new ones.
|
||||
|
||||
## Extension Patterns
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
---
|
||||
title: "Overview"
|
||||
weight: 20
|
||||
---
|
||||
description: Get a high-level outline of Kubernetes and the components it is built from.
|
||||
---
|
||||
|
|
|
@ -3,6 +3,9 @@ reviewers:
|
|||
- lavalamp
|
||||
title: Kubernetes Components
|
||||
content_type: concept
|
||||
description: >
|
||||
A Kubernetes cluster consists of the components that represent the control plane
|
||||
and a set of machines called nodes.
|
||||
weight: 20
|
||||
card:
|
||||
name: concepts
|
||||
|
|
|
@ -4,6 +4,9 @@ reviewers:
|
|||
title: The Kubernetes API
|
||||
content_type: concept
|
||||
weight: 30
|
||||
description: >
|
||||
The Kubernetes API lets you query and manipulate the state of objects in Kubernetes.
|
||||
The core of Kubernetes' control plane is the API server and the HTTP API that it exposes. Users, the different parts of your cluster, and external components all communicate with one another through the API server.
|
||||
card:
|
||||
name: concepts
|
||||
weight: 30
|
||||
|
|
|
@ -74,7 +74,7 @@ Kubernetes lets you store and manage sensitive information, such as passwords, O
|
|||
|
||||
## What Kubernetes is not
|
||||
|
||||
Kubernetes is not a traditional, all-inclusive PaaS (Platform as a Service) system. Since Kubernetes operates at the container level rather than at the hardware level, it provides some generally applicable features common to PaaS offerings, such as deployment, scaling, load balancing, logging, and monitoring. However, Kubernetes is not monolithic, and these default solutions are optional and pluggable. Kubernetes provides the building blocks for building developer platforms, but preserves user choice and flexibility where it is important.
|
||||
Kubernetes is not a traditional, all-inclusive PaaS (Platform as a Service) system. Since Kubernetes operates at the container level rather than at the hardware level, it provides some generally applicable features common to PaaS offerings, such as deployment, scaling, load balancing, and lets users integrate their logging, monitoring, and alerting solutions. However, Kubernetes is not monolithic, and these default solutions are optional and pluggable. Kubernetes provides the building blocks for building developer platforms, but preserves user choice and flexibility where it is important.
|
||||
|
||||
Kubernetes:
|
||||
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
---
|
||||
title: "Working with Kubernetes Objects"
|
||||
weight: 40
|
||||
description: >
|
||||
Kubernetes objects are persistent entities in the Kubernetes system. Kubernetes uses these entities to represent the state of your cluster.
|
||||
Learn about the Kubernetes object model and how to work with these objects.
|
||||
---
|
||||
|
||||
|
|
|
@ -43,6 +43,10 @@ resources within the same namespace.
|
|||
Creation and deletion of namespaces are described in the [Admin Guide documentation
|
||||
for namespaces](/docs/admin/namespaces).
|
||||
|
||||
{{< note >}}
|
||||
Avoid creating namespaces with the prefix `kube-`, since it is reserved for Kubernetes system namespaces.
|
||||
{{< /note >}}
|
||||
|
||||
### Viewing namespaces
|
||||
|
||||
You can list the current namespaces in a cluster using:
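A minimal sketch of that command, assuming your kubectl context already points at the cluster:

```shell
kubectl get namespace
```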
|
||||
|
|
|
@ -40,12 +40,6 @@ objects, it provides no history of previous configurations.
|
|||
|
||||
Run an instance of the nginx container by creating a Deployment object:
|
||||
|
||||
```sh
|
||||
kubectl run nginx --image nginx
|
||||
```
|
||||
|
||||
Do the same thing using a different syntax:
|
||||
|
||||
```sh
|
||||
kubectl create deployment nginx --image nginx
|
||||
```
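Either way, you can optionally confirm that the Deployment exists with a standard listing command:

```sh
kubectl get deployment nginx
```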
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
title: "Policies"
|
||||
weight: 90
|
||||
description: >
|
||||
Policies you can configure that apply to groups of resources.
|
||||
---
|
||||
|
||||
|
|
|
@ -1,5 +1,8 @@
|
|||
---
|
||||
title: "Scheduling and Eviction"
|
||||
weight: 90
|
||||
description: >
|
||||
In Kubernetes, scheduling refers to making sure that Pods are matched to Nodes so that the kubelet can run them.
|
||||
Eviction is the process of proactively failing one or more Pods on resource-starved Nodes.
|
||||
---
|
||||
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
---
|
||||
title: "Security"
|
||||
weight: 81
|
||||
description: >
|
||||
Concepts for keeping your cloud-native workload secure.
|
||||
---
|
||||
|
|
|
@ -1,5 +1,12 @@
|
|||
---
|
||||
title: "Services, Load Balancing, and Networking"
|
||||
weight: 60
|
||||
description: >
|
||||
Concepts and resources behind networking in Kubernetes.
|
||||
---
|
||||
|
||||
Kubernetes networking addresses four concerns:
|
||||
- Containers within a Pod use networking to communicate via loopback.
|
||||
- Cluster networking provides communication between different Pods.
|
||||
- The Service resource lets you expose an application running in Pods to be reachable from outside your cluster.
|
||||
- You can also use Services to publish services only for consumption inside your cluster.
|
||||
|
|
|
@ -23,7 +23,7 @@ Modification not using HostAliases is not suggested because the file is managed
|
|||
Start an Nginx Pod which is assigned a Pod IP:
|
||||
|
||||
```shell
|
||||
kubectl run nginx --image nginx --generator=run-pod/v1
|
||||
kubectl run nginx --image nginx
|
||||
```
|
||||
|
||||
```
|
||||
|
@ -64,14 +64,14 @@ By default, the `hosts` file only includes IPv4 and IPv6 boilerplates like
|
|||
## Adding additional entries with hostAliases
|
||||
|
||||
In addition to the default boilerplate, you can add extra entries to the
|
||||
`hosts` file.
|
||||
`hosts` file.
|
||||
For example: to resolve `foo.local`, `bar.local` to `127.0.0.1` and `foo.remote`,
|
||||
`bar.remote` to `10.1.2.3`, you can configure HostAliases for a Pod under
|
||||
`.spec.hostAliases`:
|
||||
|
||||
{{< codenew file="service/networking/hostaliases-pod.yaml" >}}
|
||||
|
||||
Yoyu can start a Pod with that configuration by running:
|
||||
You can start a Pod with that configuration by running:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/service/networking/hostaliases-pod.yaml
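# Optional check (a sketch): after the Pod is running, verify that the aliases
# were written into the container's hosts file. The Pod name "hostaliases-pod"
# is assumed from the example manifest referenced above.
kubectl exec hostaliases-pod -- cat /etc/hosts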
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
---
|
||||
title: "Storage"
|
||||
weight: 70
|
||||
description: >
|
||||
Ways to provide both long-term and temporary storage to Pods in your cluster.
|
||||
---
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ be updated once they are created.
|
|||
|
||||
Administrators can specify a default StorageClass just for PVCs that don't
|
||||
request any particular class to bind to: see the
|
||||
[PersistentVolumeClaim section](/docs/concepts/storage/persistent-volumes/#class-1)
|
||||
[PersistentVolumeClaim section](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
|
||||
for details.
|
||||
|
||||
```yaml
|
||||
|
|
|
@ -13,7 +13,7 @@ weight: 30
|
|||
|
||||
<!-- overview -->
|
||||
|
||||
This document describes the concept of `VolumeSnapshotClass` in Kubernetes. Familiarity
|
||||
This document describes the concept of VolumeSnapshotClass in Kubernetes. Familiarity
|
||||
with [volume snapshots](/docs/concepts/storage/volume-snapshots/) and
|
||||
[storage classes](/docs/concepts/storage/storage-classes) is suggested.
|
||||
|
||||
|
@ -24,30 +24,43 @@ with [volume snapshots](/docs/concepts/storage/volume-snapshots/) and
|
|||
|
||||
## Introduction
|
||||
|
||||
Just like `StorageClass` provides a way for administrators to describe the "classes"
|
||||
of storage they offer when provisioning a volume, `VolumeSnapshotClass` provides a
|
||||
Just like StorageClass provides a way for administrators to describe the "classes"
|
||||
of storage they offer when provisioning a volume, VolumeSnapshotClass provides a
|
||||
way to describe the "classes" of storage when provisioning a volume snapshot.
|
||||
|
||||
## The VolumeSnapshotClass Resource
|
||||
|
||||
Each `VolumeSnapshotClass` contains the fields `driver`, `deletionPolicy`, and `parameters`,
|
||||
which are used when a `VolumeSnapshot` belonging to the class needs to be
|
||||
Each VolumeSnapshotClass contains the fields `driver`, `deletionPolicy`, and `parameters`,
|
||||
which are used when a VolumeSnapshot belonging to the class needs to be
|
||||
dynamically provisioned.
|
||||
|
||||
The name of a `VolumeSnapshotClass` object is significant, and is how users can
|
||||
The name of a VolumeSnapshotClass object is significant, and is how users can
|
||||
request a particular class. Administrators set the name and other parameters
|
||||
of a class when first creating `VolumeSnapshotClass` objects, and the objects cannot
|
||||
of a class when first creating VolumeSnapshotClass objects, and the objects cannot
|
||||
be updated once they are created.
|
||||
|
||||
Administrators can specify a default `VolumeSnapshotClass` just for VolumeSnapshots
|
||||
that don't request any particular class to bind to.
|
||||
|
||||
```yaml
|
||||
apiVersion: snapshot.storage.k8s.io/v1beta1
|
||||
kind: VolumeSnapshotClass
|
||||
metadata:
|
||||
name: csi-hostpath-snapclass
|
||||
driver: hostpath.csi.k8s.io
|
||||
driver: hostpath.csi.k8s.io
|
||||
deletionPolicy: Delete
|
||||
parameters:
|
||||
```
|
||||
|
||||
Administrators can specify a default VolumeSnapshotClass for VolumeSnapshots
|
||||
that don't request any particular class to bind to by adding the
|
||||
`snapshot.storage.kubernetes.io/is-default-class: "true"` annotation:
|
||||
|
||||
```yaml
|
||||
apiVersion: snapshot.storage.k8s.io/v1beta1
|
||||
kind: VolumeSnapshotClass
|
||||
metadata:
|
||||
name: csi-hostpath-snapclass
|
||||
annotations:
|
||||
snapshot.storage.kubernetes.io/is-default-class: "true"
|
||||
driver: hostpath.csi.k8s.io
|
||||
deletionPolicy: Delete
|
||||
parameters:
|
||||
```
|
||||
|
@ -59,9 +72,9 @@ used for provisioning VolumeSnapshots. This field must be specified.
|
|||
|
||||
### DeletionPolicy
|
||||
|
||||
Volume snapshot classes have a deletionPolicy. It enables you to configure what happens to a `VolumeSnapshotContent` when the `VolumeSnapshot` object it is bound to is to be deleted. The deletionPolicy of a volume snapshot can either be `Retain` or `Delete`. This field must be specified.
|
||||
Volume snapshot classes have a deletionPolicy. It enables you to configure what happens to a VolumeSnapshotContent when the VolumeSnapshot object it is bound to is to be deleted. The deletionPolicy of a volume snapshot can either be `Retain` or `Delete`. This field must be specified.
|
||||
|
||||
If the deletionPolicy is `Delete`, then the underlying storage snapshot will be deleted along with the `VolumeSnapshotContent` object. If the deletionPolicy is `Retain`, then both the underlying snapshot and `VolumeSnapshotContent` remain.
|
||||
If the deletionPolicy is `Delete`, then the underlying storage snapshot will be deleted along with the VolumeSnapshotContent object. If the deletionPolicy is `Retain`, then both the underlying snapshot and VolumeSnapshotContent remain.
|
||||
|
||||
## Parameters
|
||||
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
---
|
||||
title: "Workloads"
|
||||
weight: 50
|
||||
description: >
|
||||
Understand Pods, the smallest deployable compute object in Kubernetes, and the higher-level abstractions that help you to run them.
|
||||
---
|
||||
|
||||
|
|
|
@ -861,7 +861,12 @@ The output is similar to this:
|
|||
```
|
||||
Waiting for rollout to finish: 2 of 3 updated replicas are available...
|
||||
deployment.apps/nginx-deployment successfully rolled out
|
||||
$ echo $?
|
||||
```
|
||||
and the exit status from `kubectl rollout` is 0 (success):
|
||||
```shell
|
||||
echo $?
|
||||
```
|
||||
```
|
||||
0
|
||||
```
|
||||
|
||||
|
@ -1003,7 +1008,12 @@ The output is similar to this:
|
|||
```
|
||||
Waiting for rollout to finish: 2 out of 3 new replicas have been updated...
|
||||
error: deployment "nginx" exceeded its progress deadline
|
||||
$ echo $?
|
||||
```
|
||||
and the exit status from `kubectl rollout` is 1 (indicating an error):
|
||||
```shell
|
||||
echo $?
|
||||
```
|
||||
```
|
||||
1
|
||||
```
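A sketch of how a script or CI job might rely on that exit status; the Deployment name matches the earlier example and the timeout value is an illustrative assumption:

```shell
# Wait for the rollout and fail the script if it does not complete in time.
if kubectl rollout status deployment/nginx-deployment --timeout=120s; then
  echo "rollout succeeded"
else
  echo "rollout failed or timed out" >&2
  exit 1
fi
```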
|
||||
|
||||
|
|
|
@ -218,9 +218,6 @@ exponential back-off delay (10s, 20s, 40s ...) capped at six minutes. The
|
|||
back-off count is reset if no new failed Pods appear before the Job's next
|
||||
status check.
|
||||
|
||||
{{< note >}}
|
||||
Issue [#54870](https://github.com/kubernetes/kubernetes/issues/54870) still exists for versions of Kubernetes prior to version 1.12
|
||||
{{< /note >}}
|
||||
{{< note >}}
|
||||
If your job has `restartPolicy = "OnFailure"`, keep in mind that your container running the Job
|
||||
will be terminated once the job backoff limit has been reached. This can make debugging the Job's executable more difficult. We suggest setting
|
||||
|
|
|
@ -77,7 +77,7 @@ A [Probe](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#prob
|
|||
performed periodically by the [kubelet](/docs/admin/kubelet/)
|
||||
on a Container. To perform a diagnostic,
|
||||
the kubelet calls a
|
||||
[Handler](https://godoc.org/k8s.io/kubernetes/pkg/api/v1#Handler) implemented by
|
||||
[Handler](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#handler-v1-core) implemented by
|
||||
the Container. There are three types of handlers:
|
||||
|
||||
* [ExecAction](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#execaction-v1-core):
|
||||
|
@ -281,7 +281,7 @@ once bound to a node, a Pod will never be rebound to another node.
|
|||
In general, Pods remain until a human or
|
||||
{{< glossary_tooltip term_id="controller" text="controller" >}} process
|
||||
explicitly removes them.
|
||||
The control plane cleans up terminated Pods (with a phase of `Succeeded` or
|
||||
The control plane cleans up terminated Pods (with a phase of `Succeeded` or
|
||||
`Failed`), when the number of Pods exceeds the configured threshold
|
||||
(determined by `terminated-pod-gc-threshold` in the kube-controller-manager).
|
||||
This avoids a resource leak as Pods are created and terminated over time.
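As a sketch, that threshold is a kube-controller-manager flag; the value below is illustrative, and other required flags are omitted:

```shell
# Illustrative only: set the garbage-collection threshold for terminated Pods.
kube-controller-manager --terminated-pod-gc-threshold=12500
```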
|
||||
|
@ -407,4 +407,3 @@ spec:
|
|||
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -39,6 +39,7 @@ The PR wrangler’s duties include:
|
|||
- Assign `Doc Review: Open Issues` or `Tech Review: Open Issues` for PRs that have been reviewed and require further input or action before merging.
|
||||
- Assign `/lgtm` and `/approve` labels to PRs that can be merged.
|
||||
- Merge PRs when they are ready, or close PRs that shouldn’t be accepted.
|
||||
- Consider accepting accurate technical content even if the content meets only some of the docs' [style guidelines](/docs/contribute/style/style-guide/). Open a new issue with the label `good first issue` to address style concerns.
|
||||
- Triage and tag incoming issues daily. See [Triage and categorize issues](/docs/contribute/review/for-approvers/#triage-and-categorize-issues) for guidelines on how SIG Docs uses metadata.
|
||||
|
||||
### Helpful GitHub queries for wranglers
|
||||
|
|
|
@ -217,21 +217,37 @@ When you are ready to submit a pull request, commit your changes.
|
|||
|
||||
It's a good idea to preview your changes locally before pushing them or opening a pull request. A preview lets you catch build errors or markdown formatting problems.
|
||||
|
||||
You can either build the website's docker image or run Hugo locally. Building the docker image is slower but displays [Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/), which can be useful for debugging.
|
||||
You can either build the website's container image or run Hugo locally. Building the container image is slower but displays [Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/), which can be useful for debugging.
|
||||
|
||||
{{< tabs name="tab_with_hugo" >}}
|
||||
{{% tab name="Hugo in a container" %}}
|
||||
|
||||
{{< note >}}
|
||||
The commands below use Docker as the default container engine. Set the `CONTAINER_ENGINE` environment variable to override this behavior.
|
||||
{{< /note >}}
|
||||
|
||||
1. Build the image locally:
|
||||
|
||||
```bash
|
||||
make docker-image
|
||||
# Use docker (default)
|
||||
make container-image
|
||||
|
||||
### OR ###
|
||||
|
||||
# Use podman
|
||||
CONTAINER_ENGINE=podman make container-image
|
||||
```
|
||||
|
||||
2. After building the `kubernetes-hugo` image locally, build and serve the site:
|
||||
|
||||
```bash
|
||||
make docker-serve
|
||||
# Use docker (default)
|
||||
make container-serve
|
||||
|
||||
### OR ###
|
||||
|
||||
# Use podman
|
||||
CONTAINER_ENGINE=podman make container-serve
|
||||
```
|
||||
|
||||
3. In a web browser, navigate to `http://localhost:1313`. Hugo watches the
|
||||
|
|
|
@ -0,0 +1,119 @@
|
|||
---
|
||||
title: Participating in SIG Docs
|
||||
content_type: concept
|
||||
weight: 60
|
||||
card:
|
||||
name: contribute
|
||||
weight: 60
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
SIG Docs is one of the
|
||||
[special interest groups](https://github.com/kubernetes/community/blob/master/sig-list.md)
|
||||
within the Kubernetes project, focused on writing, updating, and maintaining
|
||||
the documentation for Kubernetes as a whole. See
|
||||
[SIG Docs from the community github repo](https://github.com/kubernetes/community/tree/master/sig-docs)
|
||||
for more information about the SIG.
|
||||
|
||||
SIG Docs welcomes content and reviews from all contributors. Anyone can open a
|
||||
pull request (PR), and anyone is welcome to file issues about content or comment
|
||||
on pull requests in progress.
|
||||
|
||||
You can also become a [member](/docs/contribute/participating/roles-and-responsibilities/#members),
|
||||
[reviewer](/docs/contribute/participating/roles-and-responsibilities/#reviewers), or [approver](/docs/contribute/participating/roles-and-responsibilities/#approvers). These roles require greater
|
||||
access and entail certain responsibilities for approving and committing changes.
|
||||
See [community-membership](https://github.com/kubernetes/community/blob/master/community-membership.md)
|
||||
for more information on how membership works within the Kubernetes community.
|
||||
|
||||
The rest of this document outlines some unique ways these roles function within
|
||||
SIG Docs, which is responsible for maintaining one of the most public-facing
|
||||
aspects of Kubernetes -- the Kubernetes website and documentation.
|
||||
|
||||
|
||||
|
||||
<!-- body -->
|
||||
|
||||
## SIG Docs chairperson
|
||||
|
||||
Each SIG, including SIG Docs, selects one or more SIG members to act as
|
||||
chairpersons. These are points of contact between SIG Docs and other parts of
|
||||
the Kubernetes organization. They require extensive knowledge of the structure
|
||||
of the Kubernetes project as a whole and how SIG Docs works within it. See
|
||||
[Leadership](https://github.com/kubernetes/community/tree/master/sig-docs#leadership)
|
||||
for the current list of chairpersons.
|
||||
|
||||
## SIG Docs teams and automation
|
||||
|
||||
Automation in SIG Docs relies on two different mechanisms:
|
||||
GitHub teams and OWNERS files.
|
||||
|
||||
### GitHub teams
|
||||
|
||||
There are two categories of SIG Docs [teams](https://github.com/orgs/kubernetes/teams?query=sig-docs) on GitHub:
|
||||
|
||||
- `@sig-docs-{language}-owners` are approvers and leads
|
||||
- `@sig-docs-{language}-reviewers` are reviewers
|
||||
|
||||
Each can be referenced with their `@name` in GitHub comments to communicate with
|
||||
everyone in that group.
|
||||
|
||||
Sometimes Prow and GitHub teams overlap without matching exactly. To assign issues and pull requests, and to support PR approvals,
|
||||
the automation uses information from `OWNERS` files.
|
||||
|
||||
### OWNERS files and front-matter
|
||||
|
||||
The Kubernetes project uses an automation tool called prow to automate tasks
|
||||
related to GitHub issues and pull requests. The
|
||||
[Kubernetes website repository](https://github.com/kubernetes/website) uses
|
||||
two [prow plugins](https://github.com/kubernetes/test-infra/tree/master/prow/plugins):
|
||||
|
||||
- blunderbuss
|
||||
- approve
|
||||
|
||||
These two plugins use the
|
||||
[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS) and
|
||||
[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES)
|
||||
files in the top level of the `kubernetes/website` GitHub repository to control
|
||||
how prow works within the repository.
|
||||
|
||||
An OWNERS file contains a list of people who are SIG Docs reviewers and
|
||||
approvers. OWNERS files can also exist in subdirectories, and can override who
|
||||
can act as a reviewer or approver of files in that subdirectory and its
|
||||
descendants. For more information about OWNERS files in general, see
|
||||
[OWNERS](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md).
|
||||
|
||||
In addition, an individual Markdown file can list reviewers and approvers in its
|
||||
front-matter, either by listing individual GitHub usernames or GitHub groups.
|
||||
|
||||
The combination of OWNERS files and front-matter in Markdown files determines
|
||||
the advice PR owners get from automated systems about who to ask for technical
|
||||
and editorial review of their PR.
|
||||
|
||||
## How merging works
|
||||
|
||||
When a pull request is merged into the branch used to publish content, that content is published to https://kubernetes.io. To ensure that
|
||||
the quality of our published content is high, we limit merging pull requests to
|
||||
SIG Docs approvers. Here's how it works.
|
||||
|
||||
- When a pull request has both the `lgtm` and `approve` labels, has no `hold`
|
||||
labels, and all tests are passing, the pull request merges automatically.
|
||||
- Kubernetes organization members and SIG Docs approvers can add comments to
|
||||
prevent automatic merging of a given pull request (by adding a `/hold` comment
|
||||
or withholding a `/lgtm` comment).
|
||||
- Any Kubernetes member can add the `lgtm` label by adding a `/lgtm` comment.
|
||||
- Only SIG Docs approvers can merge a pull request
|
||||
by adding an `/approve` comment. Some approvers also perform additional
|
||||
specific roles, such as [PR Wrangler](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) or
|
||||
[SIG Docs chairperson](#sig-docs-chairperson).
|
||||
|
||||
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
|
||||
For more information about contributing to the Kubernetes documentation, see:
|
||||
|
||||
- [Contributing new content](/docs/contribute/overview/)
|
||||
- [Reviewing content](/docs/contribute/review/reviewing-prs)
|
||||
- [Documentation style guide](/docs/contribute/style/)
|
|
@ -0,0 +1,191 @@
|
|||
---
|
||||
title: Roles and responsibilities
|
||||
content_type: concept
|
||||
weight: 10
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
Anyone can contribute to Kubernetes. As your contributions to SIG Docs grow, you can apply for different levels of membership in the community.
|
||||
These roles allow you to take on more responsibility within the community.
|
||||
Each role requires more time and commitment. The roles are:
|
||||
|
||||
- Anyone: regular contributors to the Kubernetes documentation
|
||||
- Members: can assign and triage issues and provide non-binding review on pull requests
|
||||
- Reviewers: can lead reviews on documentation pull requests and can vouch for a change's quality
|
||||
- Approvers: can lead reviews on documentation and merge changes
|
||||
|
||||
<!-- body -->
|
||||
|
||||
## Anyone
|
||||
|
||||
Anyone with a GitHub account can contribute to Kubernetes. SIG Docs welcomes all new contributors!
|
||||
|
||||
Anyone can:
|
||||
|
||||
- Open an issue in any [Kubernetes](https://github.com/kubernetes/) repository, including [`kubernetes/website`](https://github.com/kubernetes/website)
|
||||
- Give non-binding feedback on a pull request
|
||||
- Contribute to a localization
|
||||
- Suggest improvements on [Slack](http://slack.k8s.io/) or the [SIG docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs).
|
||||
|
||||
After [signing the CLA](/docs/contribute/new-content/overview/#sign-the-cla), anyone can also:
|
||||
|
||||
- Open a pull request to improve existing content, add new content, or write a blog post or case study
|
||||
- Create diagrams, graphics assets, and embeddable screencasts and videos
|
||||
|
||||
For more information, see [contributing new content](/docs/contribute/new-content/).
|
||||
|
||||
## Members
|
||||
|
||||
A member is someone who has submitted multiple pull requests to `kubernetes/website`. Members are a part of the [Kubernetes GitHub organization](https://github.com/kubernetes).
|
||||
|
||||
Members can:
|
||||
|
||||
- Do everything listed under [Anyone](#anyone)
|
||||
- Use the `/lgtm` comment to add the LGTM (looks good to me) label to a pull request
|
||||
|
||||
{{< note >}}
|
||||
Using `/lgtm` triggers automation. If you want to provide non-binding approval, simply commenting "LGTM" works too!
|
||||
{{< /note >}}
|
||||
- Use the `/hold` comment to block merging for a pull request
|
||||
- Use the `/assign` comment to assign a reviewer to a pull request
|
||||
- Provide non-binding review on pull requests
|
||||
- Use automation to triage and categorize issues
|
||||
- Document new features
|
||||
|
||||
### Becoming a member
|
||||
|
||||
After submitting at least 5 substantial pull requests and meeting the other [requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#member):
|
||||
|
||||
1. Find two [reviewers](#reviewers) or [approvers](#approvers) to [sponsor](/docs/contribute/advanced#sponsor-a-new-contributor) your membership.
|
||||
|
||||
Ask for sponsorship in the [#sig-docs channel on Slack](https://kubernetes.slack.com) or on the
|
||||
[SIG Docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs).
|
||||
|
||||
{{< note >}}
|
||||
Don't send a direct email or Slack direct message to an individual
|
||||
SIG Docs member. You must request sponsorship before submitting your application.
|
||||
{{< /note >}}
|
||||
|
||||
2. Open a GitHub issue in the [`kubernetes/org`](https://github.com/kubernetes/org/) repository. Use the **Organization Membership Request** issue template.
|
||||
|
||||
3. Let your sponsors know about the GitHub issue. You can either:
|
||||
- Mention their GitHub username in an issue (`@<GitHub-username>`)
|
||||
- Send them the issue link using Slack or email.
|
||||
|
||||
Sponsors will approve your request with a `+1` vote. Once your sponsors approve the request, a Kubernetes GitHub admin adds you as a member. Congratulations!
|
||||
|
||||
If your membership request is not accepted, you will receive feedback. After addressing the feedback, apply again.
|
||||
|
||||
4. Accept the invitation to the Kubernetes GitHub organization in your email account.
|
||||
|
||||
{{< note >}}
|
||||
GitHub sends the invitation to the default email address in your account.
|
||||
{{< /note >}}
|
||||
|
||||
## Reviewers
|
||||
|
||||
Reviewers are responsible for reviewing open pull requests. Unlike member feedback, you must address reviewer feedback. Reviewers are members of the [@kubernetes/sig-docs-{language}-reviews](https://github.com/orgs/kubernetes/teams?query=sig-docs) GitHub team.
|
||||
|
||||
Reviewers can:
|
||||
|
||||
- Do everything listed under [Anyone](#anyone) and [Members](#members)
|
||||
- Review pull requests and provide binding feedback
|
||||
|
||||
{{< note >}}
|
||||
To provide non-binding feedback, prefix your comments with a phrase like "Optionally: ".
|
||||
{{< /note >}}
|
||||
|
||||
- Edit user-facing strings in code
|
||||
- Improve code comments
|
||||
|
||||
You can be a SIG Docs reviewer, or a reviewer for docs in a specific subject area.
|
||||
|
||||
### Assigning reviewers to pull requests
|
||||
|
||||
Automation assigns reviewers to all pull requests. You can request a
|
||||
review from a specific person by commenting: `/assign
|
||||
[@_github_handle]`.
|
||||
|
||||
If the assigned reviewer has not commented on the PR, another reviewer can step in. You can also assign technical reviewers as needed.
|
||||
|
||||
### Using `/lgtm`
|
||||
|
||||
LGTM stands for "Looks good to me" and indicates that a pull request is technically accurate and ready to merge. All PRs need a `/lgtm` comment from a reviewer and a `/approve` comment from an approver to merge.
|
||||
|
||||
A `/lgtm` comment from a reviewer is binding and triggers automation that adds the `lgtm` label.
|
||||
|
||||
### Becoming a reviewer
|
||||
|
||||
When you meet the
|
||||
[requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#reviewer), you can become a SIG Docs reviewer. Reviewers in other SIGs must apply separately for reviewer status in SIG Docs.
|
||||
|
||||
To apply:
|
||||
|
||||
1. Open a pull request that adds your GitHub user name to a section of the
|
||||
[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) file
|
||||
in the `kubernetes/website` repository.
|
||||
|
||||
{{< note >}}
|
||||
If you aren't sure where to add yourself, add yourself to `sig-docs-en-reviews`.
|
||||
{{< /note >}}
|
||||
|
||||
2. Assign the PR to one or more SIG-Docs approvers (user names listed under `sig-docs-{language}-owners`).
|
||||
|
||||
If approved, a SIG Docs lead adds you to the appropriate GitHub team. Once added, [K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home) assigns and suggests you as a reviewer on new pull requests.
|
||||
|
||||
## Approvers
|
||||
|
||||
Approvers review and approve pull requests for merging. Approvers are members of the
|
||||
[@kubernetes/sig-docs-{language}-owners](https://github.com/orgs/kubernetes/teams/?query=sig-docs) GitHub teams.
|
||||
|
||||
Approvers can do the following:
|
||||
|
||||
- Everything listed under [Anyone](#anyone), [Members](#members) and [Reviewers](#reviewers)
|
||||
- Publish contributor content by approving and merging pull requests using the `/approve` comment
|
||||
- Propose improvements to the style guide
|
||||
- Propose improvements to docs tests
|
||||
- Propose improvements to the Kubernetes website or other tooling
|
||||
|
||||
If the PR already has a `/lgtm`, or if the approver also comments with `/lgtm`, the PR merges automatically. A SIG Docs approver should only leave a `/lgtm` on a change that doesn't need additional technical review.
|
||||
|
||||
|
||||
### Approving pull requests
|
||||
|
||||
Approvers and SIG Docs leads are the only ones who can merge pull requests into the website repository. This comes with certain responsibilities.
|
||||
|
||||
- Approvers can use the `/approve` command, which merges PRs into the repo.
|
||||
|
||||
{{< warning >}}
|
||||
A careless merge can break the site, so be sure that when you merge something, you mean it.
|
||||
{{< /warning >}}
|
||||
|
||||
- Make sure that proposed changes meet the [contribution guidelines](/docs/contribute/style/content-guide/#contributing-content).
|
||||
|
||||
If you ever have a question, or you're not sure about something, feel free to call for additional review.
|
||||
|
||||
- Verify that Netlify tests pass before you `/approve` a PR.
|
||||
|
||||
<img src="/images/docs/contribute/netlify-pass.png" width="75%" alt="Netlify tests must pass before approving" />
|
||||
|
||||
- Visit the Netlify page preview for a PR to make sure things look good before approving.
|
||||
|
||||
- Participate in the [PR Wrangler rotation schedule](https://github.com/kubernetes/website/wiki/PR-Wranglers) for weekly rotations. SIG Docs expects all approvers to participate in this
|
||||
rotation. See [Be the PR Wrangler for a week](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week)
|
||||
for more details.
|
||||
|
||||
### Becoming an approver
|
||||
|
||||
When you meet the [requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#approver), you can become a SIG Docs approver. Approvers in other SIGs must apply separately for approver status in SIG Docs.
|
||||
|
||||
To apply:
|
||||
|
||||
1. Open a pull request adding yourself to a section of the [OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES) file in the `kubernetes/website` repository.
|
||||
|
||||
{{< note >}}
|
||||
If you aren't sure where to add yourself, add yourself to `sig-docs-en-owners`.
|
||||
{{< /note >}}
|
||||
|
||||
2. Assign the PR to one or more current SIG Docs approvers.
|
||||
|
||||
If approved, a SIG Docs lead adds you to the appropriate GitHub team. Once added, [K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home) assigns and suggests you as a reviewer on new pull requests.
|
|
@ -1,316 +0,0 @@
|
|||
---
|
||||
title: Participating in SIG Docs
|
||||
content_type: concept
|
||||
weight: 60
|
||||
card:
|
||||
name: contribute
|
||||
weight: 60
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
SIG Docs is one of the
|
||||
[special interest groups](https://github.com/kubernetes/community/blob/master/sig-list.md)
|
||||
within the Kubernetes project, focused on writing, updating, and maintaining
|
||||
the documentation for Kubernetes as a whole. See
|
||||
[SIG Docs from the community github repo](https://github.com/kubernetes/community/tree/master/sig-docs)
|
||||
for more information about the SIG.
|
||||
|
||||
SIG Docs welcomes content and reviews from all contributors. Anyone can open a
|
||||
pull request (PR), and anyone is welcome to file issues about content or comment
|
||||
on pull requests in progress.
|
||||
|
||||
You can also become a [member](#members),
|
||||
[reviewer](#reviewers), or [approver](#approvers). These roles require greater
|
||||
access and entail certain responsibilities for approving and committing changes.
|
||||
See [community-membership](https://github.com/kubernetes/community/blob/master/community-membership.md)
|
||||
for more information on how membership works within the Kubernetes community.
|
||||
|
||||
The rest of this document outlines some unique ways these roles function within
|
||||
SIG Docs, which is responsible for maintaining one of the most public-facing
|
||||
aspects of Kubernetes -- the Kubernetes website and documentation.
|
||||
|
||||
|
||||
|
||||
<!-- body -->
|
||||
|
||||
## Roles and responsibilities
|
||||
|
||||
- **Anyone** can contribute to Kubernetes documentation. To contribute, you must [sign the CLA](/docs/contribute/new-content/overview/#sign-the-cla) and have a GitHub account.
|
||||
- **Members** of the Kubernetes organization are contributors who have spent time and effort on the Kubernetes project, usually by opening pull requests with accepted changes. See [Community membership](https://github.com/kubernetes/community/blob/master/community-membership.md) for membership criteria.
|
||||
- A SIG Docs **Reviewer** is a member of the Kubernetes organization who has
|
||||
expressed interest in reviewing documentation pull requests, and has been
|
||||
added to the appropriate GitHub group and `OWNERS` files in the GitHub
|
||||
repository by a SIG Docs Approver.
|
||||
- A SIG Docs **Approver** is a member in good standing who has shown a continued
|
||||
commitment to the project. An approver can merge pull requests
|
||||
and publish content on behalf of the Kubernetes organization.
|
||||
Approvers can also represent SIG Docs in the larger Kubernetes community.
|
||||
Some duties of a SIG Docs approver, such as coordinating a release,
|
||||
require a significant time commitment.
|
||||
|
||||
## Anyone
|
||||
|
||||
Anyone can do the following:
|
||||
|
||||
- Open a GitHub issue against any part of Kubernetes, including documentation.
|
||||
- Provide non-binding feedback on a pull request.
|
||||
- Help to localize existing content
|
||||
- Bring up ideas for improvement on [Slack](http://slack.k8s.io/) or the [SIG docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs).
|
||||
- Use the `/lgtm` Prow command (short for "looks good to me") to recommend the changes in a pull request for merging.
|
||||
{{< note >}}
|
||||
If you are not a member of the Kubernetes organization, using `/lgtm` has no effect on automated systems.
|
||||
{{< /note >}}
|
||||
|
||||
After [signing the CLA](/docs/contribute/new-content/overview/#sign-the-cla), anyone can also:
|
||||
- Open a pull request to improve existing content, add new content, or write a blog post or case study.
|
||||
|
||||
## Members
|
||||
|
||||
Members are contributors to the Kubernetes project who meet the [membership criteria](https://github.com/kubernetes/community/blob/master/community-membership.md#member). SIG Docs welcomes contributions from all members of the Kubernetes community,
|
||||
and frequently requests reviews from members of other SIGs for technical accuracy.
|
||||
|
||||
Any member of the [Kubernetes organization](https://github.com/kubernetes) can do the following:
|
||||
|
||||
- Everything listed under [Anyone](#anyone)
|
||||
- Use the `/lgtm` comment to add the LGTM (looks good to me) label to a pull request.
|
||||
- Use the `/hold` command to prevent a pull request from being merged, if the pull request already has the LGTM and approve labels.
|
||||
- Use the `/assign` comment to assign a reviewer to a pull request.
|
||||
|
||||
### Becoming a member
|
||||
|
||||
After you have successfully submitted at least 5 substantive pull requests, you
|
||||
can request [membership](https://github.com/kubernetes/community/blob/master/community-membership.md#member)
|
||||
in the Kubernetes organization. Follow these steps:
|
||||
|
||||
1. Find two reviewers or approvers to [sponsor](/docs/contribute/advanced#sponsor-a-new-contributor)
|
||||
your membership.
|
||||
|
||||
Ask for sponsorship in the [#sig-docs channel on the
|
||||
Kubernetes Slack instance](https://kubernetes.slack.com) or on the
|
||||
[SIG Docs mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-docs).
|
||||
|
||||
{{< note >}}
|
||||
Don't send a direct email or Slack direct message to an individual
|
||||
SIG Docs member.
|
||||
{{< /note >}}
|
||||
|
||||
2. Open a GitHub issue in the `kubernetes/org` repository to request membership.
|
||||
Fill out the template using the guidelines at
|
||||
[Community membership](https://github.com/kubernetes/community/blob/master/community-membership.md).
|
||||
|
||||
3. Let your sponsors know about the GitHub issue, either by at-mentioning them
|
||||
in the GitHub issue (adding a comment with `@<GitHub-username>`) or by sending them the link directly,
|
||||
so that they can add a `+1` vote.
|
||||
|
||||
4. When your membership is approved, the github admin team member assigned to your request updates the
|
||||
GitHub issue to show approval and then closes the GitHub issue.
|
||||
Congratulations, you are now a member!
|
||||
|
||||
If your membership request is not accepted, the
|
||||
membership committee provides information or steps to take before applying
|
||||
again.
|
||||
|
||||
## Reviewers
|
||||
|
||||
Reviewers are members of the
|
||||
[@kubernetes/sig-docs-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews)
|
||||
GitHub group. Reviewers review documentation pull requests and provide feedback on proposed
|
||||
changes. Reviewers can:
|
||||
|
||||
- Do everything listed under [Anyone](#anyone) and [Members](#members)
|
||||
- Document new features
|
||||
- Triage and categorize issues
|
||||
- Review pull requests and provide binding feedback
|
||||
- Create diagrams, graphics assets, and embeddable screencasts and videos
|
||||
- Edit user-facing strings in code
|
||||
- Improve code comments
|
||||
|
||||
### Assigning reviewers to pull requests
|
||||
|
||||
Automation assigns reviewers to all pull requests. You can request a
|
||||
review from a specific reviewer with a comment on the pull request: `/assign
|
||||
[@_github_handle]`. To indicate that a pull request is technically accurate and
|
||||
requires no further changes, a reviewer adds a `/lgtm` comment to the pull
|
||||
request.
|
||||
|
||||
If the assigned reviewer has not yet reviewed the content, another reviewer can
|
||||
step in. In addition, you can assign technical reviewers and wait for them to
|
||||
provide a `/lgtm` comment.
|
||||
|
||||
For a trivial change or one that needs no technical review, SIG Docs
|
||||
[approvers](#approvers) can provide the `/lgtm` as well.
|
||||
|
||||
An `/approve` comment from a reviewer is ignored by automation.
|
||||
|
||||
### Becoming a reviewer
|
||||
|
||||
When you meet the
|
||||
[requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#reviewer),
|
||||
you can become a SIG Docs reviewer. Reviewers in other SIGs must apply
|
||||
separately for reviewer status in SIG Docs.
|
||||
|
||||
To apply, open a pull request to add yourself to the `reviewers` section of the
|
||||
[top-level OWNERS file](https://github.com/kubernetes/website/blob/master/OWNERS)
|
||||
in the `kubernetes/website` repository. Assign the PR to one or more current SIG
|
||||
Docs approvers.
|
||||
|
||||
If your pull request is approved, you are now a SIG Docs reviewer.
|
||||
[K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home)
|
||||
will assign and suggest you as a reviewer on new pull requests.
|
||||
|
||||
If you are approved, request that a current SIG Docs approver add you to the
|
||||
[@kubernetes/sig-docs-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews)
|
||||
GitHub group. Only members of the `kubernetes-website-admins` GitHub group can
|
||||
add new members to a GitHub group.
|
||||
|
||||
## Approvers
|
||||
|
||||
Approvers are members of the
|
||||
[@kubernetes/sig-docs-maintainers](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers)
|
||||
GitHub group. See [SIG Docs teams and automation](#sig-docs-teams-and-automation) for details.
|
||||
|
||||
Approvers can do the following:
|
||||
|
||||
- Everything listed under [Anyone](#anyone), [Members](#members) and [Reviewers](#reviewers)
|
||||
- Publish contributor content by approving and merging pull requests using the `/approve` comment.
|
||||
If someone who is not an approver leaves the approval comment, automation ignores it.
|
||||
- Participate in a Kubernetes release team as a docs representative
|
||||
- Propose improvements to the style guide
|
||||
- Propose improvements to docs tests
|
||||
- Propose improvements to the Kubernetes website or other tooling
|
||||
|
||||
If the PR already has a `/lgtm`, or if the approver also comments with `/lgtm`,
|
||||
the PR merges automatically. A SIG Docs approver should only leave a `/lgtm` on
|
||||
a change that doesn't need additional technical review.
|
||||
|
||||
### Becoming an approver
|
||||
|
||||
When you meet the
|
||||
[requirements](https://github.com/kubernetes/community/blob/master/community-membership.md#approver),
|
||||
you can become a SIG Docs approver. Approvers in other SIGs must apply
|
||||
separately for approver status in SIG Docs.
|
||||
|
||||
To apply, open a pull request to add yourself to the `approvers` section of the
|
||||
[top-level OWNERS file](https://github.com/kubernetes/website/blob/master/OWNERS)
|
||||
in the `kubernetes/website` repository. Assign the PR to one or more current SIG
|
||||
Docs approvers.
|
||||
|
||||
If your pull request is approved, you are now a SIG Docs approver.
|
||||
[K8s-ci-robot](https://github.com/kubernetes/test-infra/tree/master/prow#bots-home)
|
||||
will assign and suggest you as a reviewer on new pull requests.
|
||||
|
||||
If you are approved, request that a current SIG Docs approver add you to the
|
||||
[@kubernetes/sig-docs-maintainers](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers)
|
||||
GitHub group. Only members of the `kubernetes-website-admins` GitHub group can
|
||||
add new members to a GitHub group.
|
||||
|
||||
### Approver responsibilities
|
||||
|
||||
Approvers improve the documentation by reviewing and merging pull requests into the website repository. Because this role carries additional privileges, approvers have additional responsibilities:
|
||||
|
||||
- Approvers can use the `/approve` command, which merges PRs into the repo.
|
||||
|
||||
A careless merge can break the site, so be sure that when you merge something, you mean it.
|
||||
|
||||
- Make sure that proposed changes meet the [contribution guidelines](/docs/contribute/style/content-guide/#contributing-content).
|
||||
|
||||
If you ever have a question, or you're not sure about something, feel free to call for additional review.
|
||||
|
||||
- Verify that Netlify tests pass before you `/approve` a PR.
|
||||
|
||||
<img src="/images/docs/contribute/netlify-pass.png" width="75%" alt="Netlify tests must pass before approving" />
|
||||
|
||||
- Visit the Netlify page preview for a PR to make sure things look good before approving.
|
||||
|
||||
- Participate in the [PR Wrangler rotation schedule](https://github.com/kubernetes/website/wiki/PR-Wranglers) for weekly rotations. SIG Docs expects all approvers to participate in this
|
||||
rotation. See [Be the PR Wrangler for a week](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week)
|
||||
for more details.
|
||||
|
||||
## SIG Docs chairperson
|
||||
|
||||
Each SIG, including SIG Docs, selects one or more SIG members to act as
|
||||
chairpersons. These are points of contact between SIG Docs and other parts of
|
||||
the Kubernetes organization. They require extensive knowledge of the structure
|
||||
of the Kubernetes project as a whole and how SIG Docs works within it. See
|
||||
[Leadership](https://github.com/kubernetes/community/tree/master/sig-docs#leadership)
|
||||
for the current list of chairpersons.
|
||||
|
||||
## SIG Docs teams and automation
|
||||
|
||||
Automation in SIG Docs relies on two different mechanisms for automation:
|
||||
GitHub groups and OWNERS files.
|
||||
|
||||
### GitHub groups
|
||||
|
||||
The SIG Docs group defines two teams on GitHub:
|
||||
|
||||
- [@kubernetes/sig-docs-maintainers](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers)
|
||||
- [@kubernetes/sig-docs-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews)
|
||||
|
||||
Each can be referenced with their `@name` in GitHub comments to communicate with
|
||||
everyone in that group.
|
||||
|
||||
These teams overlap, but do not exactly match, the groups used by the automation
|
||||
tooling. For assignment of issues, pull requests, and to support PR approvals,
|
||||
the automation uses information from OWNERS files.
|
||||
|
||||
### OWNERS files and front-matter
|
||||
|
||||
The Kubernetes project uses an automation tool called prow for automation
|
||||
related to GitHub issues and pull requests. The
|
||||
[Kubernetes website repository](https://github.com/kubernetes/website) uses
|
||||
two [prow plugins](https://github.com/kubernetes/test-infra/tree/master/prow/plugins):
|
||||
|
||||
- blunderbuss
|
||||
- approve
|
||||
|
||||
These two plugins use the
|
||||
[OWNERS](https://github.com/kubernetes/website/blob/master/OWNERS) and
|
||||
[OWNERS_ALIASES](https://github.com/kubernetes/website/blob/master/OWNERS_ALIASES)
|
||||
files in the top level of the `kubernetes/website` GitHub repository to control
|
||||
how prow works within the repository.
|
||||
|
||||
An OWNERS file contains a list of people who are SIG Docs reviewers and
|
||||
approvers. OWNERS files can also exist in subdirectories, and can override who
|
||||
can act as a reviewer or approver of files in that subdirectory and its
|
||||
descendants. For more information about OWNERS files in general, see
|
||||
[OWNERS](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md).
|
||||
|
||||
In addition, an individual Markdown file can list reviewers and approvers in its
|
||||
front-matter, either by listing individual GitHub usernames or GitHub groups.
|
||||
|
||||
The combination of OWNERS files and front-matter in Markdown files determines
|
||||
the advice PR owners get from automated systems about who to ask for technical
|
||||
and editorial review of their PR.
|
||||
|
||||
## How merging works
|
||||
|
||||
When a pull request is merged to the branch used to publish content (currently
|
||||
`master`), that content is published and available to the world. To ensure that
|
||||
the quality of our published content is high, we limit merging pull requests to
|
||||
SIG Docs approvers. Here's how it works.
|
||||
|
||||
- When a pull request has both the `lgtm` and `approve` labels, has no `hold`
|
||||
labels, and all tests are passing, the pull request merges automatically.
|
||||
- Kubernetes organization members and SIG Docs approvers can add comments to
|
||||
prevent automatic merging of a given pull request (by adding a `/hold` comment
|
||||
or withholding a `/lgtm` comment).
|
||||
- Any Kubernetes member can add the `lgtm` label by adding a `/lgtm` comment.
|
||||
- Only SIG Docs approvers can merge a pull request
|
||||
by adding an `/approve` comment. Some approvers also perform additional
|
||||
specific roles, such as [PR Wrangler](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) or
|
||||
[SIG Docs chairperson](#sig-docs-chairperson).
|
||||
|
||||
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
|
||||
For more information about contributing to the Kubernetes documentation, see:
|
||||
|
||||
- [Contributing new content](/docs/contribute/overview/)
|
||||
- [Reviewing content](/docs/contribute/review/reviewing-prs)
|
||||
- [Documentation style guide](/docs/contribute/style/)
|
||||
|
||||
|
|
@ -240,8 +240,8 @@ Renders to:
|
|||
## {{% heading "whatsnext" %}}
|
||||
|
||||
* Learn about [Hugo](https://gohugo.io/).
|
||||
* Learn about [writing a new topic](/docs/home/contribute/style/write-new-topic/).
|
||||
* Learn about [page content types](/docs/home/contribute/style/page-content-types/).
|
||||
* Learn about [staging your changes](/docs/home/contribute/stage-documentation-changes/)
|
||||
* Learn about [creating a pull request](/docs/home/contribute/create-pull-request/).
|
||||
* Learn about [writing a new topic](/docs/contribute/style/write-new-topic/).
|
||||
* Learn about [page content types](/docs/contribute/style/page-content-types/).
|
||||
* Learn about [creating a pull request](/docs/contribute/new-content/new-content/).
|
||||
* Learn about [advanced contributing](/docs/contribute/advanced/).
|
||||
|
||||
|
|
|
@ -121,7 +121,7 @@ document, use the backtick (`` ` ``).
|
|||
{{< table caption = "Do and Don't - Use code style for inline code and commands" >}}
|
||||
Do | Don't
|
||||
:--| :-----
|
||||
The `kubectl run` command creates a Deployment. | The "kubectl run" command creates a Deployment.
|
||||
The `kubectl run` command creates a Pod. | The "kubectl run" command creates a Pod.
|
||||
For declarative management, use `kubectl apply`. | For declarative management, use "kubectl apply".
|
||||
Enclose code samples with triple backticks. (\`\`\`)| Enclose code samples with any other syntax.
|
||||
Use single backticks to enclose inline code. For example, `var example = true`. | Use two asterisks (`**`) or an underscore (`_`) to enclose inline code. For example, **var example = true**.
|
||||
|
@ -496,7 +496,7 @@ Do | Don't
|
|||
:--| :-----
|
||||
You can explore the API using a browser. | The API can be explored using a browser.
|
||||
The YAML file specifies the replica count. | The replica count is specified in the YAML file.
|
||||
{{< /table >}}
|
||||
{{< /table >}}
|
||||
|
||||
|
||||
Exception: Use passive voice if active voice leads to an awkward construction.
|
||||
|
@ -511,7 +511,7 @@ Do | Don't
|
|||
To create a ReplicaSet, ... | In order to create a ReplicaSet, ...
|
||||
See the configuration file. | Please see the configuration file.
|
||||
View the Pods. | With this next command, we'll view the Pods.
|
||||
{{< /table >}}
|
||||
{{< /table >}}
|
||||
|
||||
### Address the reader as "you"
|
||||
|
||||
|
@ -520,7 +520,7 @@ Do | Don't
|
|||
:--| :-----
|
||||
You can create a Deployment by ... | We'll create a Deployment by ...
|
||||
In the preceding output, you can see... | In the preceding output, we can see ...
|
||||
{{< /table >}}
|
||||
{{< /table >}}
|
||||
|
||||
|
||||
### Avoid Latin phrases
|
||||
|
@ -532,7 +532,7 @@ Do | Don't
|
|||
:--| :-----
|
||||
For example, ... | e.g., ...
|
||||
That is, ...| i.e., ...
|
||||
{{< /table >}}
|
||||
{{< /table >}}
|
||||
|
||||
|
||||
Exception: Use "etc." for et cetera.
|
||||
|
@ -550,7 +550,7 @@ Do | Don't
|
|||
Version 1.4 includes ... | In version 1.4, we have added ...
|
||||
Kubernetes provides a new feature for ... | We provide a new feature ...
|
||||
This page teaches you how to use Pods. | In this page, we are going to learn about Pods.
|
||||
{{< /table >}}
|
||||
{{< /table >}}
|
||||
|
||||
|
||||
### Avoid jargon and idioms
|
||||
|
@ -562,7 +562,7 @@ Do | Don't
|
|||
:--| :-----
|
||||
Internally, ... | Under the hood, ...
|
||||
Create a new cluster. | Turn up a new cluster.
|
||||
{{< /table >}}
|
||||
{{< /table >}}
|
||||
|
||||
|
||||
### Avoid statements about the future
|
||||
|
@ -581,7 +581,7 @@ Do | Don't
|
|||
:--| :-----
|
||||
In version 1.4, ... | In the current version, ...
|
||||
The Federation feature provides ... | The new Federation feature provides ...
|
||||
{{< /table >}}
|
||||
{{< /table >}}
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -59,6 +59,8 @@ cards:
|
|||
- name: release-notes
|
||||
title: Release Notes
|
||||
description: If you are installing Kubernetes or upgrading to the newest version, refer to the current release notes.
|
||||
button: "Download Kubernetes"
|
||||
button_path: "/docs/setup/release/notes"
|
||||
- name: about
|
||||
title: About the documentation
|
||||
description: This website contains documentation for the current and previous 4 versions of Kubernetes.
|
||||
|
|
|
@ -99,7 +99,9 @@ NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority
|
|||
|
||||
## What does each admission controller do?
|
||||
|
||||
### AlwaysAdmit {#alwaysadmit} {{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
### AlwaysAdmit {#alwaysadmit}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
|
||||
This admission controller allows all pods into the cluster. It is deprecated because its behavior is the same as if there were no admission controller at all.
|
||||
|
||||
|
@ -113,7 +115,9 @@ scheduled onto the right node), without any authorization check against the imag
|
|||
is enabled, images are always pulled prior to starting containers, which means valid credentials are
|
||||
required.
|
||||
|
||||
### AlwaysDeny {#alwaysdeny} {{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
### AlwaysDeny {#alwaysdeny}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
|
||||
Rejects all requests. AlwaysDeny is DEPRECATED because it has no real meaning.
|
||||
|
||||
|
@ -164,7 +168,9 @@ if the pods don't already have toleration for taints
|
|||
`node.kubernetes.io/not-ready:NoExecute` or
|
||||
`node.alpha.kubernetes.io/unreachable:NoExecute`.
|
||||
|
||||
### DenyExecOnPrivileged {#denyexeconprivileged} {{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
### DenyExecOnPrivileged {#denyexeconprivileged}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
|
||||
This admission controller will intercept all requests to exec a command in a pod if that pod has a privileged container.
|
||||
|
||||
|
@ -175,7 +181,9 @@ Use of a policy-based admission plugin (like [PodSecurityPolicy](#podsecuritypol
|
|||
which can be targeted at specific users or Namespaces and also protects against creation of overly privileged Pods
|
||||
is recommended instead.
|
||||
|
||||
### DenyEscalatingExec {#denyescalatingexec} {{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
### DenyEscalatingExec {#denyescalatingexec}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
|
||||
This admission controller will deny exec and attach commands to pods that run with escalated privileges that
|
||||
allow host access. This includes pods that run as privileged, have access to the host IPC namespace, and
|
||||
|
@ -187,7 +195,9 @@ Use of a policy-based admission plugin (like [PodSecurityPolicy](#podsecuritypol
|
|||
which can be targeted at specific users or Namespaces and also protects against creation of overly privileged Pods
|
||||
is recommended instead.
|
||||
|
||||
### EventRateLimit {#eventratelimit} {{< feature-state for_k8s_version="v1.13" state="alpha" >}}
|
||||
### EventRateLimit {#eventratelimit}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.13" state="alpha" >}}
|
||||
|
||||
This admission controller mitigates the problem where the API server gets flooded by
|
||||
event requests. The cluster admin can specify event rate limits by:
|
||||
|
@ -446,7 +456,9 @@ applies a 0.1 CPU requirement to all Pods in the `default` namespace.
|
|||
|
||||
See the [limitRange design doc](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_limit_range.md) and the [example of Limit Range](/docs/tasks/configure-pod-container/limit-range/) for more details.
|
||||
|
||||
### MutatingAdmissionWebhook {#mutatingadmissionwebhook} {{< feature-state for_k8s_version="v1.13" state="beta" >}}
|
||||
### MutatingAdmissionWebhook {#mutatingadmissionwebhook}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.13" state="beta" >}}
|
||||
|
||||
This admission controller calls any mutating webhooks which match the request. Matching
|
||||
webhooks are called in serial; each one may modify the object if it desires.
|
||||
|
@ -537,7 +549,9 @@ This admission controller also protects the access to `metadata.ownerReferences[
|
|||
of an object, so that only users with "update" permission to the `finalizers`
|
||||
subresource of the referenced *owner* can change it.
|
||||
|
||||
### PersistentVolumeLabel {#persistentvolumelabel} {{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
### PersistentVolumeLabel {#persistentvolumelabel}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.13" state="deprecated" >}}
|
||||
|
||||
This admission controller automatically attaches region or zone labels to PersistentVolumes
|
||||
as defined by the cloud provider (for example, GCE or AWS).
|
||||
|
@ -708,7 +722,9 @@ objects in your Kubernetes deployment, you MUST use this admission controller to
|
|||
|
||||
See the [resourceQuota design doc](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md) and the [example of Resource Quota](/docs/concepts/policy/resource-quotas/) for more details.
|
||||
|
||||
### RuntimeClass {#runtimeclass} {{< feature-state for_k8s_version="v1.16" state="alpha" >}}
|
||||
### RuntimeClass {#runtimeclass}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.16" state="alpha" >}}
|
||||
|
||||
For [RuntimeClass](/docs/concepts/containers/runtime-class/) definitions which describe an overhead associated with running a pod,
|
||||
this admission controller will set the pod.Spec.Overhead field accordingly.
|
||||
|
@ -729,11 +745,15 @@ We strongly recommend using this admission controller if you intend to make use
|
|||
|
||||
The `StorageObjectInUseProtection` plugin adds the `kubernetes.io/pvc-protection` or `kubernetes.io/pv-protection` finalizers to newly created Persistent Volume Claims (PVCs) or Persistent Volumes (PVs). If a user deletes a PVC or PV, it is not removed until the finalizer is removed from the PVC or PV by the PVC or PV Protection Controller. Refer to [Storage Object in Use Protection](/docs/concepts/storage/persistent-volumes/#storage-object-in-use-protection) for more detailed information.
|
||||
|
||||
### TaintNodesByCondition {#taintnodesbycondition} {{< feature-state for_k8s_version="v1.12" state="beta" >}}
|
||||
### TaintNodesByCondition {#taintnodesbycondition}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.12" state="beta" >}}
|
||||
|
||||
This admission controller {{< glossary_tooltip text="taints" term_id="taint" >}} newly created Nodes as `NotReady` and `NoSchedule`. That tainting avoids a race condition that could cause Pods to be scheduled on new Nodes before their taints were updated to accurately reflect their reported conditions.
|
||||
|
||||
### ValidatingAdmissionWebhook {#validatingadmissionwebhook} {{< feature-state for_k8s_version="v1.13" state="beta" >}}
|
||||
### ValidatingAdmissionWebhook {#validatingadmissionwebhook}
|
||||
|
||||
{{< feature-state for_k8s_version="v1.13" state="beta" >}}
|
||||
|
||||
This admission controller calls any validating webhooks which match the request. Matching
|
||||
webhooks are called in parallel; if any of them rejects the request, the request
|
||||
|
@ -773,6 +793,4 @@ phase, and therefore is the last admission controller to run.
|
|||
in the mutating phase.
|
||||
|
||||
For earlier versions, there was no concept of validating versus mutating and the
|
||||
admission controllers ran in the exact order specified.
|
||||
|
||||
|
||||
admission controllers ran in the exact order specified.
|
|
@ -26,6 +26,8 @@ even a file with a list of usernames and passwords. In this regard, _Kubernetes
|
|||
does not have objects which represent normal user accounts._ Normal users
|
||||
cannot be added to a cluster through an API call.
|
||||
|
||||
Even though a normal user cannot be added via an API call, any user that presents a valid certificate signed by the cluster's certificate authority (CA) is considered authenticated. In this configuration, Kubernetes determines the username from the common name field in the 'subject' of the certificate (for example, "/CN=bob"). From there, the role-based access control (RBAC) sub-system determines whether the user is authorized to perform a specific operation on a resource. Refer to [creating user certificate request](/docs/reference/access-authn-authz/certificate-signing-requests/#user-csr) for more details.
|
||||
|
||||
In contrast, service accounts are users managed by the Kubernetes API. They are
|
||||
bound to specific namespaces, and created automatically by the API server or
|
||||
manually through API calls. Service accounts are tied to a set of credentials
|
||||
|
|
|
@ -48,7 +48,7 @@ The CertificateSigningRequest `status.certificate` field is empty until the sign
|
|||
|
||||
Once the `status.certificate` field has been populated, the request has been completed and clients can now
|
||||
fetch the signed certificate PEM data from the CertificateSigningRequest resource.
|
||||
Signers can instead deny certificate signing if the approval conditions are not met.
|
||||
The signers can instead deny certificate signing if the approval conditions are not met.
|
||||
|
||||
In order to reduce the number of old CertificateSigningRequest resources left in a cluster, a garbage collection
|
||||
controller runs periodically. The garbage collection removes CertificateSigningRequests that have not changed
|
||||
|
@ -67,10 +67,10 @@ This includes:
|
|||
1. **Permitted subjects**: any restrictions on and behavior when a disallowed subject is requested.
|
||||
1. **Permitted x509 extensions**: including IP subjectAltNames, DNS subjectAltNames, Email subjectAltNames, URI subjectAltNames etc, and behavior when a disallowed extension is requested.
|
||||
1. **Permitted key usages / extended key usages**: any restrictions on and behavior when usages different than the signer-determined usages are specified in the CSR.
|
||||
1. **Expiration/certificate lifetime**: whether it is fixed by the signer, configurable by the admin, determined by the CSR object etc and behavior if an expiration different than the signer-determined expiration is specified in the CSR.
|
||||
1. **Expiration/certificate lifetime**: whether it is fixed by the signer, configurable by the admin, determined by the CSR object etc and the behavior when an expiration is different than the signer-determined expiration that is specified in the CSR.
|
||||
1. **CA bit allowed/disallowed**: and behavior if a CSR contains a request for a CA certificate when the signer does not permit it.
|
||||
|
||||
Commonly, the `status.certificate` field contains a single PEM-encoded X.509 certificate once the CSR is approved and the certificate is issued. Some signers store multiple certificates into the `status.certificate` field. In that case, the documentation for the signer should specify the meaning of additional certificates; for example, this might be certificate plus intermediates to be presented during TLS handshakes.
|
||||
Commonly, the `status.certificate` field contains a single PEM-encoded X.509 certificate once the CSR is approved and the certificate is issued. Some signers store multiple certificates into the `status.certificate` field. In that case, the documentation for the signer should specify the meaning of additional certificates; for example, this might be the certificate plus intermediates to be presented during TLS handshakes.
|
||||
|
||||
### Kubernetes signers
|
||||
|
||||
|
@ -88,19 +88,18 @@ Kubernetes provides built-in signers that each have a well-known `signerName`:
|
|||
1. `kubernetes.io/kube-apiserver-client-kubelet`: signs client certificates that will be honored as client-certs by the
|
||||
kube-apiserver.
|
||||
May be auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}.
|
||||
1. Trust distribution: signed certificates must be honored as client-certificates by the kube-apiserver. The CA bundle
|
||||
1. Trust distribution: signed certificates must be honored as client-certificates by the kube-apiserver. The CA bundle
|
||||
is not distributed by any other means.
|
||||
1. Permitted subjects - organizations are exactly `[]string{"system:nodes"}`, common name starts with `"system:node:"`
|
||||
1. Permitted x509 extensions - honors key usage extensions, forbids subjectAltName extensions, drops other extensions.
|
||||
1. Permitted x509 extensions - honors key usage extensions, forbids subjectAltName extensions and drops other extensions.
|
||||
1. Permitted key usages - exactly `[]string{"key encipherment", "digital signature", "client auth"}`
|
||||
1. Expiration/certificate lifetime - minimum of CSR signer or request. Sanity of the time is the concern of the signer.
|
||||
1. Expiration/certificate lifetime - minimum of CSR signer or request. The signer is responsible for checking that the certificate lifetime is valid and permissible.
|
||||
1. CA bit allowed/disallowed - not allowed.
|
||||
|
||||
1. `kubernetes.io/kubelet-serving`: signs serving certificates that are honored as a valid kubelet serving certificate
|
||||
by the kube-apiserver, but has no other guarantees.
|
||||
Never auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}.
|
||||
1. Trust distribution: signed certificates must be honored by the kube-apiserver as valid to terminate connections to a kubelet.
|
||||
The CA bundle is not distributed by any other means.
|
||||
1. Trust distribution: signed certificates must be honored by the kube-apiserver as valid to terminate connections to a kubelet. The CA bundle is not distributed by any other means.
|
||||
1. Permitted subjects - organizations are exactly `[]string{"system:nodes"}`, common name starts with `"system:node:"`
|
||||
1. Permitted x509 extensions - honors key usage and DNSName/IPAddress subjectAltName extensions, forbids EmailAddress and URI subjectAltName extensions, drops other extensions. At least one DNS or IP subjectAltName must be present.
|
||||
1. Permitted key usages - exactly `[]string{"key encipherment", "digital signature", "server auth"}`
|
||||
|
@ -108,13 +107,13 @@ Kubernetes provides built-in signers that each have a well-known `signerName`:
|
|||
1. CA bit allowed/disallowed - not allowed.
|
||||
|
||||
1. `kubernetes.io/legacy-unknown`: has no guarantees for trust at all. Some distributions may honor these as client
|
||||
certs, but that behavior is not standard Kubernetes behavior.
|
||||
certs, but that behavior is non-standard Kubernetes behavior.
|
||||
Never auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}.
|
||||
1. Trust distribution: None. There is no standard trust or distribution for this signer in a Kubernetes cluster.
|
||||
1. Permitted subjects - any
|
||||
1. Permitted x509 extensions - honors subjectAltName and key usage extensions and discards other extensions.
|
||||
1. Permitted key usages - any
|
||||
1. Expiration/certificate lifetime - minimum of CSR signer or request. Sanity of the time is the concern of the signer.
|
||||
1. Expiration/certificate lifetime - minimum of CSR signer or request. The signer is responsible for checking that the certificate lifetime is valid and permissible.
|
||||
1. CA bit allowed/disallowed - not allowed.
|
||||
|
||||
{{< note >}}
|
||||
|
@ -226,6 +225,101 @@ rules:
|
|||
- sign
|
||||
```
|
||||
|
||||
## Normal User
|
||||
|
||||
A few steps are required for a normal user to be able to authenticate and invoke the API. First, this user must have a certificate issued by the Kubernetes cluster, and then present that certificate when calling the API, either directly or through kubectl.
|
||||
|
||||
### Create Private Key
|
||||
|
||||
The following script shows how to generate a PKI private key and CSR. It is important to set the CN and O attributes of the CSR. CN is the name of the user and O is the group that this user will belong to. You can refer to [RBAC](/docs/reference/access-authn-authz/rbac/) for standard groups.
|
||||
|
||||
```shell
|
||||
openssl genrsa -out john.key 2048
|
||||
openssl req -new -key john.key -out john.csr
|
||||
```
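
Because the CN and O fields matter (see above), a minimal non-interactive sketch that sets them explicitly may be useful; the group `developers` here is an assumed example, not something this page prescribes:

```shell
# Generate the key and a CSR with CN=john and O=developers in one pass (values are illustrative).
openssl genrsa -out john.key 2048
openssl req -new -key john.key -out john.csr -subj "/CN=john/O=developers"
```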
|
||||
|
||||
### Create Certificate Request Kubernetes Object
|
||||
|
||||
Create a CertificateSigningRequest and submit it to the Kubernetes cluster via kubectl. The following script generates the CertificateSigningRequest.
|
||||
|
||||
```shell
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: certificates.k8s.io/v1beta1
|
||||
kind: CertificateSigningRequest
|
||||
metadata:
|
||||
name: john
|
||||
spec:
|
||||
groups:
|
||||
- system:authenticated
|
||||
request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZqQ0NBVDRDQVFBd0VURVBNQTBHQTFVRUF3d0dZVzVuWld4aE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRgpBQU9DQVE4QU1JSUJDZ0tDQVFFQTByczhJTHRHdTYxakx2dHhWTTJSVlRWMDNHWlJTWWw0dWluVWo4RElaWjBOCnR2MUZtRVFSd3VoaUZsOFEzcWl0Qm0wMUFSMkNJVXBGd2ZzSjZ4MXF3ckJzVkhZbGlBNVhwRVpZM3ExcGswSDQKM3Z3aGJlK1o2MVNrVHF5SVBYUUwrTWM5T1Nsbm0xb0R2N0NtSkZNMUlMRVI3QTVGZnZKOEdFRjJ6dHBoaUlFMwpub1dtdHNZb3JuT2wzc2lHQ2ZGZzR4Zmd4eW8ybmlneFNVekl1bXNnVm9PM2ttT0x1RVF6cXpkakJ3TFJXbWlECklmMXBMWnoyalVnald4UkhCM1gyWnVVV1d1T09PZnpXM01LaE8ybHEvZi9DdS8wYk83c0x0MCt3U2ZMSU91TFcKcW90blZtRmxMMytqTy82WDNDKzBERHk5aUtwbXJjVDBnWGZLemE1dHJRSURBUUFCb0FBd0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBR05WdmVIOGR4ZzNvK21VeVRkbmFjVmQ1N24zSkExdnZEU1JWREkyQTZ1eXN3ZFp1L1BVCkkwZXpZWFV0RVNnSk1IRmQycVVNMjNuNVJsSXJ3R0xuUXFISUh5VStWWHhsdnZsRnpNOVpEWllSTmU3QlJvYXgKQVlEdUI5STZXT3FYbkFvczFqRmxNUG5NbFpqdU5kSGxpT1BjTU1oNndLaTZzZFhpVStHYTJ2RUVLY01jSVUyRgpvU2djUWdMYTk0aEpacGk3ZnNMdm1OQUxoT045UHdNMGM1dVJVejV4T0dGMUtCbWRSeEgvbUNOS2JKYjFRQm1HCkkwYitEUEdaTktXTU0xMzhIQXdoV0tkNjVoVHdYOWl4V3ZHMkh4TG1WQzg0L1BHT0tWQW9FNkpsYWFHdTlQVmkKdjlOSjVaZlZrcXdCd0hKbzZXdk9xVlA3SVFjZmg3d0drWm89Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo=
|
||||
usages:
|
||||
- client auth
|
||||
EOF
|
||||
```
|
||||
|
||||
Some points to note:
|
||||
|
||||
- `usages` has to be 'client auth'
|
||||
- `request` is the base64-encoded value of the CSR file content. You can get it with this command: ```cat john.csr | base64 | tr -d "\n"```
|
||||
|
||||
### Approve Certificate Request
|
||||
|
||||
Use kubectl, as a Kubernetes administrator, to list and approve the CSR.
|
||||
|
||||
Get the list of CSRs
|
||||
```shell
|
||||
kubectl get csr
|
||||
```
|
||||
|
||||
Approve the CSR
|
||||
```shell
|
||||
kubectl certificate approve john
|
||||
```
|
||||
|
||||
### Get the Certificate
|
||||
|
||||
Retrieve the Certificate from the CSR.
|
||||
|
||||
```shell
|
||||
kubectl get csr/john -o yaml
|
||||
```
|
||||
|
||||
The certificate value is Base64-encoded under `status.certificate`.
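
For example, you could extract and decode it into the `john.crt` file that the kubeconfig step below expects; the output path is an assumption used for illustration:

```shell
# Decode the issued certificate from the CSR object into a file (path is illustrative).
kubectl get csr john -o jsonpath='{.status.certificate}' | base64 --decode > /home/vagrant/work/john.crt
```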
|
||||
|
||||
### Create Role and Role Binding
|
||||
|
||||
With the certificate in hand, it is time to define the Role and RoleBinding for this user to access Kubernetes cluster resources.
|
||||
|
||||
This is a sample command to create a Role for this new user:
|
||||
```shell
|
||||
kubectl create role developer --verb=create --verb=get --verb=list --verb=update --verb=delete --resource=pods
|
||||
```
|
||||
|
||||
This is a sample command to create a RoleBinding for this new user:
|
||||
```shell
|
||||
kubectl create rolebinding developer-binding-john --role=developer --user=john
|
||||
```
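
As a quick, hedged sanity check (assuming the Role and RoleBinding above were created in the `default` namespace), you can ask the API server whether `john` is now allowed to work with Pods:

```shell
# Should print "yes" once the RoleBinding is in place.
kubectl auth can-i list pods --namespace default --as john
```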
|
||||
|
||||
### Add to KubeConfig
|
||||
|
||||
The last step is to add this user to the kubeconfig. This example assumes the key and certificate files are located at "/home/vagrant/work/".
|
||||
|
||||
First, add the new credentials:
|
||||
```shell
|
||||
kubectl config set-credentials john --client-key=/home/vagrant/work/john.key --client-certificate=/home/vagrant/work/john.crt --embed-certs=true
|
||||
|
||||
```
|
||||
|
||||
Then, add the context:
|
||||
```shell
|
||||
kubectl config set-context john --cluster=kubernetes --user=john
|
||||
```
|
||||
|
||||
To test it, change the kubectl context to `john`:
|
||||
```shell
|
||||
kubectl config use-context john
|
||||
```
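
A hedged way to confirm the whole chain works: Pod operations in the `default` namespace should now succeed, while anything outside the role, such as listing nodes, should be denied:

```shell
kubectl get pods    # allowed by the "developer" role
kubectl get nodes   # expected to fail with a Forbidden error
```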
|
||||
|
||||
## Approval & rejection
|
||||
|
||||
### Control plane automated approval {#approval-rejection-control-plane}
|
||||
|
|
|
@ -137,7 +137,8 @@ different Kubernetes components.
|
|||
| `ServerSideApply` | `true` | Beta | 1.16 | |
|
||||
| `ServiceNodeExclusion` | `false` | Alpha | 1.8 | |
|
||||
| `ServiceTopology` | `false` | Alpha | 1.17 | |
|
||||
| `StartupProbe` | `false` | Alpha | 1.16 | |
|
||||
| `StartupProbe` | `false` | Alpha | 1.16 | 1.17 |
|
||||
| `StartupProbe` | `true` | Beta | 1.18 | |
|
||||
| `StorageVersionHash` | `false` | Alpha | 1.14 | 1.14 |
|
||||
| `StorageVersionHash` | `true` | Beta | 1.15 | |
|
||||
| `StreamingProxyRedirects` | `false` | Beta | 1.5 | 1.5 |
|
||||
|
|
|
@ -290,10 +290,10 @@ kubectl logs -f my-pod # stream pod logs (stdout)
|
|||
kubectl logs -f my-pod -c my-container # stream pod container logs (stdout, multi-container case)
|
||||
kubectl logs -f -l name=myLabel --all-containers # stream all pods logs with label name=myLabel (stdout)
|
||||
kubectl run -i --tty busybox --image=busybox -- sh # Run pod as interactive shell
|
||||
kubectl run nginx --image=nginx --restart=Never -n
|
||||
kubectl run nginx --image=nginx -n
|
||||
mynamespace # Run pod nginx in a specific namespace
|
||||
kubectl run nginx --image=nginx --restart=Never # Run pod nginx and write its spec into a file called pod.yaml
|
||||
--dry-run -o yaml > pod.yaml
|
||||
kubectl run nginx --image=nginx # Run pod nginx and write its spec into a file called pod.yaml
|
||||
--dry-run=client -o yaml > pod.yaml
|
||||
|
||||
kubectl attach my-pod -i # Attach to Running Container
|
||||
kubectl port-forward my-pod 5000:6000 # Listen on port 5000 on the local machine and forward to port 6000 on my-pod
|
||||
|
|
|
@ -76,9 +76,6 @@ kubectl run [-i] [--tty] --attach <name> --image=<image>
|
|||
Unlike `docker run ...`, if you specify `--attach`, then you attach `stdin`, `stdout` and `stderr`. You cannot control which streams are attached (`docker -a ...`).
|
||||
To detach from the container, you can type the escape sequence Ctrl+P followed by Ctrl+Q.
|
||||
|
||||
Because the kubectl run command starts a Deployment for the container, the Deployment restarts if you terminate the attached process by using Ctrl+C, unlike `docker run -it`.
|
||||
To destroy the Deployment and its pods you need to run `kubectl delete deployment <name>`.
|
||||
|
||||
## docker ps
|
||||
|
||||
To list what is currently running, see [kubectl get](/docs/reference/generated/kubectl/kubectl-commands/#get).
|
||||
|
@ -191,7 +188,7 @@ docker exec -ti 55c103fa1296 /bin/sh
|
|||
kubectl:
|
||||
|
||||
```shell
|
||||
kubectl exec -ti nginx-app-5jyvm -- /bin/sh
|
||||
kubectl exec -ti nginx-app-5jyvm -- /bin/sh
|
||||
# exit
|
||||
```
|
||||
|
||||
|
|
|
@ -59,7 +59,7 @@ kubeadm join phase kubelet-start --help
|
|||
```
|
||||
|
||||
Similar to the [kubeadm init phase](/docs/reference/setup-tools/kubeadm/kubeadm-init/#init-phases)
|
||||
command, `kubadm join phase` allows you to skip a list of phases using the `--skip-phases` flag.
|
||||
command, `kubeadm join phase` allows you to skip a list of phases using the `--skip-phases` flag.
|
||||
|
||||
For example:
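
One possible invocation, sketched here with an illustrative phase name and config path rather than values taken from this page:

```shell
# Skip the preflight phase of kubeadm join (phase name and config path are illustrative).
sudo kubeadm join --skip-phases=preflight --config=/path/to/kubeadm-join-config.yaml
```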
|
||||
|
||||
|
|
|
@ -374,16 +374,19 @@ systemctl restart containerd
|
|||
## Set up the repository
|
||||
### Install required packages
|
||||
yum install -y yum-utils device-mapper-persistent-data lvm2
|
||||
```
|
||||
|
||||
```shell
|
||||
## Add docker repository
|
||||
yum-config-manager \
|
||||
--add-repo \
|
||||
https://download.docker.com/linux/centos/docker-ce.repo
|
||||
```
|
||||
|
||||
```shell
|
||||
## Install containerd
|
||||
yum update -y && yum install -y containerd.io
|
||||
```
|
||||
|
||||
```shell
|
||||
## Configure containerd
|
||||
|
|
|
@ -118,7 +118,7 @@ While `--apiserver-advertise-address` can be used to set the advertise address f
|
|||
control-plane node's API server, `--control-plane-endpoint` can be used to set the shared endpoint
|
||||
for all control-plane nodes.
|
||||
|
||||
`--control-plane-endpoint` allows IP addresses but also DNS names that can map to IP addresses.
|
||||
`--control-plane-endpoint` allows both IP addresses and DNS names that can map to IP addresses.
|
||||
Please contact your network administrator to evaluate possible solutions with respect to such mapping.
|
||||
|
||||
Here is an example mapping:
|
||||
|
@ -531,10 +531,9 @@ Talking to the control-plane node with the appropriate credentials, run:
|
|||
|
||||
```bash
|
||||
kubectl drain <node name> --delete-local-data --force --ignore-daemonsets
|
||||
kubectl delete node <node name>
|
||||
```
|
||||
|
||||
Then, on the node being removed, reset all `kubeadm` installed state:
|
||||
Before removing the node, reset the state installed by `kubeadm`:
|
||||
|
||||
```bash
|
||||
kubeadm reset
|
||||
|
@ -552,6 +551,11 @@ If you want to reset the IPVS tables, you must run the following command:
|
|||
ipvsadm -C
|
||||
```
|
||||
|
||||
Now remove the node:
|
||||
```bash
|
||||
kubectl delete node <node name>
|
||||
```
|
||||
|
||||
If you wish to start over simply run `kubeadm init` or `kubeadm join` with the
|
||||
appropriate arguments.
|
||||
|
||||
|
|
|
@ -191,7 +191,7 @@ sudo apt-mark hold kubelet kubeadm kubectl
|
|||
{{% /tab %}}
|
||||
{{% tab name="CentOS, RHEL or Fedora" %}}
|
||||
```bash
|
||||
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
|
||||
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
|
||||
[kubernetes]
|
||||
name=Kubernetes
|
||||
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
|
||||
|
@ -203,12 +203,12 @@ exclude=kubelet kubeadm kubectl
|
|||
EOF
|
||||
|
||||
# Set SELinux in permissive mode (effectively disabling it)
|
||||
setenforce 0
|
||||
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
|
||||
sudo setenforce 0
|
||||
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
|
||||
|
||||
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
|
||||
sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
|
||||
|
||||
systemctl enable --now kubelet
|
||||
sudo systemctl enable --now kubelet
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
|
@ -220,37 +220,41 @@ systemctl enable --now kubelet
|
|||
- You can leave SELinux enabled if you know how to configure it but it may require settings that are not supported by kubeadm.
|
||||
|
||||
{{% /tab %}}
|
||||
{{% tab name="Container Linux" %}}
|
||||
{{% tab name="Fedora CoreOS" %}}
|
||||
Install CNI plugins (required for most pod networks):
|
||||
|
||||
```bash
|
||||
CNI_VERSION="v0.8.2"
|
||||
mkdir -p /opt/cni/bin
|
||||
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-amd64-${CNI_VERSION}.tgz" | sudo tar -C /opt/cni/bin -xz
|
||||
```
|
||||
|
||||
Define the directory to download the command files into:
|
||||
|
||||
```bash
|
||||
DOWNLOAD_DIR=/usr/local/bin
|
||||
sudo mkdir -p $DOWNLOAD_DIR
|
||||
```
|
||||
|
||||
Install crictl (required for kubeadm / Kubelet Container Runtime Interface (CRI))
|
||||
|
||||
```bash
|
||||
CRICTL_VERSION="v1.17.0"
|
||||
mkdir -p /opt/bin
|
||||
curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz
|
||||
curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | sudo tar -C $DOWNLOAD_DIR -xz
|
||||
```
|
||||
|
||||
Install `kubeadm`, `kubelet`, `kubectl` and add a `kubelet` systemd service:
|
||||
|
||||
```bash
|
||||
RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)"
|
||||
|
||||
mkdir -p /opt/bin
|
||||
cd /opt/bin
|
||||
curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
|
||||
chmod +x {kubeadm,kubelet,kubectl}
|
||||
cd $DOWNLOAD_DIR
|
||||
sudo curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
|
||||
sudo chmod +x {kubeadm,kubelet,kubectl}
|
||||
|
||||
RELEASE_VERSION="v0.2.7"
|
||||
curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
|
||||
mkdir -p /etc/systemd/system/kubelet.service.d
|
||||
curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
|
||||
curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service
|
||||
sudo mkdir -p /etc/systemd/system/kubelet.service.d
|
||||
curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
|
||||
```
|
||||
|
||||
Enable and start `kubelet`:
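
A minimal sketch of that step, assuming the systemd unit files installed above:

```bash
sudo systemctl enable --now kubelet
```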
|
||||
|
|
|
@ -173,7 +173,7 @@ Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
|
|||
the KUBELET_KUBEADM_ARGS variable dynamically
|
||||
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
|
||||
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably,
|
||||
#the user should use the .NodeRegistration.KubeletExtraArgs object in the configuration files instead.
|
||||
# the user should use the .NodeRegistration.KubeletExtraArgs object in the configuration files instead.
|
||||
# KUBELET_EXTRA_ARGS should be sourced from this file.
|
||||
EnvironmentFile=-/etc/default/kubelet
|
||||
ExecStart=
|
||||
|
@ -198,9 +198,8 @@ The DEB and RPM packages shipped with the Kubernetes releases are:
|
|||
| Package name | Description |
|
||||
|--------------|-------------|
|
||||
| `kubeadm` | Installs the `/usr/bin/kubeadm` CLI tool and the [kubelet drop-in file](#the-kubelet-drop-in-file-for-systemd) for the kubelet. |
|
||||
| `kubelet` | Installs the `/usr/bin/kubelet` binary. |
|
||||
| `kubelet` | Installs the kubelet binary in `/usr/bin` and CNI binaries in `/opt/cni/bin`. |
|
||||
| `kubectl` | Installs the `/usr/bin/kubectl` binary. |
|
||||
| `kubernetes-cni` | Installs the official CNI binaries into the `/opt/cni/bin` directory. |
|
||||
| `cri-tools` | Installs the `/usr/bin/crictl` binary from the [cri-tools git repository](https://github.com/kubernetes-incubator/cri-tools). |
|
||||
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ Provision servers with the following [requirements](https://github.com/kubernete
|
|||
|
||||
* **Ansible v2.7.8 and python-netaddr are installed on the machine that will run Ansible commands**
|
||||
* **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
|
||||
* The target servers must have access to the Internet in order to pull docker images. Otherwise, additional configuration is required ([See Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/downloads.md#offline-environment))
|
||||
* The target servers must have access to the Internet in order to pull docker images. Otherwise, additional configuration is required ([See Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/offline-environment.md))
|
||||
* The target servers are configured to allow **IPv4 forwarding**
|
||||
* **Your ssh key must be copied** to all the servers part of your inventory
|
||||
* The **firewalls are not managed**: you'll need to implement your own rules the way you used to. In order to avoid any issues during deployment, you should disable your firewall
|
||||
|
|
|
@ -23,9 +23,7 @@ To create a Kubernetes cluster on AWS, you will need an Access Key ID and a Secr
|
|||
|
||||
* [Kubernetes Operations](https://github.com/kubernetes/kops) - Production Grade K8s Installation, Upgrades, and Management. Supports running Debian, Ubuntu, CentOS, and RHEL in AWS.
|
||||
|
||||
* [CoreOS Tectonic](https://coreos.com/tectonic/) includes the open-source [Tectonic Installer](https://github.com/coreos/tectonic-installer) that creates Kubernetes clusters with Container Linux nodes on AWS.
|
||||
|
||||
* CoreOS originated and the Kubernetes Incubator maintains [a CLI tool, kube-aws](https://github.com/kubernetes-incubator/kube-aws), that creates and manages Kubernetes clusters with [Container Linux](https://coreos.com/why/) nodes, using AWS tools: EC2, CloudFormation and Autoscaling.
|
||||
* [kube-aws](https://github.com/kubernetes-incubator/kube-aws), creates and manages Kubernetes clusters with [Flatcar Linux](https://www.flatcar-linux.org/) nodes, using AWS tools: EC2, CloudFormation and Autoscaling.
|
||||
|
||||
* [KubeOne](https://github.com/kubermatic/kubeone) is an open source cluster lifecycle management tool that creates, upgrades and manages Kubernetes Highly-Available clusters.
|
||||
|
||||
|
|
|
@ -101,12 +101,12 @@ If needed, you can expand the **Advanced options** section where you can specify
|
|||
|
||||
Example:
|
||||
|
||||
```conf
|
||||
release=1.0
|
||||
tier=frontend
|
||||
environment=pod
|
||||
track=stable
|
||||
```
|
||||
```conf
|
||||
release=1.0
|
||||
tier=frontend
|
||||
environment=pod
|
||||
track=stable
|
||||
```
|
||||
|
||||
- **Namespace**: Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called [namespaces](/docs/tasks/administer-cluster/namespaces/). They let you partition resources into logically named groups.
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ Depending on the installation method, your Kubernetes cluster may be deployed wi
|
|||
an existing StorageClass that is marked as default. This default StorageClass
|
||||
is then used to dynamically provision storage for PersistentVolumeClaims
|
||||
that do not require any specific storage class. See
|
||||
[PersistentVolumeClaim documentation](/docs/concepts/storage/persistent-volumes/#class-1)
|
||||
[PersistentVolumeClaim documentation](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
|
||||
for details.
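
A hedged way to check whether your cluster already has a default StorageClass:

```shell
# The default class (if any) is shown with "(default)" next to its name.
kubectl get storageclass
```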
|
||||
|
||||
The pre-installed default StorageClass may not fit well with your expected workload;
|
||||
|
|
|
@ -36,7 +36,7 @@ By default, the kubelet uses [CFS quota](https://en.wikipedia.org/wiki/Completel
|
|||
to enforce pod CPU limits. When the node runs many CPU-bound pods,
|
||||
the workload can move to different CPU cores depending on
|
||||
whether the pod is throttled and which CPU cores are available at
|
||||
scheduling time. Many workloads are not sensitive to this migration and thus
|
||||
scheduling time. Many workloads are not sensitive to this migration and thus
|
||||
work fine without any intervention.
|
||||
|
||||
However, in workloads where CPU cache affinity and scheduling latency
|
||||
|
|
|
@ -82,6 +82,10 @@ See the [design doc](https://git.k8s.io/community/contributors/design-proposals/
|
|||
|
||||
## Creating a new namespace
|
||||
|
||||
{{< note >}}
|
||||
Avoid creating namespaces with the prefix `kube-`, since it is reserved for Kubernetes system namespaces.
|
||||
{{< /note >}}
|
||||
|
||||
1. Create a new YAML file called `my-namespace.yaml` with the contents:
|
||||
|
||||
```yaml
|
||||
|
|
|
@ -57,7 +57,8 @@ The following sysctls are supported in the _safe_ set:
|
|||
|
||||
- `kernel.shm_rmid_forced`,
|
||||
- `net.ipv4.ip_local_port_range`,
|
||||
- `net.ipv4.tcp_syncookies`.
|
||||
- `net.ipv4.tcp_syncookies`,
|
||||
- `net.ipv4.ping_group_range` (since Kubernetes 1.18).
|
||||
|
||||
{{< note >}}
|
||||
The example `net.ipv4.tcp_syncookies` is not namespaced on Linux kernel version 4.4 or lower.
|
||||
|
|
|
@ -332,7 +332,7 @@ to 1 second. Minimum value is 1.
|
|||
* `successThreshold`: Minimum consecutive successes for the probe to be
|
||||
considered successful after having failed. Defaults to 1. Must be 1 for
|
||||
liveness. Minimum value is 1.
|
||||
* `failureThreshold`: When a Pod starts and the probe fails, Kubernetes will
|
||||
* `failureThreshold`: When a probe fails, Kubernetes will
|
||||
try `failureThreshold` times before giving up. Giving up in the case of a liveness probe means restarting the container. In the case of a readiness probe, the Pod will be marked Unready.
|
||||
Defaults to 3. Minimum value is 1.
|
||||
|
||||
|
|
|
@ -323,7 +323,7 @@ The application is responsible for reloading the token when it rotates. Periodic
|
|||
{{< feature-state for_k8s_version="v1.18" state="alpha" >}}
|
||||
|
||||
The Service Account Issuer Discovery feature is enabled by enabling the
|
||||
`ServiceAccountIssuerDiscovery` [feature gate](/docs/reference/command-line-tools-reference/feature)
|
||||
`ServiceAccountIssuerDiscovery` [feature gate](/docs/reference/command-line-tools-reference/feature-gates)
|
||||
and then enabling the Service Account Token Projection feature as described
|
||||
[above](#service-account-token-volume-projection).
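
A hedged sketch of what that might look like on the kube-apiserver command line; the issuer URL and key path are placeholders, and your cluster's other service-account flags still apply:

```shell
kube-apiserver --feature-gates=ServiceAccountIssuerDiscovery=true \
  --service-account-issuer=https://kubernetes.default.svc \
  --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
```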
|
||||
|
||||
|
|
|
@ -116,6 +116,6 @@ For further details, see [Kubernetes Audit Events][falco_ka_docs] in the Falco d
|
|||
[falco_k8s_audit_rules]: https://github.com/falcosecurity/falco/blob/master/rules/k8s_audit_rules.yaml
|
||||
[falco_ka_docs]: https://falco.org/docs/event-sources/kubernetes-audit
|
||||
[falco_installation]: https://falco.org/docs/installation
|
||||
[falco_helm_chart]: https://github.com/helm/charts/tree/master/stable/falco
|
||||
[falco_helm_chart]: https://github.com/falcosecurity/charts/tree/master/falco
|
||||
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@ reviewers:
|
|||
- madhusudancs
|
||||
title: Configure Multiple Schedulers
|
||||
content_type: task
|
||||
weight: 20
|
||||
---
|
||||
|
||||
<!-- overview -->
|
|
@ -181,7 +181,8 @@ are preserved as annotations when working with `autoscaling/v1`.
|
|||
When you create a HorizontalPodAutoscaler API object, make sure the name specified is a valid
|
||||
[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
|
||||
More details about the API object can be found at
|
||||
[HorizontalPodAutoscaler Object](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object).
|
||||
[HorizontalPodAutoscaler Object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#horizontalpodautoscaler-v1-autoscaling).
|
||||
|
||||
|
||||
## Support for Horizontal Pod Autoscaler in kubectl
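
As a hedged illustration of that kubectl support (the Deployment name and targets are placeholders, not taken from this page), an autoscaler can be created imperatively:

```shell
kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
```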
|
||||
|
||||
|
|
|
@ -70,7 +70,7 @@ Before walking through each tutorial, you may want to bookmark the
|
|||
|
||||
|
||||
If you would like to write a tutorial, see
|
||||
[Content Page Types](/docs/home/contribute/style/page-content-types/)
|
||||
[Content Page Types](/docs/contribute/style/page-content-types/)
|
||||
for information about the tutorial page type.
|
||||
|
||||
|
||||
|
|
|
@ -93,7 +93,7 @@ Use `kubectl exec` to enter the pod and run the `redis-cli` tool to verify that
|
|||
the configuration was correctly applied:
|
||||
|
||||
```shell
|
||||
kubectl exec -it redis redis-cli
|
||||
kubectl exec -it redis -- redis-cli
|
||||
127.0.0.1:6379> CONFIG GET maxmemory
|
||||
1) "maxmemory"
|
||||
2) "2097152"
|
||||
|
|
|
@ -40,7 +40,7 @@ weight: 10
|
|||
</ul>
|
||||
</div>
|
||||
<div class="content__box content__box_fill">
|
||||
<p><i> You can create from the start a Deployment with multiple instances using the --replicas parameter for the kubectl run command </i></p>
|
||||
<p><i> You can create a Deployment with multiple instances from the start by using the --replicas parameter of the kubectl create deployment command </i></p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
|
|
@ -190,8 +190,8 @@ Now you can verify that all objects exist.
|
|||
The response should be like this:
|
||||
|
||||
```
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
wordpress ClusterIP 10.0.0.89 <pending> 80:32406/TCP 4m
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
wordpress LoadBalancer 10.0.0.89 <pending> 80:32406/TCP 4m
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
|
|
|
@ -6,13 +6,13 @@ weight: 30
|
|||
|
||||
<!-- overview -->
|
||||
|
||||
El concepto del Cloud Controller Manager (CCM) (no confundir con el ejecutable) fue creado originalmente para permitir que Kubernetes y el código específico de proveedores de servicios en la nube evolucionasen de forma independiente. El Cloud Controller Manager se ejecuta a la par con otros componentes maestros como el Kubernetes Controller Manager, el API Server y el planificador. También puede ejecutarse como un extra, en cuyo caso se ejecuta por encima de Kubernetes.
|
||||
El concepto del Cloud Controller Manager (CCM) (no confundir con el ejecutable) fue creado originalmente para permitir que Kubernetes y el código específico de proveedores de servicios en la nube evolucionen de forma independiente. El Cloud Controller Manager se ejecuta a la par con otros componentes maestros como el Kubernetes Controller Manager, el API Server y el planificador. También puede ejecutarse como un extra, en cuyo caso se ejecuta por encima de Kubernetes.
|
||||
|
||||
El diseño del Cloud Controller Manager está basado en un sistema de plugins, lo que permite a nuevos proveedores de servicios integrarse de forma fácil con Kubernetes. Se está trabajando en incorporar nuevos proveedores de servicios y para migrar los existentes del viejo modelo al nuevo CCM.
|
||||
El diseño del Cloud Controller Manager está basado en un sistema de plugins, lo que permite a nuevos proveedores de servicios integrarse de forma fácil con Kubernetes. Se está trabajando en implementar nuevos proveedores de servicios y para migrar los existentes del antiguo modelo al nuevo CCM.
|
||||
|
||||
Este documento describe los conceptos tras el Cloud Controller Manager y da detalles sobre sus funciones asociadas.
|
||||
Este documento describe los conceptos tras el Cloud Controller Manager y detalla sus funciones asociadas.
|
||||
|
||||
En la siguiente imagen, se puede ver la arquitectura de un cluster de Kubernetes que no utiliza el Cloud Controller Manager:
|
||||
En la siguiente imagen, se puede visualizar la arquitectura de un cluster de Kubernetes que no utiliza el Cloud Controller Manager:
|
||||
|
||||
![Arquitectura previa a CCM](/images/docs/pre-ccm-arch.png)
|
||||
|
||||
|
@ -235,4 +235,3 @@ Los siguientes proveedores de servicios en la nube han implementado CCMs:
|
|||
|
||||
Instrucciones para configurar y ejecutar el CCM pueden encontrarse [aquí](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager).
|
||||
|
||||
|
||||
|
|
|
@ -111,7 +111,7 @@ El controlador juega múltiples papeles en la vida de un nodo. El primero es asi
|
|||
|
||||
El segundo es mantener actualizada la lista interna del controlador con la lista de máquinas disponibles a través del proveedor de servicios en la nube. Cuando Kubernetes se ejecuta en la nube, si un nodo deja de responder, el controlador del nodo preguntará al proveedor si la máquina virtual de dicho nodo continúa estando disponible. Si no lo está, el controlador borrará dicho nodo de su lista interna.
|
||||
|
||||
El tercero es el de monitorizar la salud de los nodos. El controlador de nodos es el responsable de actualizar la condición `NodeReady` del campo `NodeStatus` a `ConditionUnknown` cuando un nodo deja de estar accesible (por ejemplo, si deja de recibir señales de vida del nodo indicando que está disponible, conocidas como latidos o `hearbeats` en inglés) y, también es responsable de posteriormente desalojar todos los pods del nodo si este continúa estando inalcanzable. Por defecto, cuando un nodo deja de responder, el controlador sigue re-intentando contactar con el nodo durante 40 segundos antes de marcar el nodo con `ConditionUnknown` y, si el nodo no se recupera de ese estado pasados 5 minutos, empezará a drenar los pods del nodo para desplegarlos en otro nodo que esté disponible. El controlador comprueba el estado de cada nodo cada `--node-monitor-period` segundos.
|
||||
El tercero es el de monitorizar la salud de los nodos. El controlador de nodos es el responsable de actualizar la condición `NodeReady` del campo `NodeStatus` a `ConditionUnknown` cuando un nodo deja de estar accesible (por ejemplo, si deja de recibir señales de vida del nodo indicando que está disponible, conocidas como latidos o `heartbeats` en inglés) y, también es responsable de posteriormente desalojar todos los pods del nodo si este continúa estando inalcanzable. Por defecto, cuando un nodo deja de responder, el controlador sigue reintentando contactar con el nodo durante 40 segundos antes de marcar el nodo con `ConditionUnknown` y, si el nodo no se recupera de ese estado pasados 5 minutos, empezará a drenar los pods del nodo para desplegarlos en otro nodo que esté disponible. El controlador comprueba el estado de cada nodo cada `--node-monitor-period` segundos.
|
||||
|
||||
En versiones de Kubernetes previas a 1.13, `NodeStatus` es el `heartbeat` del nodo. Empezando con 1.13 la funcionalidad de `node lease` se introduce como alfa (`NodeLease`,
|
||||
[KEP-0009](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0009-node-heartbeat.md)). Cuando la funcionalidad está habilitada, cada nodo tiene un objeto `Lease` asociado en el namespace `kube-node-lease` que se renueva periódicamente y ambos, el `NodeStatus` y el `Lease` son considerados como `heartbeats` del nodo. `Node leases` se renuevan con frecuencia, mientras que `NodeStatus` se transmite desde el nodo al máster únicamente si hay cambios o si ha pasado cierto tiempo (por defecto, 1 minuto, que es más que la cuenta atrás por defecto de 40 segundos que marca un nodo como inalcanzable). Al ser los `node lease` más ligeros que `NodeStatus`, los `heartbeats` resultan más económicos desde las perspectivas de escalabilidad y de rendimiento.
|
||||
|
@ -123,7 +123,7 @@ En la mayoría de los casos, el controlador de nodos limita el ritmo de desalojo
|
|||
El comportamiento de desalojo de nodos cambia cuando un nodo en una zona de disponibilidad tiene problemas. El controlador de nodos comprobará qué porcentaje de nodos en la zona no se encuentran en buen estado (es decir, que su condición `NodeReady` tiene un valor `ConditionUnknown` o `ConditionFalse`) al mismo tiempo. Si la fracción de nodos con problemas es de al menos `--unhealthy-zone-threshold` (0.55 por defecto) entonces se reduce el ratio de desalojos: si el clúster es pequeño (por ejemplo, tiene menos o los mismos nodos que `--large-cluster-size-threshold` - 50 por defecto) entonces los desalojos se paran. Sino, el ratio se reduce a `--secondary-node-eviction-rate` (0.01 por defecto) por segundo. La razón por la que estas políticas se implementan por zonas de disponibilidad es debido a que una zona puede quedarse aislada del nodo máster mientras que las demás continúan conectadas. Si un clúster no comprende más de una zona, todo el clúster se considera una única zona.
|
||||
|
||||
La razón principal por la que se distribuyen nodos entre varias zonas de disponibilidad es para que el volumen de trabajo se transfiera a aquellas zonas que se encuentren en buen estado cuando una de las zonas se caiga.
|
||||
Por consiguiente, si todos los nodos de una zona se encuentran en mal estado, el nodo controlador desaloja al ritmo normal `--node-eviction-rate`. En el caso extremo de que todas las zonas se encuentran en mal estado (es decir, no responda ningún nodo del clúster), el controlador de nodos asume que hay algún tipo de problema con la conectividad del nodo máster y paraliza todos los desalojos hasta que se re-establece la conectividad.
|
||||
Por consiguiente, si todos los nodos de una zona se encuentran en mal estado, el nodo controlador desaloja al ritmo normal `--node-eviction-rate`. En el caso extremo de que todas las zonas se encuentran en mal estado (es decir, no responda ningún nodo del clúster), el controlador de nodos asume que hay algún tipo de problema con la conectividad del nodo máster y paraliza todos los desalojos hasta que se restablezca la conectividad.
|
||||
|
||||
Desde la versión 1.6 de Kubernetes el controlador de nodos también es el responsable de desalojar pods que están ejecutándose en nodos con `NoExecute` taints, cuando los pods no permiten dichos taints. De forma adicional, como una funcionalidad alfa que permanece deshabilitada por defecto, el `NodeController` es responsable de añadir taints que se corresponden con problemas en los nodos del tipo nodo inalcanzable o nodo no preparado. En [esta sección de la documentación](/docs/concepts/configuration/taint-and-toleration/) hay más detalles acerca de los taints `NoExecute` y de la funcionalidad alfa.
|
||||
|
||||
|
|
|
@ -49,7 +49,7 @@ una plataforma: para poder construir un ecosistema de componentes y herramientas
|
|||
más fácil el desplegar, escalar y administrar aplicaciones.
|
||||
|
||||
Las etiquetas, o [Labels](/es/docs/concepts/overview/working-with-objects/labels/), le
|
||||
permiten a los usuarios organizar sus recursos como deseen. Las anotaciones , o [Annotations](/es/docs/concepts/overview/working-with-objects/annotations/), les permiten asignar información arbitraria a un recurso para
|
||||
permiten a los usuarios organizar sus recursos como deseen. Las anotaciones, o [Annotations](/es/docs/concepts/overview/working-with-objects/annotations/), les permiten asignar información arbitraria a un recurso para
|
||||
facilitar sus flujos de trabajo y hacer más fácil a las herramientas administrativas inspeccionar el estado.
|
||||
|
||||
Además, el [Plano de Control](/docs/concepts/overview/components/) de Kubernetes usa las mismas
|
||||
|
@ -127,7 +127,7 @@ En resumen, los beneficios de usar contenedores incluyen:
|
|||
|
||||
* **Ágil creación y despliegue de aplicaciones**:
|
||||
Mayor facilidad y eficiencia al crear imágenes de contenedor en vez de máquinas virtuales
|
||||
* **Desarrollo, integración y despliegue continuos**:
|
||||
* **Desarrollo, integración y despliegue continuo**:
|
||||
Permite que la imagen de contenedor se construya y despliegue de forma frecuente y confiable,
|
||||
facilitando los rollbacks pues la imagen es inmutable
|
||||
* **Separación de tareas entre Dev y Ops**:
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,183 @@
|
|||
---
|
||||
title: Recolección de Basura
|
||||
content_template: templates/concept
|
||||
weight: 60
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
||||
El papel del recolector de basura de Kubernetes es el de eliminar determinados objetos
|
||||
que en algún momento tuvieron un propietario, pero que ahora ya no.
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture body %}}
|
||||
|
||||
## Propietarios y subordinados
|
||||
|
||||
Algunos objetos de Kubernetes son propietarios de otros objetos. Por ejemplo, un ReplicaSet
|
||||
es el propietario de un conjunto de Pods. Los objetos que se poseen se denominan *subordinados* del
|
||||
objeto propietario. Cada objeto subordinado tiene un campo `metadata.ownerReferences`
|
||||
que apunta al objeto propietario.
|
||||
|
||||
En ocasiones, Kubernetes pone el valor del campo `ownerReference` automáticamente.
|
||||
Por ejemplo, cuando creas un ReplicaSet, Kubernetes automáticamente pone el valor del campo
|
||||
`ownerReference` de cada Pod en el ReplicaSet. A partir de la versión 1.8, Kubernetes
|
||||
automáticamente pone el valor de `ownerReference` para los objetos creados o adoptados
|
||||
por un ReplicationController, ReplicaSet, StatefulSet, DaemonSet, Deployment, Job
|
||||
y CronJob.
|
||||
|
||||
También puedes configurar las relaciones entre los propietarios y sus subordinados
|
||||
de forma manual indicando el valor del campo `ownerReference`.
|
||||
|
||||
Aquí se muestra un archivo de configuración para un ReplicaSet que tiene tres Pods:
|
||||
|
||||
{{< codenew file="controllers/replicaset.yaml" >}}
|
||||
|
||||
Si se crea el ReplicaSet y entonces se muestra los metadatos del Pod, se puede
|
||||
observar el campo OwnerReferences:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/controllers/replicaset.yaml
|
||||
kubectl get pods --output=yaml
|
||||
```
|
||||
|
||||
La salida muestra que el propietario del Pod es el ReplicaSet denominado `my-repset`:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
...
|
||||
ownerReferences:
|
||||
- apiVersion: apps/v1
|
||||
controller: true
|
||||
blockOwnerDeletion: true
|
||||
kind: ReplicaSet
|
||||
name: my-repset
|
||||
uid: d9607e19-f88f-11e6-a518-42010a800195
|
||||
...
|
||||
```
|
||||
|
||||
{{< note >}}
|
||||
No se recomienda el uso de OwnerReferences entre Namespaces por diseño. Esto quiere decir que:
|
||||
1) Los subordinados dentro del ámbito de Namespaces sólo pueden definir propietarios en ese mismo Namespace,
|
||||
y propietarios dentro del ámbito de clúster.
|
||||
2) Los subordinados dentro del ámbito del clúster sólo pueden definir propietarios dentro del ámbito del clúster, pero no
|
||||
propietarios dentro del ámbito de Namespaces.
|
||||
{{< /note >}}
|
||||
|
||||
## Controlar cómo el recolector de basura elimina los subordinados
|
||||
|
||||
Cuando eliminas un objeto, puedes indicar si sus subordinados deben eliminarse también
|
||||
de forma automática. Eliminar los subordinados automáticamente se denomina *borrado en cascada*.
|
||||
Hay dos modos de *borrado en cascada*: *en segundo plano* y *en primer plano*.
|
||||
|
||||
Si eliminas un objeto sin borrar sus subordinados de forma automática,
|
||||
dichos subordinados se convierten en *huérfanos*.
|
||||
|
||||
### Borrado en cascada en primer plano
|
||||
|
||||
En el *borrado en cascada en primer plano*, el objeto raíz primero entra en un estado
|
||||
llamado "deletion in progress". En este estado "deletion in progress",
|
||||
se cumplen las siguientes premisas:
|
||||
|
||||
* El objeto todavía es visible a través de la API REST
|
||||
* Se pone el valor del campo `deletionTimestamp` del objeto
|
||||
* El campo `metadata.finalizers` del objeto contiene el valor "foregroundDeletion".
|
||||
|
||||
Una vez que se pone el estado "deletion in progress", el recolector de basura elimina
|
||||
los subordinados del objeto. Una vez que el recolector de basura ha eliminado todos
|
||||
los subordinados "bloqueantes" (los objetos con `ownerReference.blockOwnerDeletion=true`), elimina
|
||||
el objeto propietario.
|
||||
|
||||
Cabe mencionar que usando "foregroundDeletion", sólo los subordinados con valor en
|
||||
`ownerReference.blockOwnerDeletion` bloquean la eliminación del objeto propietario.
|
||||
A partir de la versión 1.7, Kubernetes añadió un [controlador de admisión](/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement)
|
||||
que controla el acceso de usuario cuando se intenta poner el campo `blockOwnerDeletion` a true
|
||||
con base a los permisos de borrado del objeto propietario, de forma que aquellos subordinados no autorizados
|
||||
no puedan retrasar la eliminación del objeto propietario.
|
||||
|
||||
Si un controlador (como un Deployment o un ReplicaSet) establece el valor del campo `ownerReferences` de un objeto,
|
||||
se pone blockOwnerDeletion automáticamente y no se necesita modificar de forma manual este campo.
|
||||
|
||||
### Borrado en cascada en segundo plano
|
||||
|
||||
En el *borrado en cascada en segundo plano*, Kubernetes elimina el objeto propietario
|
||||
inmediatamente y es el recolector de basura quien se encarga de eliminar los subordinados en segundo plano.
|
||||
|
||||
### Configurar la regla de borrado en cascada
|
||||
|
||||
Para controlar la regla de borrado en cascada, configura el campo `propagationPolicy`
|
||||
del parámetro `deleteOptions` cuando elimines un objeto. Los valores posibles incluyen "Orphan",
|
||||
"Foreground", o "Background".
|
||||
|
||||
Antes de la versión 1.9 de Kubernetes, la regla predeterminada del recolector de basura para la mayoría de controladores era `orphan`.
|
||||
Esto incluía al ReplicationController, ReplicaSet, StatefulSet, DaemonSet, y al Deployment.
|
||||
Para los tipos dentro de las versiones de grupo `extensions/v1beta1`, `apps/v1beta1`, y `apps/v1beta2`, a menos que
|
||||
se indique de otra manera, los objetos subordinados se quedan huérfanos por defecto.
|
||||
En Kubernetes 1.9, para todos los tipos de la versión de grupo `apps/v1`, los objetos subordinados se eliminan por defecto.
|
||||
|
||||
Aquí se muestra un ejemplo que elimina los subordinados en segundo plano:
|
||||
|
||||
```shell
|
||||
kubectl proxy --port=8080
|
||||
curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-repset \
|
||||
-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Background"}' \
|
||||
-H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
Aquí se muestra un ejemplo que elimina los subordinados en primer plano:
|
||||
|
||||
```shell
|
||||
kubectl proxy --port=8080
|
||||
curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-repset \
|
||||
-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}' \
|
||||
-H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
Aquí se muestra un ejemplo de subordinados huérfanos:
|
||||
|
||||
```shell
|
||||
kubectl proxy --port=8080
|
||||
curl -X DELETE localhost:8080/apis/apps/v1/namespaces/default/replicasets/my-repset \
|
||||
-d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Orphan"}' \
|
||||
-H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
kubectl también permite el borrado en cascada.
|
||||
Para eliminar los subordinados automáticamente, utiliza el parámetro `--cascade` a true.
|
||||
Usa false para subordinados huérfanos. Por defecto, el valor de `--cascade`
|
||||
es true.
|
||||
|
||||
Aquí se muestra un ejemplo que deja huérfanos a los subordinados de un ReplicaSet:
|
||||
|
||||
```shell
|
||||
kubectl delete replicaset my-repset --cascade=false
|
||||
```
|
||||
|
||||
### Nota adicional sobre los Deployments
|
||||
|
||||
Antes de la versión 1.7, cuando se usaba el borrado en cascada con Deployments se *debía* usar `propagationPolicy: Foreground`
|
||||
para eliminar no sólo los ReplicaSets creados, sino también sus Pods correspondientes. Si este tipo de _propagationPolicy_
|
||||
no se usa, solo se eliminan los ReplicaSets, y los Pods se quedan huérfanos.
|
||||
Ver [kubeadm/#149](https://github.com/kubernetes/kubeadm/issues/149#issuecomment-284766613) para más información.
|
||||
|
||||
## Problemas conocidos
|
||||
|
||||
Seguimiento en [#26120](https://github.com/kubernetes/kubernetes/issues/26120)
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture whatsnext %}}
|
||||
|
||||
[Documento de Diseño 1](https://git.k8s.io/community/contributors/design-proposals/api-machinery/garbage-collection.md)
|
||||
|
||||
[Documento de Diseño 2](https://git.k8s.io/community/contributors/design-proposals/api-machinery/synchronous-garbage-collection.md)
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,457 @@
|
|||
---
|
||||
title: Jobs - Ejecución hasta el final
|
||||
content_template: templates/concept
|
||||
feature:
|
||||
title: Ejecución en lotes
|
||||
description: >
|
||||
Además de los servicios, Kubernetes puede gestionar tus trabajos por lotes y CI, sustituyendo los contenedores que fallen, si así se desea.
|
||||
weight: 70
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
||||
Un Job crea uno o más Pods y se asegura de que un número específico de ellos termina de forma satisfactoria.
|
||||
Conforme los pods terminan satisfactoriamente, el Job realiza el seguimiento de las ejecuciones satisfactorias.
|
||||
Cuando se alcanza un número específico de ejecuciones satisfactorias, la tarea (esto es, el Job) se completa.
|
||||
Al eliminar un Job se eliminan los Pods que haya creado.
|
||||
|
||||
Un caso simple de uso es crear un objeto Job para que se ejecute un Pod de manera fiable hasta el final.
|
||||
El objeto Job arrancará un nuevo Pod si el primer Pod falla o se elimina (por ejemplo
|
||||
como consecuencia de un fallo de hardware o un reinicio en un nodo).
|
||||
|
||||
También se puede usar un Job para ejecutar múltiples Pods en paralelo.
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
|
||||
{{% capture body %}}
|
||||
|
||||
## Ejecutar un Job de ejemplo
|
||||
|
||||
Aquí se muestra un ejemplo de configuración de Job. Este ejemplo calcula los primeros 2000 decimales de π y los imprime por pantalla.
|
||||
Tarda unos 10s en completarse.
|
||||
|
||||
{{< codenew file="controllers/job.yaml" >}}
|
||||
|
||||
Puedes ejecutar el ejemplo con este comando:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/controllers/job.yaml
|
||||
```
|
||||
```
|
||||
job "pi" created
|
||||
```
|
||||
|
||||
Comprueba el estado del Job con `kubectl`:
|
||||
|
||||
```shell
|
||||
kubectl describe jobs/pi
|
||||
```
|
||||
```
|
||||
Name: pi
|
||||
Namespace: default
|
||||
Selector: controller-uid=b1db589a-2c8d-11e6-b324-0209dc45a495
|
||||
Labels: controller-uid=b1db589a-2c8d-11e6-b324-0209dc45a495
|
||||
job-name=pi
|
||||
Annotations: <none>
|
||||
Parallelism: 1
|
||||
Completions: 1
|
||||
Start Time: Tue, 07 Jun 2016 10:56:16 +0200
|
||||
Pods Statuses: 0 Running / 1 Succeeded / 0 Failed
|
||||
Pod Template:
|
||||
Labels: controller-uid=b1db589a-2c8d-11e6-b324-0209dc45a495
|
||||
job-name=pi
|
||||
Containers:
|
||||
pi:
|
||||
Image: perl
|
||||
Port:
|
||||
Command:
|
||||
perl
|
||||
-Mbignum=bpi
|
||||
-wle
|
||||
print bpi(2000)
|
||||
Environment: <none>
|
||||
Mounts: <none>
|
||||
Volumes: <none>
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
1m 1m 1 {job-controller } Normal SuccessfulCreate Created pod: pi-dtn4q
|
||||
```
|
||||
|
||||
Para ver los Pods de un Job que se han completado, usa `kubectl get pods`.
|
||||
|
||||
Para listar todos los Pods que pertenecen a un Job de forma que sea legible, puedes usar un comando como:
|
||||
|
||||
```shell
|
||||
pods=$(kubectl get pods --selector=job-name=pi --output=jsonpath='{.items[*].metadata.name}')
|
||||
echo $pods
|
||||
```
|
||||
```
|
||||
pi-aiw0a
|
||||
```
|
||||
|
||||
En este caso, el selector es el mismo que el selector del Job. La opción `--output=jsonpath` indica una expresión
|
||||
que simplemente obtiene el nombre de cada Pod en la lista devuelta.
|
||||
|
||||
Mira la salida estándar de uno de los Pods:
|
||||
|
||||
```shell
|
||||
kubectl logs $pods
|
||||
3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989380952572010654858632788659361533818279682303019520353018529689957736225994138912497217752834791315155748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035637076601047101819429555961989467678374494482553797747268471040475346462080466842590694912933136770289891521047521620569660240580381501935112533824300355876402474964732639141992726042699227967823547816360093417216412199245863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818347977535663698074265425278625518184175746728909777727938000816470600161452491921732172147723501414419735685481613611573525521334757418494684385233239073941433345477624168625189835694855620992192221842725502542568876717904946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886269456042419652850222106611863067442786220391949450471237137869609563643719172874677646575739624138908658326459958133904780275901
|
||||
```
|
||||
|
||||
## Escribir una especificación de Job
|
||||
|
||||
Como con el resto de configuraciones de Kubernetes, un Job necesita los campos `apiVersion`, `kind`, y `metadata`.
|
||||
|
||||
Un Job también necesita la [sección `.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
|
||||
|
||||
### Plantilla Pod
|
||||
|
||||
El campo `.spec.template` es el único campo obligatorio de `.spec`.
|
||||
|
||||
El campo `.spec.template` es una [plantilla Pod](/docs/concepts/workloads/pods/pod-overview/#pod-templates). Tiene exactamente el mismo esquema que un [pod](/docs/user-guide/pods),
|
||||
excepto por el hecho de que está anidado y no tiene el campo `apiVersion` o `kind`.
|
||||
|
||||
Además de los campos obligatorios de un Pod, una plantilla Pod de un Job debe indicar las etiquetas apropiadas
|
||||
(ver [selector de pod](#pod-selector)) y una regla de reinicio apropiada.
|
||||
|
||||
Sólo se permiten los valores `Never` o `OnFailure` para [`RestartPolicy`](/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy).
|
||||
|
||||
### Selector de Pod
|
||||
|
||||
El campo `.spec.selector` es opcional. En la práctica, en la mayoría de los casos no deberías configurarlo.
|
||||
Mira la sección sobre [configurar tu propio selector de pod](#specifying-your-own-pod-selector).
|
||||
|
||||
|
||||
### Jobs en paralelo
|
||||
|
||||
Hay tres tipos principales de tarea aptos para ejecutarse como un Job:
|
||||
|
||||
1. Jobs no paralelos
|
||||
- normalmente, sólo se arranca un Pod, a menos que el Pod falle.
|
||||
- el Job se completa tan pronto como su Pod termine de forma satisfactoria.
|
||||
1. Jobs en paralelo con un *cupo fijo de terminación*:
|
||||
- se configura un valor positivo distinto de cero para el campo `.spec.completions`.
|
||||
- el Job representa la tarea en general, y se completa cuando hay una ejecución satisfactoria de un Pod por cada valor dentro del rango de 1 a `.spec.completions`.
|
||||
- **no implementado todavía:** A cada Pod se le pasa un índice diferente dentro del rango de 1 a `.spec.completions`.
|
||||
1. Jobs en paralelo con una *cola de trabajo*:
|
||||
- no se especifica el campo `.spec.completions`; por defecto toma el valor de `.spec.parallelism`.
|
||||
- los Pods deben coordinarse entre ellos mismos o a través de un servicio externo que determine quién debe trabajar en qué.
|
||||
Por ejemplo, un Pod podría ir a buscar un lote de hasta N ítems de una cola de trabajo.
|
||||
- cada Pod es capaz de forma independiente de determinar si sus compañeros han terminado o no, y como consecuencia el Job entero ha terminado.
|
||||
- cuando _cualquier_ Pod del Job termina con éxito, no se crean nuevos Pods.
|
||||
- una vez que al menos uno de los Pods ha terminado con éxito y todos los Pods han terminado, entonces el Job termina con éxito.
|
||||
- una vez que cualquier Pod ha terminado con éxito, ningún otro Pod debería continuar trabajando en la misma tarea o escribiendo ningún resultado. Todos ellos deberían estar en proceso de terminarse.
|
||||
|
||||
En un Job _no paralelo_, no debes indicar el valor de `.spec.completions` ni `.spec.parallelism`. Cuando ambos se dejan
|
||||
sin valor, ambos se predeterminan a 1.
|
||||
|
||||
En un Job con _cupo fijo de terminación_, deberías poner el valor de `.spec.completions` al número de terminaciones que se necesiten.
|
||||
Puedes dar un valor a `.spec.parallelism`, o dejarlo sin valor, en cuyo caso se predetermina a 1.
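A modo de esbozo orientativo (el nombre `procesa-lote`, la imagen y el comando son supuestos, no forman parte de la documentación original), un Job en paralelo con cupo fijo de terminación podría definirse así:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: procesa-lote
spec:
  completions: 5      # la tarea necesita 5 terminaciones satisfactorias en total
  parallelism: 2      # como mucho 2 Pods ejecutándose a la vez
  template:
    spec:
      containers:
      - name: worker
        image: busybox
        command: ["sh", "-c", "echo procesando un item; sleep 5"]
      restartPolicy: Never
```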
|
||||
|
||||
En un Job con _cola de trabajo_, no debes indicar el valor de `.spec.completions`, y poner el valor de `.spec.parallelism` a
|
||||
un entero no negativo.
|
||||
|
||||
Para más información acerca de cómo usar los distintos tipos de Job, ver la sección de [patrones de job](#job-patterns).
|
||||
|
||||
|
||||
#### Controlar el paralelismo
|
||||
|
||||
El paralelismo solicitado (`.spec.parallelism`) puede usar cualquier valor no negativo.
|
||||
Si no se indica, se predetermina a 1.
|
||||
Si se indica como 0, entonces el Job se pausa de forma efectiva hasta que se incremente.
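Por ejemplo, para pausar de forma temporal un Job existente se puede poner su paralelismo a 0 con `kubectl patch` (esbozo orientativo; aquí se usa el Job `pi` del ejemplo anterior):

```shell
# pausa el Job poniendo su paralelismo a 0
kubectl patch job pi -p '{"spec":{"parallelism":0}}'

# reanúdalo más tarde restaurando el valor deseado
kubectl patch job pi -p '{"spec":{"parallelism":1}}'
```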
|
||||
|
||||
El paralelismo actual (número de pods ejecutándose en cada momento) puede que sea mayor o menor que el solicitado,
|
||||
por los siguientes motivos:
|
||||
|
||||
- Para los Jobs con _cupo fijo de terminaciones_, el número actual de pods ejecutándose en paralelo no excede el número de terminaciones pendientes.
|
||||
Los valores superiores de `.spec.parallelism` se ignoran.
|
||||
- Para los Jobs con _cola de trabajo_, no se arrancan nuevos Pods después de que cualquier Pod se haya completado -- sin embargo, se permite que se completen los Pods pendientes.
|
||||
- Cuando el controlador no ha tenido tiempo para reaccionar.
|
||||
- Cuando el controlador no pudo crear los Pods por el motivo que fuera (falta de `ResourceQuota`, falta de permisos, etc.),
|
||||
entonces puede que haya menos pods que los solicitados.
|
||||
- El controlador puede que regule la creación de nuevos Pods debido al excesivo número de fallos anteriores en el mismo Job.
|
||||
- Cuando un Pod se para de forma controlada, lleva tiempo pararlo.
|
||||
|
||||
## Gestionar Fallos de Pod y Contenedor
|
||||
|
||||
Un contenedor de un Pod puede fallar por cualquier motivo, como porque el proceso que se estaba ejecutando termina con un código de salida distinto de cero,
|
||||
o porque se mató el contenedor por exceder un límite de memoria, etc. Si esto ocurre, y se tiene
|
||||
`.spec.template.spec.restartPolicy = "OnFailure"`, entonces el Pod permanece en el nodo,
|
||||
pero el contenedor se vuelve a ejecutar. Por lo tanto, tu aplicación debe poder gestionar el caso en que se reinicia de forma local,
|
||||
o bien especificar `.spec.template.spec.restartPolicy = "Never"`.
|
||||
Ver el [ciclo de vida de un pod](/docs/concepts/workloads/pods/pod-lifecycle/#example-states) para más información sobre `restartPolicy`.
|
||||
|
||||
Un Pod entero puede también fallar por cualquier motivo, como cuando se expulsa al Pod del nodo
|
||||
(porque el nodo se actualiza, reinicia, elimina, etc.), o si un contenedor del Pod falla
|
||||
cuando `.spec.template.spec.restartPolicy = "Never"`. Cuando un Pod falla, entonces el controlador del Job
|
||||
arranca un nuevo Pod. Esto quiere decir que tu aplicación debe ser capaz de gestionar el caso en que se reinicia en un nuevo pod.
|
||||
En particular, debe ser capaz de gestionar los ficheros temporales, los bloqueos, los resultados incompletos, y cualquier otra dependencia
|
||||
de ejecuciones previas.
|
||||
|
||||
Nótese que incluso si se configura `.spec.parallelism = 1` y `.spec.completions = 1` y
|
||||
`.spec.template.spec.restartPolicy = "Never"`, el mismo programa puede arrancarse dos veces.
|
||||
|
||||
Si se especifica `.spec.parallelism` y `.spec.completions` con valores mayores que 1,
|
||||
entonces puede que haya múltiples pods ejecutándose a la vez. Por ello, tus pods deben tolerar la concurrencia.
|
||||
|
||||
### Regla de retroceso de Pod por fallo
|
||||
|
||||
Hay situaciones en que quieres que el Job falle después de intentar ejecutarlo unas cuantas veces debido
|
||||
a un error lógico en la configuración, etc.
|
||||
Para hacerlo, pon el valor de `.spec.backoffLimit` al número de reintentos que quieres
|
||||
antes de considerar el Job como fallido. El límite de retroceso se predetermina a 6.
|
||||
Los Pods fallidos asociados al Job son recreados por el controlador del Job con un
|
||||
retroceso exponencial (10s, 20s, 40s ...) limitado a seis minutos. El contador
|
||||
de retroceso se resetea si no aparecen Pods fallidos antes del siguiente chequeo de estado del Job.
|
||||
|
||||
{{< note >}}
|
||||
El problema [#54870](https://github.com/kubernetes/kubernetes/issues/54870) todavía existe en las versiones de Kubernetes anteriores a la versión 1.12
|
||||
{{< /note >}}
|
||||
|
||||
## Terminación y Limpieza de un Job
|
||||
|
||||
Cuando un Job se completa, ya no se crea ningún Pod, pero tampoco se eliminan los Pods. Guardarlos permite
|
||||
ver todavía los logs de los pods acabados para comprobar errores, avisos, o cualquier otro resultado de diagnóstico.
|
||||
El objeto job también se conserva una vez que se ha completado para que se pueda ver su estado. Es decisión del usuario si elimina
|
||||
los viejos jobs después de comprobar su estado. Eliminar el job con el comando `kubectl` (ej. `kubectl delete jobs/pi` o `kubectl delete -f ./job.yaml`).
|
||||
Cuando eliminas un job usando el comando `kubectl`, todos los pods que creó se eliminan también.
|
||||
|
||||
Por defecto, un Job se ejecutará de forma ininterrumpida a menos que uno de los Pods falle, en cuyo caso el Job se rige por el valor de
|
||||
`.spec.backoffLimit` descrito arriba. Otra forma de acabar un Job es poniéndole un vencimiento activo.
|
||||
Haz esto poniendo el valor del campo `.spec.activeDeadlineSeconds` del Job a un número de segundos.
|
||||
|
||||
El campo `activeDeadlineSeconds` se aplica a la duración del job, independientemente de cuántos Pods se hayan creado.
|
||||
Una vez que el Job alcanza `activeDeadlineSeconds`, se terminan todos sus Pods y el estado del Job se pone como `type: Failed` con `reason: DeadlineExceeded`.
|
||||
|
||||
Fíjate que el campo `.spec.activeDeadlineSeconds` de un Job tiene precedencia sobre el campo `.spec.backoffLimit`.
|
||||
Por lo tanto, un Job que está reintentando uno o más Pods fallidos no desplegará nuevos Pods una vez que alcance el límite de tiempo especificado por `activeDeadlineSeconds`,
|
||||
incluso si todavía no se ha alcanzado el `backoffLimit`.
|
||||
|
||||
Ejemplo:
|
||||
|
||||
```yaml
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: pi-with-timeout
|
||||
spec:
|
||||
backoffLimit: 5
|
||||
activeDeadlineSeconds: 100
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: pi
|
||||
image: perl
|
||||
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
|
||||
restartPolicy: Never
|
||||
```
|
||||
|
||||
Fíjate que tanto la especificación del Job como la [especificación de la plantilla Pod](/docs/concepts/workloads/pods/init-containers/#detailed-behavior)
|
||||
dentro del Job tienen un campo `activeDeadlineSeconds`. Asegúrate que pones el valor de este campo de forma adecuada.
|
||||
|
||||
## Limpiar los Jobs terminados automáticamente
|
||||
|
||||
Normalmente, los Jobs que han terminado ya no se necesitan en el sistema. Conservarlos sólo añade
|
||||
más presión al servidor API. Si dichos Jobs no se gestionan de forma directa por un controlador de más alto nivel,
|
||||
como los [CronJobs](/docs/concepts/workloads/controllers/cron-jobs/), los Jobs pueden
|
||||
limpiarse por medio de CronJobs en base a la regla de limpieza basada en capacidad que se haya especificado.
|
||||
|
||||
### Mecanismo TTL para Jobs terminados
|
||||
|
||||
{{< feature-state for_k8s_version="v1.12" state="alpha" >}}
|
||||
|
||||
Otra forma de limpiar los Jobs terminados (bien `Complete` o `Failed`)
|
||||
de forma automática es usando un mecanismo TTL proporcionado por un
|
||||
[controlador TTL](/docs/concepts/workloads/controllers/ttlafterfinished/) de recursos finalizados,
|
||||
indicando el valor `.spec.ttlSecondsAfterFinished` del Job.
|
||||
|
||||
Cuando el controlador TTL limpia el Job, lo eliminará en cascada,
|
||||
esto es, eliminará sus objetos subordinados, como Pods, junto con el Job. Nótese
|
||||
que cuando se elimina el Job, sus garantías de ciclo de vida, como los finalizadores,
|
||||
se tendrán en cuenta.
|
||||
|
||||
Por ejemplo:
|
||||
|
||||
```yaml
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: pi-with-ttl
|
||||
spec:
|
||||
ttlSecondsAfterFinished: 100
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: pi
|
||||
image: perl
|
||||
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
|
||||
restartPolicy: Never
|
||||
```
|
||||
|
||||
Aquí el Job `pi-with-ttl` será candidato a ser automáticamente eliminado, `100`
|
||||
segundos después de que termine.
|
||||
|
||||
Si el campo se pone a `0`, el Job será candidato a ser automáticamente eliminado
|
||||
inmediatamente después de haber terminado. Si no se pone valor al campo, este Job no será eliminado
|
||||
por el controlador TTL una vez concluya.
|
||||
|
||||
Nótese que este mecanismo TTL está todavía en alpha, a través de la característica denominada `TTLAfterFinished`.
|
||||
Para más información, ver la documentación del [controlador TTL](/docs/concepts/workloads/controllers/ttlafterfinished/) para
|
||||
recursos terminados.
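Como esbozo orientativo (las banderas exactas dependen de cómo se despliegue el plano de control; esto es una suposición, no parte de la documentación original), al tratarse de una característica alpha habría que habilitar la puerta de característica en los componentes correspondientes:

```shell
# habilitar la característica alpha en los componentes del plano de control
kube-apiserver --feature-gates=TTLAfterFinished=true
kube-controller-manager --feature-gates=TTLAfterFinished=true
```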
|
||||
|
||||
## Patrones de Job
|
||||
|
||||
El objeto Job puede usarse para dar soporte a la ejecución fiable de Pods en paralelo. El objeto Job
|
||||
no se diseñó para dar soporte a procesos paralelos estrechamente comunicados, como los que comúnmente
|
||||
se encuentran en la computación científica. Eso sí, permite el proceso paralelo de un conjunto de *ítems de trabajo* independientes, pero relacionados entre sí.
|
||||
Estos pueden ser correos a enviar, marcos a renderizar, archivos a codificar, rangos de claves en una base de datos NoSQL a escanear, y demás.
|
||||
|
||||
En un sistema complejo, puede haber múltiples conjuntos diferentes de ítems de trabajo. Aquí sólo se está
|
||||
considerando un conjunto de ítems de trabajo que el usuario quiere gestionar de forma conjunta — un *proceso por lotes*.
|
||||
|
||||
Hay varios patrones diferentes para computación en paralelo, cada uno con sus fortalezas y sus debilidades.
|
||||
Los sacrificios a tener en cuenta son:
|
||||
|
||||
- Un objeto Job para cada ítem de trabajo vs. un objeto Job simple para todos los ítems de trabajo. El último es mejor
|
||||
para grandes números de ítems de trabajo. El primero añade sobrecarga para el usuario y para el sistema
|
||||
al tener que gestionar grandes números de objetos Job.
|
||||
- El número de pods creados es igual al número de ítems de trabajo vs. cada Pod puede procesar múltiples ítems de trabajo.
|
||||
El primero típicamente requiere menos modificaciones al código existente y a los contenedores.
|
||||
El último es mejor cuanto mayor sea el número de ítems de trabajo, por las mismas razones que antes.
|
||||
- Varios enfoques usan una cola de trabajo. Ello requiere ejecutar un servicio de colas,
|
||||
y modificaciones a las aplicaciones o contenedores existentes para que hagan uso de la cola de trabajo.
|
||||
Otras estrategias son más fáciles de adaptar a una aplicación que ya utiliza contenedores.
|
||||
|
||||
|
||||
Los sacrificios a tener en cuenta se indican a continuación, donde las columnas 2 a 4 representan los sacrificios de arriba.
|
||||
Los nombres de los patrones son también enlaces a ejemplos e información más detallada.
|
||||
|
||||
| Patrón | Objeto Job simple | ¿Menos pods que ítems de trabajo? | ¿No modificar la aplicación? | ¿Funciona en Kube 1.1? |
|
||||
| -------------------------------------------------------------------- |:-----------------:|:---------------------------:|:-------------------:|:-------------------:|
|
||||
| [Extensión de la Plantilla Job](/docs/tasks/job/parallel-processing-expansion/) | | | ✓ | ✓ |
|
||||
| [Cola con Pod por Ítem de Trabajo](/docs/tasks/job/coarse-parallel-processing-work-queue/) | ✓ | | a veces | ✓ |
|
||||
| [Cola con Cuenta Variable de Pods](/docs/tasks/job/fine-parallel-processing-work-queue/) | ✓ | ✓ | | ✓ |
|
||||
| Job simple con Asignación Estática de Trabajo | ✓ | | ✓ | |
|
||||
|
||||
Cuando se especifican terminaciones con `.spec.completions`, cada Pod creado por el controlador del Job
|
||||
tiene un [`spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) idéntico.
|
||||
Esto significa que todos los pods de una tarea tendrán la misma línea de comandos y la
|
||||
misma imagen, los mismos volúmenes, y (casi) las mismas variables de entorno.
|
||||
Estos patrones otorgan diferentes formas de organizar los pods para que trabajen en cosas distintas.
|
||||
|
||||
Esta tabla muestra la configuración necesaria para `.spec.parallelism` y `.spec.completions` para cada uno de los patrones.
|
||||
Aquí, `T` es el número de ítems de trabajo.
|
||||
|
||||
| Patrón | `.spec.completions` | `.spec.parallelism` |
|
||||
| -------------------------------------------------------------------- |:-------------------:|:--------------------:|
|
||||
| [Extensión de la Plantilla Job](/docs/tasks/job/parallel-processing-expansion/) | 1 | debería ser 1 |
|
||||
| [Cola con Pod por Ítem de Trabajo](/docs/tasks/job/coarse-parallel-processing-work-queue/) | T | cualquiera |
|
||||
| [Cola con Cuenta Variable de Pods](/docs/tasks/job/fine-parallel-processing-work-queue/) | 1 | cualquiera |
|
||||
| Job simple con Asignación Estática de Trabajo | T | cualquiera |
|
||||
|
||||
|
||||
## Uso Avanzado
|
||||
|
||||
### Especificar tu propio selector de pod
|
||||
|
||||
Normalmente, cuando creas un objeto Job, no especificas el campo `.spec.selector`.
|
||||
La lógica por defecto del sistema añade este campo cuando se crea el Job.
|
||||
Se elige un valor de selector que no se entremezcle con otras tareas.
|
||||
|
||||
Sin embargo, en algunos casos, puede que necesites sobreescribir este selector que se configura de forma automática.
|
||||
Para ello, puedes indicar el valor de `.spec.selector` en el Job.
|
||||
|
||||
Pero ten mucho cuidado cuando lo hagas. Si configuras un selector de etiqueta que no
|
||||
es único para los pods de ese Job, y que selecciona Pods que no tienen nada que ver,
|
||||
entonces estos últimos pueden ser eliminados, o este Job puede contar los otros
|
||||
Pods como si lo completaran, o uno o ambos Jobs pueden negarse a crear Pods o ejecutarse hasta el final.
|
||||
Si se elige un selector que no es único, entonces otros controladores (ej. ReplicationController)
|
||||
y sus Pods pueden comportarse de forma impredecible también. Kubernetes no te impide cometer un error
|
||||
especificando el `.spec.selector`.
|
||||
|
||||
Aquí se muestra un ejemplo de un caso en que puede que necesites usar esta característica.
|
||||
|
||||
Digamos que el Job `viejo` todavía está ejecutándose. Quieres que los Pods existentes
|
||||
sigan corriendo, pero quieres que el resto de los Pods que se creen
|
||||
usen una plantilla pod diferente y que el Job tenga un nombre nuevo.
|
||||
Como no puedes modificar el Job porque esos campos no son modificables, eliminas el Job `viejo`,
|
||||
pero _dejas sus pods ejecutándose_ mediante el comando `kubectl delete jobs/old --cascade=false`.
|
||||
Antes de eliminarlo, apúntate el selector actual que está usando:
|
||||
|
||||
```
|
||||
kind: Job
|
||||
metadata:
|
||||
name: viejo
|
||||
...
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
job-uid: a8f3d00d-c6d2-11e5-9f87-42010af00002
|
||||
...
|
||||
```
|
||||
|
||||
Entonces, creas un nuevo Job con el nombre `nuevo` y le configuras explícitamente el mismo selector.
|
||||
Puesto que los Pods existentes tienen la etiqueta `job-uid=a8f3d00d-c6d2-11e5-9f87-42010af00002`,
|
||||
son controlados por el Job `nuevo` igualmente.
|
||||
|
||||
Necesitas configurar `manualSelector: true` en el nuevo Job, ya que no estás usando
|
||||
el selector que normalmente se genera de forma automática por el sistema.
|
||||
|
||||
```
|
||||
kind: Job
|
||||
metadata:
|
||||
name: nuevo
|
||||
...
|
||||
spec:
|
||||
manualSelector: true
|
||||
selector:
|
||||
matchLabels:
|
||||
job-uid: a8f3d00d-c6d2-11e5-9f87-42010af00002
|
||||
...
|
||||
```
|
||||
|
||||
El nuevo Job tendrá un uid distinto de `a8f3d00d-c6d2-11e5-9f87-42010af00002`.
|
||||
Poniendo `manualSelector: true` le dice al sistema que sabes lo que estás haciendo
|
||||
y que te permita hacer este desajuste.
|
||||
|
||||
## Alternativas
|
||||
|
||||
### Pods simples
|
||||
|
||||
Cuando el nodo donde un Pod simple se estaba ejecutando se reinicia o falla, dicho pod se termina
|
||||
y no será reiniciado. Sin embargo, un Job creará nuevos Pods para sustituir a los que hayan terminado.
|
||||
Por esta razón, se recomienda que se use un Job en vez de un Pod simple, incluso si tu aplicación
|
||||
sólo necesita un único Pod.
|
||||
|
||||
### Replication Controller
|
||||
|
||||
Los Jobs son complementarios a los [Replication Controllers](/docs/user-guide/replication-controller).
|
||||
Un Replication Controller gestiona aquellos Pods que se espera que no terminen (ej. servidores web), y un Job
|
||||
gestiona aquellos Pods que se espera que terminen (ej. tareas por lotes).
|
||||
|
||||
Como se discutió en el [Ciclo de vida de un Pod](/docs/concepts/workloads/pods/pod-lifecycle/), un `Job` *sólo* es apropiado
|
||||
para aquellos pods con `RestartPolicy` igual a `OnFailure` o `Never`.
|
||||
(Nota: Si `RestartPolicy` no se pone, el valor predeterminado es `Always`.)
|
||||
|
||||
### Job simple que arranca un controlador de Pod
|
||||
|
||||
Otro patrón es aquel donde un Job simple crea un Pod que, a su vez, crea otros Pods, actuando como una especie
|
||||
de controlador personalizado para esos Pods. Esto da la máxima flexibilidad, pero puede que
|
||||
cueste un poco más de entender y ofrece menos integración con Kubernetes.
|
||||
|
||||
Un ejemplo de este patrón sería un Job que arranca un Pod que ejecuta una secuencia de comandos que, a su vez,
|
||||
arranca un controlador maestro de Spark (ver el [ejemplo de spark](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/spark/README.md)),
|
||||
ejecuta un driver de Spark, y a continuación lo limpia todo.
|
||||
|
||||
Una ventaja de este enfoque es que el proceso general obtiene la garantía del objeto Job,
|
||||
además del control completo de los Pods que se crean y cómo se les asigna trabajo.
|
||||
|
||||
## Cron Jobs {#cron-jobs}
|
||||
|
||||
Puedes utilizar un [`CronJob`](/docs/concepts/workloads/controllers/cron-jobs/) para crear un Job que se ejecute en una hora/fecha determinadas, de forma similar
|
||||
a la herramienta `cron` de Unix.
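A modo de esbozo orientativo (el nombre `hola`, la imagen y la planificación son supuestos, y la versión de la API del CronJob puede variar según la versión del clúster), un CronJob que lanza un Job cada minuto podría definirse así:

```yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hola
spec:
  schedule: "*/1 * * * *"   # cada minuto, con la sintaxis de cron
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hola
            image: busybox
            command: ["sh", "-c", "date; echo Hola desde el CronJob"]
          restartPolicy: OnFailure
```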
|
||||
|
||||
{{% /capture %}}
|
|
@ -0,0 +1,370 @@
|
|||
---
|
||||
title: ReplicaSet
|
||||
content_template: templates/concept
|
||||
weight: 10
|
||||
---
|
||||
|
||||
{{% capture overview %}}
|
||||
|
||||
El objeto de un ReplicaSet es el de mantener un conjunto estable de réplicas de Pods ejecutándose
|
||||
en todo momento. Así, se usa en numerosas ocasiones para garantizar la disponibilidad de un
|
||||
número específico de Pods idénticos.
|
||||
|
||||
|
||||
{{% /capture %}}
|
||||
|
||||
{{% capture body %}}
|
||||
|
||||
## Cómo funciona un ReplicaSet
|
||||
|
||||
Un ReplicaSet se define con campos, incluyendo un selector que indica cómo identificar a los Pods que puede adquirir,
|
||||
un número de réplicas indicando cuántos Pods debería gestionar, y una plantilla pod especificando los datos de los nuevos Pods
|
||||
que debería crear para conseguir el número de réplicas esperado. Un ReplicaSet alcanza entonces su propósito
|
||||
mediante la creación y eliminación de los Pods que sea necesario para alcanzar el número esperado.
|
||||
Cuando un ReplicaSet necesita crear nuevos Pods, utiliza su plantilla Pod.
|
||||
|
||||
El enlace que un ReplicaSet tiene hacia sus Pods es a través del campo del Pod denominado [metadata.ownerReferences](/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents),
|
||||
el cual indica qué recurso es el propietario del objeto actual. Todos los Pods adquiridos por un ReplicaSet tienen su propia
|
||||
información de identificación del ReplicaSet en su campo ownerReferences. Y es a través de este enlace
|
||||
cómo el ReplicaSet conoce el estado de los Pods que está gestionando y actúa en consecuencia.
|
||||
|
||||
Un ReplicaSet identifica los nuevos Pods a adquirir usando su selector. Si hay un Pod que no tiene OwnerReference
|
||||
o donde OwnerReference no es un controlador, pero coincide con el selector del ReplicaSet,
|
||||
este será inmediatamente adquirido por dicho ReplicaSet.
|
||||
|
||||
## Cuándo usar un ReplicaSet
|
||||
|
||||
Un ReplicaSet garantiza que un número específico de réplicas de un pod se está ejecutando en todo momento.
|
||||
Sin embargo, un Deployment es un concepto de más alto nivel que gestiona ReplicaSets y
|
||||
proporciona actualizaciones de forma declarativa de los Pods junto con muchas otras características útiles.
|
||||
Por lo tanto, se recomienda el uso de Deployments en vez del uso directo de ReplicaSets, a no ser
|
||||
que se necesite una orquestación personalizada de la actualización o no se necesiten actualizaciones en absoluto.
|
||||
|
||||
En realidad, esto quiere decir que puede que nunca necesites manipular los objetos ReplicaSet:
|
||||
en vez de ello, usa un Deployment, y define tu aplicación en la sección spec.
|
||||
|
||||
## Ejemplo
|
||||
|
||||
{{< codenew file="controllers/frontend.yaml" >}}
|
||||
|
||||
Si guardas este manifiesto en un archivo llamado `frontend.yaml` y lo lanzas en un clúster de Kubernetes,
|
||||
se creará el ReplicaSet definido y los Pods que maneja.
|
||||
|
||||
```shell
|
||||
kubectl apply -f http://k8s.io/examples/controllers/frontend.yaml
|
||||
```
|
||||
|
||||
Puedes ver los ReplicaSets actuales desplegados:
|
||||
```shell
|
||||
kubectl get rs
|
||||
```
|
||||
|
||||
Y ver el frontend que has creado:
|
||||
```shell
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
frontend 3 3 3 6s
|
||||
```
|
||||
|
||||
También puedes comprobar el estado del replicaset:
|
||||
```shell
|
||||
kubectl describe rs/frontend
|
||||
```
|
||||
|
||||
Y verás una salida parecida a la siguiente:
|
||||
```shell
|
||||
Name: frontend
|
||||
Namespace: default
|
||||
Selector: tier=frontend,tier in (frontend)
|
||||
Labels: app=guestbook
|
||||
tier=frontend
|
||||
Annotations: <none>
|
||||
Replicas: 3 current / 3 desired
|
||||
Pods Status: 3 Running / 0 Waiting / 0 Succeeded / 0 Failed
|
||||
Pod Template:
|
||||
Labels: app=guestbook
|
||||
tier=frontend
|
||||
Containers:
|
||||
php-redis:
|
||||
Image: gcr.io/google_samples/gb-frontend:v3
|
||||
Port: 80/TCP
|
||||
Requests:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
Environment:
|
||||
GET_HOSTS_FROM: dns
|
||||
Mounts: <none>
|
||||
Volumes: <none>
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
1m 1m 1 {replicaset-controller } Normal SuccessfulCreate Created pod: frontend-qhloh
|
||||
1m 1m 1 {replicaset-controller } Normal SuccessfulCreate Created pod: frontend-dnjpy
|
||||
1m 1m 1 {replicaset-controller } Normal SuccessfulCreate Created pod: frontend-9si5l
|
||||
```
|
||||
|
||||
Y por último, puedes comprobar los Pods que ha arrancado:
|
||||
```shell
|
||||
kubectl get Pods
|
||||
```
|
||||
|
||||
Deberías ver la información de cada Pod similar a:
|
||||
```shell
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
frontend-9si5l 1/1 Running 0 1m
|
||||
frontend-dnjpy 1/1 Running 0 1m
|
||||
frontend-qhloh 1/1 Running 0 1m
|
||||
```
|
||||
|
||||
También puedes verificar que la referencia de propietario de dichos pods está puesta al ReplicaSet frontend.
|
||||
Para ello, obtén el yaml de uno de los Pods ejecutándose:
|
||||
```shell
|
||||
kubectl get pods frontend-9si5l -o yaml
|
||||
```
|
||||
|
||||
La salida será parecida a esta, donde la información sobre el ReplicaSet aparece en el campo ownerReferences de los metadatos:
|
||||
```shell
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
creationTimestamp: 2019-01-31T17:20:41Z
|
||||
generateName: frontend-
|
||||
labels:
|
||||
tier: frontend
|
||||
name: frontend-9si5l
|
||||
namespace: default
|
||||
ownerReferences:
|
||||
- apiVersion: extensions/v1beta1
|
||||
blockOwnerDeletion: true
|
||||
controller: true
|
||||
kind: ReplicaSet
|
||||
name: frontend
|
||||
uid: 892a2330-257c-11e9-aecd-025000000001
|
||||
...
|
||||
```
|
||||
|
||||
## Adquisiciones de Pods fuera de la plantilla
|
||||
|
||||
Aunque puedes crear Pods simples sin problemas, se recomienda encarecidamente asegurarse de que dichos Pods no tienen
|
||||
etiquetas que puedan coincidir con el selector de alguno de tus ReplicaSets.
|
||||
La razón de esta recomendación es que un ReplicaSet no se limita a poseer los Pods
|
||||
especificados en su plantilla -- sino que puede adquirir otros Pods como se explicó en secciones anteriores.
|
||||
|
||||
Toma el ejemplo anterior del ReplicaSet frontend, y los Pods especificados en el siguiente manifiesto:
|
||||
|
||||
{{< codenew file="pods/pod-rs.yaml" >}}
|
||||
|
||||
Como estos Pods no tienen un Controlador (o cualquier otro objeto) como referencia de propietario
|
||||
y como además su selector coincide con el del ReplicaSet frontend, este último los terminará adquiriendo de forma inmediata.
|
||||
|
||||
Supón que creas los Pods después de que el ReplicaSet frontend haya desplegado los suyos
|
||||
para satisfacer su requisito de cuenta de réplicas:
|
||||
|
||||
```shell
|
||||
kubectl apply -f http://k8s.io/examples/pods/pod-rs.yaml
|
||||
```
|
||||
|
||||
Los nuevos Pods serán adquiridos por el ReplicaSet, e inmediatamente terminados ya que
|
||||
el ReplicaSet estaría por encima del número deseado.
|
||||
|
||||
Obtener los Pods:
|
||||
```shell
|
||||
kubectl get Pods
|
||||
```
|
||||
|
||||
La salida muestra que los nuevos Pods se han terminado, o están en el proceso de terminarse:
|
||||
```shell
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
frontend-9si5l 1/1 Running 0 1m
|
||||
frontend-dnjpy 1/1 Running 0 1m
|
||||
frontend-qhloh 1/1 Running 0 1m
|
||||
pod2 0/1 Terminating 0 4s
|
||||
```
|
||||
|
||||
Si creas primero los Pods:
|
||||
```shell
|
||||
kubectl apply -f http://k8s.io/examples/pods/pod-rs.yaml
|
||||
```
|
||||
|
||||
Y entonces creas el ReplicaSet:
|
||||
```shell
|
||||
kubectl apply -f http://k8s.io/examples/controllers/frontend.yaml
|
||||
```
|
||||
|
||||
Verás que el ReplicaSet ha adquirido dichos Pods y simplemente ha creado tantos nuevos
|
||||
como sean necesarios para cumplir con su especificación hasta que el número de
|
||||
sus nuevos Pods y los originales coincida con la cuenta deseada. Al obtener los Pods:
|
||||
```shell
|
||||
kubectl get Pods
|
||||
```
|
||||
|
||||
Veremos su salida:
|
||||
```shell
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
frontend-pxj4r 1/1 Running 0 5s
|
||||
pod1 1/1 Running 0 13s
|
||||
pod2 1/1 Running 0 13s
|
||||
```
|
||||
|
||||
De esta forma, un ReplicaSet puede poseer un conjunto no homogéneo de Pods.
|
||||
|
||||
## Escribir un manifiesto de ReplicaSet
|
||||
|
||||
Al igual que con el resto de los objetos de la API de Kubernetes, un ReplicaSet necesita los campos
|
||||
`apiVersion`, `kind`, y `metadata`. Para los ReplicaSets, el tipo es siempre ReplicaSet.
|
||||
En la versión 1.9 de Kubernetes, la versión `apps/v1` de la API para el tipo ReplicaSet es la versión actual y está habilitada por defecto.
|
||||
La versión `apps/v1beta2` de la API se ha desaprobado.
|
||||
Consulta las primeras líneas del ejemplo `frontend.yaml` como guía.
|
||||
|
||||
Un ReplicaSet también necesita una [sección `.spec`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
|
||||
|
||||
### Plantilla Pod
|
||||
|
||||
El campo `.spec.template` es una [plantilla pod](/docs/concepts/workloads/Pods/pod-overview/#pod-templates) que
|
||||
también necesita obligatoriamente tener etiquetas definidas. En nuestro ejemplo `frontend.yaml` teníamos una etiqueta: `tier: frontend`.
|
||||
Lleva cuidado de que no se entremezcle con los selectores de otros controladores, no sea que traten de adquirir este Pod.
|
||||
|
||||
Para el campo de [regla de reinicio](/docs/concepts/workloads/Pods/pod-lifecycle/#restart-policy) de la plantilla,
|
||||
`.spec.template.spec.restartPolicy`, el único valor permitido es `Always`, que es el valor predeterminado.
|
||||
|
||||
### Selector de Pod
|
||||
|
||||
El campo `.spec.selector` es un [selector de etiqueta](/docs/concepts/overview/working-with-objects/labels/).
|
||||
Como se explicó [anteriormente](#how-a-replicaset-works), estas son las etiquetas que se usan para
|
||||
identificar los Pods potenciales a adquirir. En nuestro ejemplo `frontend.yaml`, el selector era:
|
||||
```shell
|
||||
matchLabels:
|
||||
tier: frontend
|
||||
```
|
||||
|
||||
En el ReplicaSet, `.spec.template.metadata.labels` debe coincidir con `spec.selector`, o será
|
||||
rechazado por la API.
|
||||
|
||||
{{< note >}}
|
||||
Cuando 2 ReplicaSets especifican el mismo campo `.spec.selector`, pero los campos
|
||||
`.spec.template.metadata.labels` y `.spec.template.spec` diferentes, cada ReplicaSet
|
||||
ignora los Pods creados por el otro ReplicaSet.
|
||||
{{< /note >}}
|
||||
|
||||
### Réplicas
|
||||
|
||||
Puedes configurar cuántos Pods deberían ejecutarse de forma concurrente indicando el campo `.spec.replicas`.
|
||||
El ReplicaSet creará/eliminará sus Pods para alcanzar este número.
|
||||
|
||||
Si no indicas el valor del campo `.spec.replicas`, entonces por defecto se inicializa a 1.
|
||||
|
||||
## Trabajar con ReplicaSets
|
||||
|
||||
### Eliminar un ReplicaSet y sus Pods
|
||||
|
||||
Para eliminar un ReplicaSet y todos sus Pods, utiliza el comando [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete).
|
||||
El [Recolector de basura](/docs/concepts/workloads/controllers/garbage-collection/) eliminará automáticamente
|
||||
todos los Pods subordinados por defecto.
|
||||
|
||||
Cuando se usa la API REST o la librería `client-go`, se debe poner el valor de `propagationPolicy` a `Background` o
|
||||
`Foreground` en la opción `-d`.
|
||||
Por ejemplo:
|
||||
```shell
|
||||
kubectl proxy --port=8080
|
||||
curl -X DELETE 'localhost:8080/apis/extensions/v1beta1/namespaces/default/replicasets/frontend' \
|
||||
> -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}' \
|
||||
> -H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
### Eliminar sólo un ReplicaSet
|
||||
|
||||
Se puede eliminar un ReplicaSet sin afectar a ninguno de sus Pods usando el comando [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands#delete) con la opción `--cascade=false`.
|
||||
Cuando se usa la API REST o la librería `client-go`, se debe poner `propagationPolicy` a `Orphan`.
|
||||
Por ejemplo:
|
||||
```shell
|
||||
kubectl proxy --port=8080
|
||||
curl -X DELETE 'localhost:8080/apis/extensions/v1beta1/namespaces/default/replicasets/frontend' \
|
||||
> -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Orphan"}' \
|
||||
> -H "Content-Type: application/json"
|
||||
```
|
||||
|
||||
Una vez que se ha eliminado el original, se puede crear un nuevo ReplicaSet para sustituirlo.
|
||||
Mientras el viejo y el nuevo `.spec.selector` sean el mismo, el nuevo adoptará a los viejos Pods.
|
||||
Sin embargo, no se esforzará en conseguir que los Pods existentes coincidan con una plantilla pod nueva, diferente.
|
||||
Para actualizar dichos Pods a la nueva especificación de forma controlada,
|
||||
usa una [actualización en línea](#rolling-updates).
|
||||
|
||||
### Aislar Pods de un ReplicaSet
|
||||
|
||||
Es posible aislar Pods de un ReplicaSet cambiando sus etiquetas. Esta técnica puede usarse
|
||||
para eliminar Pods de un servicio para poder depurar, recuperar datos, etc. Los Pods
|
||||
que se eliminan de esta forma serán sustituidos de forma automática (siempre que el
|
||||
número de réplicas no haya cambiado).
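Por ejemplo, un esbozo orientativo usando el Pod del ejemplo anterior (el valor de la etiqueta `debug` es una suposición): al sobrescribir la etiqueta `tier`, el Pod deja de coincidir con el selector del ReplicaSet, que crea un sustituto, mientras el Pod original queda aislado para su depuración.

```shell
# quita el Pod del ReplicaSet sobrescribiendo la etiqueta que usa el selector
kubectl label pod frontend-9si5l tier=debug --overwrite

# el ReplicaSet crea un Pod nuevo para mantener las réplicas deseadas;
# el Pod aislado sigue ejecutándose pero ya no pertenece al ReplicaSet
kubectl get pods
```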
|
||||
|
||||
### Escalar un ReplicaSet
|
||||
|
||||
Se puede aumentar o reducir fácilmente un ReplicaSet simplemente actualizando el campo `.spec.replicas`.
|
||||
El controlador del ReplicaSet se asegura de que el número deseado de Pods con un selector
|
||||
de etiquetas coincidente está disponible y operacional.
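Por ejemplo, además de editar el manifiesto y volver a aplicarlo, se puede usar `kubectl scale` sobre el ReplicaSet `frontend` del ejemplo anterior (esbozo orientativo):

```shell
# aumenta el ReplicaSet a 5 réplicas
kubectl scale rs/frontend --replicas=5

# comprueba el resultado
kubectl get rs frontend
```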
|
||||
|
||||
### ReplicaSet como blanco de un Horizontal Pod Autoscaler
|
||||
|
||||
Un ReplicaSet puede también ser el blanco de un
|
||||
[Horizontal Pod Autoscalers (HPA)](/docs/tasks/run-application/horizontal-pod-autoscale/). Esto es,
|
||||
un ReplicaSet puede auto-escalarse mediante un HPA. Aquí se muestra un ejemplo de HPA dirigido
|
||||
al ReplicaSet que creamos en el ejemplo anterior.
|
||||
|
||||
{{< codenew file="controllers/hpa-rs.yaml" >}}
|
||||
|
||||
Si guardas este manifiesto en un archivo `hpa-rs.yaml` y lo lanzas contra el clúster de Kubernetes,
|
||||
debería crear el HPA definido que auto-escala el ReplicaSet destino dependiendo del uso
|
||||
de CPU de los Pods replicados.
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/controllers/hpa-rs.yaml
|
||||
```
|
||||
|
||||
Alternativamente, puedes usar el comando `kubectl autoscale` para conseguir el mismo objetivo
|
||||
(¡y mucho más fácil!)
|
||||
|
||||
```shell
|
||||
kubectl autoscale rs frontend --max=10
|
||||
```
|
||||
|
||||
## Alternativas al ReplicaSet
|
||||
|
||||
### Deployment (recomendado)
|
||||
|
||||
Un [`Deployment`](/docs/concepts/workloads/controllers/deployment/) es un objeto que puede poseer ReplicaSets
|
||||
y actualizar a estos y a sus Pods mediante actualizaciones en línea declarativas en el servidor.
|
||||
Aunque los ReplicaSets pueden usarse de forma independiente, hoy en día se usan principalmente a través de los Deployments
|
||||
como el mecanismo para orquestar la creación, eliminación y actualización de los Pods.
|
||||
Cuando usas Deployments no tienes que preocuparte de gestionar los ReplicaSets que crean.
|
||||
Los Deployments poseen y gestionan sus ReplicaSets.
|
||||
Por tanto, se recomienda usar Deployments cuando se quieran ReplicaSets.
|
||||
|
||||
### Pods simples
|
||||
|
||||
A diferencia del caso en que un usuario creaba Pods de forma directa, un ReplicaSet sustituye los Pods que se eliminan
|
||||
o se terminan por la razón que sea, como en el caso de un fallo de un nodo o
|
||||
una intervención disruptiva de mantenimiento, como una actualización de kernel.
|
||||
Por esta razón, se recomienda que se use un ReplicaSet incluso cuando la aplicación
|
||||
sólo necesita un único Pod. Entiéndelo de forma similar a un proceso supervisor,
|
||||
donde se supervisan múltiples Pods entre múltiples nodos en vez de procesos individuales
|
||||
en un único nodo. Un ReplicaSet delega los reinicios del contenedor local a algún agente
|
||||
del nodo (por ejemplo, Kubelet o Docker).
|
||||
|
||||
### Job
|
||||
|
||||
Usa un [`Job`](/docs/concepts/jobs/run-to-completion-finite-workloads/) en vez de un ReplicaSet para
|
||||
aquellos Pods que se esperan que terminen por ellos mismos (esto es, trabajos por lotes).
|
||||
|
||||
### DaemonSet
|
||||
|
||||
Usa un [`DaemonSet`](/docs/concepts/workloads/controllers/daemonset/) en vez de un ReplicaSet para aquellos
|
||||
Pods que proporcionan funcionalidad a nivel de servidor, como monitorización de servidor o
|
||||
logging de servidor. Estos Pods tienen un ciclo de vida asociado al del servidor mismo:
|
||||
el Pod necesita ejecutarse en el servidor antes de que los otros Pods comiencen, y es seguro
|
||||
que terminen cuando el servidor esté listo para ser reiniciado/apagado.
|
||||
|
||||
### ReplicationController
|
||||
Los ReplicaSets son los sucesores de los [_ReplicationControllers_](/docs/concepts/workloads/controllers/replicationcontroller/).
|
||||
Los dos sirven al mismo propósito, y se comportan de forma similar, excepto porque un ReplicationController
|
||||
no soporta los requisitos del selector basado en conjunto, como se describe en la [guía de usuario de etiquetas](/docs/concepts/overview/working-with-objects/labels/#label-selectors).
|
||||
Por ello, se prefieren los ReplicaSets a los ReplicationControllers.
|
||||
|
||||
{{% /capture %}}
|
|
@ -0,0 +1,21 @@
|
|||
apiVersion: apps/v1
|
||||
kind: ReplicaSet
|
||||
metadata:
|
||||
name: frontend
|
||||
labels:
|
||||
app: guestbook
|
||||
tier: frontend
|
||||
spec:
|
||||
# modifica las réplicas según tu caso de uso
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
tier: frontend
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
tier: frontend
|
||||
spec:
|
||||
containers:
|
||||
- name: php-redis
|
||||
image: gcr.io/google_samples/gb-frontend:v3
|
|
@ -0,0 +1,11 @@
|
|||
apiVersion: autoscaling/v1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: frontend-scaler
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
kind: ReplicaSet
|
||||
name: frontend
|
||||
minReplicas: 3
|
||||
maxReplicas: 10
|
||||
targetCPUUtilizationPercentage: 50
|
|
@ -0,0 +1,14 @@
|
|||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: pi
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: pi
|
||||
image: perl
|
||||
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
|
||||
restartPolicy: Never
|
||||
backoffLimit: 4
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-deployment
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:1.7.9
|
||||
ports:
|
||||
- containerPort: 80
|
|
@ -0,0 +1,17 @@
|
|||
apiVersion: apps/v1
|
||||
kind: ReplicaSet
|
||||
metadata:
|
||||
name: my-repset
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
pod-is-for: garbage-collection-example
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
pod-is-for: garbage-collection-example
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
|
@ -0,0 +1,23 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod1
|
||||
labels:
|
||||
tier: frontend
|
||||
spec:
|
||||
containers:
|
||||
- name: hello1
|
||||
image: gcr.io/google-samples/hello-app:2.0
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod2
|
||||
labels:
|
||||
tier: frontend
|
||||
spec:
|
||||
containers:
|
||||
- name: hello2
|
||||
image: gcr.io/google-samples/hello-app:1.0
|
|
@ -3,9 +3,6 @@ title: "Solution professionnelle d’orchestration de conteneurs"
|
|||
abstract: "Déploiement, mise à l'échelle et gestion automatisée des conteneurs"
|
||||
cid: home
|
||||
---
|
||||
{{< announcement >}}
|
||||
|
||||
{{< deprecationwarning >}}
|
||||
|
||||
{{< blocks/section id="oceanNodes" >}}
|
||||
{{% blocks/feature image="flower" %}}
|
||||
|
|
|
@ -0,0 +1,111 @@
|
|||
---
|
||||
title: Namespaces
|
||||
content_type: concept
|
||||
weight: 30
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
Kubernetes prend en charge plusieurs clusters virtuels présents sur le même cluster physique.
|
||||
Ces clusters virtuels sont appelés namespaces (espaces de noms en français).
|
||||
|
||||
<!-- body -->
|
||||
|
||||
## Quand utiliser plusieurs namespaces
|
||||
|
||||
Les namespaces sont destinés à être utilisés dans les environnements ayant de nombreux utilisateurs répartis en plusieurs équipes ou projets. Pour les clusters de quelques dizaines d'utilisateurs, vous n'avez pas
|
||||
besoin d'utiliser de namespaces. Commencez à utiliser des namespaces lorsque vous avez
|
||||
besoin des fonctionnalités qu'ils fournissent.
|
||||
|
||||
Les namespaces sont des groupes de noms. Ils fournissent un modèle d'isolation de nommage des ressources. Les noms des ressources doivent être uniques dans un namespace,
|
||||
mais pas dans l'ensemble des namespaces. Les namespaces ne peuvent pas être imbriqués les uns dans les autres et chaque ressource Kubernetes ne peut se trouver que dans un seul namespace.
|
||||
|
||||
Les namespaces sont un moyen de répartir les ressources d'un cluster entre plusieurs utilisateurs (via [quota de ressources](/docs/concepts/policy/resource-quotas/)).
|
||||
|
||||
Dans les futures versions de Kubernetes, les objets du même namespace auront les mêmes
|
||||
stratégies de contrôle d'accès par défaut.
|
||||
|
||||
Il n'est pas nécessaire d'utiliser plusieurs namespaces juste pour séparer des ressources légèrement différentes, telles que les versions du même logiciel : utilisez les [labels](/docs/user-guide/labels) pour distinguer les
|
||||
ressources dans le même namespace.
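Par exemple (commande indicative ; le label `version` et sa valeur sont des hypothèses, pas des valeurs issues de la documentation) :

```shell
# deux versions du même logiciel dans un seul namespace, distinguées par un label
kubectl get deployments -l version=v2
```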
|
||||
|
||||
## Utilisation des namespaces
|
||||
|
||||
La création et la suppression des namespaces sont décrites dans la [Documentation du guide d'administration pour les namespaces](/docs/admin/namespaces).
|
||||
|
||||
{{< note >}}
|
||||
Évitez de créer des namespaces avec le préfixe `kube-`, car il est réservé aux namespaces système de Kubernetes.
|
||||
{{< /note >}}
|
||||
|
||||
### Affichage des namespaces
|
||||
|
||||
Dans un cluster vous pouvez lister les namespaces actuels à l'aide de:
|
||||
|
||||
```shell
|
||||
kubectl get namespace
|
||||
```
|
||||
|
||||
```
|
||||
NAME STATUS AGE
|
||||
default Active 1d
|
||||
kube-node-lease Active 1d
|
||||
kube-public Active 1d
|
||||
kube-system Active 1d
|
||||
```
|
||||
|
||||
Kubernetes démarre avec quatre namespaces initiaux:
|
||||
|
||||
- `default` Le namespace par défaut pour les objets sans autre namespace
|
||||
- `kube-system` Le namespace pour les objets créés par Kubernetes lui-même
|
||||
- `kube-public` Ce namespace est créé automatiquement et est visible par tous les utilisateurs (y compris ceux qui ne sont pas authentifiés). Ce namespace est principalement réservé à l'utilisation du cluster, au cas où certaines ressources devraient être disponibles publiquement dans l'ensemble du cluster. L'aspect public de ce namespace n'est qu'une convention, pas une exigence.
|
||||
- `kube-node-lease` Ce namespace contient les objets de bail associés à chaque nœud, ce qui améliore les performances des pulsations du nœud à mesure que le cluster évolue.
|
||||
|
||||
### Définition du namespace pour une requête
|
||||
|
||||
Pour définir le namespace pour une requête en cours, utilisez l'indicateur `--namespace`.
|
||||
|
||||
Par exemple:
|
||||
|
||||
```shell
|
||||
kubectl run nginx --image=nginx --namespace=<insert-namespace-name-here>
|
||||
kubectl get pods --namespace=<insert-namespace-name-here>
|
||||
```
|
||||
|
||||
### Spécifier un namespace
|
||||
|
||||
Vous pouvez enregistrer de manière permanente le namespace à utiliser pour toutes les commandes kubectl suivantes.
|
||||
|
||||
```shell
|
||||
kubectl config set-context --current --namespace=<insert-namespace-name-here>
|
||||
# Validez-le
|
||||
kubectl config view --minify | grep namespace:
|
||||
```
|
||||
|
||||
## Namespaces et DNS
|
||||
|
||||
Lorsque vous créez un [Service](/fr/docs/concepts/services-networking/service/), il crée une [entrée DNS](/fr/docs/concepts/services-networking/dns-pod-service/) correspondante.
|
||||
Cette entrée est de la forme `<nom-service>.<nom-namespace>.svc.cluster.local`, ce qui signifie
|
||||
que si un conteneur utilise simplement `<nom-service>`, il résoudra le service qui
|
||||
est local à un namespace. Ceci est utile pour utiliser la même configuration pour
|
||||
plusieurs namespaces tels que le Développement, la Qualification et la Production. Si vous voulez naviguer
|
||||
entre plusieurs namespaces, vous devez utiliser le nom de domaine complet (FQDN).
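À titre d'exemple indicatif (les noms `mon-service` et `production` sont des hypothèses, pas des valeurs issues de la documentation), depuis un conteneur on peut résoudre le nom court ou le FQDN :

```shell
# depuis un pod du namespace "production", le nom court suffit
nslookup mon-service

# depuis n'importe quel namespace, utilisez le FQDN
nslookup mon-service.production.svc.cluster.local
```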
|
||||
|
||||
## Tous les objets ne se trouvent pas dans un namespace
|
||||
|
||||
La plupart des ressources Kubernetes (par exemple, pods, services, contrôleurs de réplication et autres) sont
|
||||
dans des namespaces. Cependant, les ressources de type namespace ne sont pas elles-mêmes dans un namespace.
|
||||
Et les ressources de bas niveau, telles que les [noeuds](/docs/admin/node) et les volumes persistants, ne se trouvent dans aucun namespace.
|
||||
|
||||
Pour voir quelles ressources Kubernetes sont et ne sont pas dans un namespace:
|
||||
|
||||
```shell
|
||||
# Dans un namespace
|
||||
kubectl api-resources --namespaced=true
|
||||
|
||||
# Pas dans un namespace
|
||||
kubectl api-resources --namespaced=false
|
||||
```
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
- En savoir plus sur [créer un nouveau namespace](/docs/tasks/administer-cluster/namespaces/#creating-a-new-namespace).
|
||||
- En savoir plus sur [suppression d'un namespace](/docs/tasks/administer-cluster/namespaces/#deleting-a-namespace).
|
|
@ -0,0 +1,16 @@
|
|||
---
|
||||
title: Add-ons
|
||||
id: addons
|
||||
date: 2019-12-15
|
||||
full_link: /docs/concepts/cluster-administration/addons/
|
||||
short_description: >
|
||||
Ressources qui étendent les fonctionnalités de Kubernetes.
|
||||
|
||||
aka:
|
||||
tags:
|
||||
- tool
|
||||
---
|
||||
Ressources qui étendent les fonctionnalités de Kubernetes.
|
||||
|
||||
<!--more-->
|
||||
[Installer des addons](/docs/concepts/cluster-administration/addons/) explique l'utilisation des modules complémentaires avec votre cluster et répertorie certains modules complémentaires populaires.
|
|
@ -0,0 +1,17 @@
|
|||
---
|
||||
title: UID
|
||||
id: uid
|
||||
date: 2018-04-12
|
||||
full_link: /docs/concepts/overview/working-with-objects/names
|
||||
short_description: >
|
||||
Chaîne de caractères générée par les systèmes Kubernetes pour identifier de manière unique les objets.
|
||||
|
||||
aka:
|
||||
tags:
|
||||
- fundamental
|
||||
---
|
||||
Chaîne de caractères générée par les systèmes Kubernetes pour identifier de manière unique les objets.
|
||||
|
||||
<!--more-->
|
||||
|
||||
Chaque objet créé pendant toute la durée de vie d'un cluster Kubernetes possède un UID distinct. Il vise à distinguer les occurrences historiques d'entités similaires.
|
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
title: Volume
|
||||
id: volume
|
||||
date: 2018-04-12
|
||||
full_link: /fr/docs/concepts/storage/volumes/
|
||||
short_description: >
|
||||
Un répertoire contenant des données, accessible aux conteneurs d'un pod.
|
||||
|
||||
aka:
|
||||
tags:
|
||||
- core-object
|
||||
- fundamental
|
||||
---
|
||||
Un répertoire contenant des données, accessible aux {{< glossary_tooltip text="conteneurs" term_id="container" >}} d'un {{< glossary_tooltip term_id="pod" >}}.
|
||||
|
||||
<!--more-->
|
||||
|
||||
Un volume Kubernetes vit aussi longtemps que le pod qui le contient. Par conséquent, un volume survit à tous les conteneurs qui s'exécutent dans le pod, et les données contenues dans le volume sont préservées lors des redémarrages du conteneur.
|
||||
|
||||
Voir [stockage](/fr/docs/concepts/storage/) pour plus d'informations.
|
|
@ -0,0 +1,22 @@
|
|||
---
|
||||
title: Workload
|
||||
id: workloads
|
||||
date: 2019-02-13
|
||||
full_link: /fr/docs/concepts/workloads/
|
||||
short_description: >
|
||||
Une charge de travail (workload) est une application exécutée sur Kubernetes.
|
||||
|
||||
aka:
|
||||
tags:
|
||||
- fundamental
|
||||
---
|
||||
Une charge de travail (workload) est une application exécutée sur Kubernetes.
|
||||
|
||||
<!--more-->
|
||||
|
||||
Divers objets de base qui représentent différents types ou parties d'une charge de travail
|
||||
incluent les objets DaemonSet, Deployment, Job, ReplicaSet et StatefulSet.
|
||||
|
||||
Par exemple, une charge de travail constituée d'un serveur Web et d'une base de données peut exécuter la
|
||||
base de données dans un {{< glossary_tooltip term_id="StatefulSet" >}} et le serveur web
|
||||
dans un {{< glossary_tooltip term_id="Deployment" >}}.
|