Merge remote-tracking branch 'upstream/master' into fix-community

commit 5918d2249f

_config.yml | 10
@@ -27,27 +27,27 @@ defaults:
   version: "v1.9"
   githubbranch: "v1.9.0"
   docsbranch: "release-1.9"
-  url: https://kubernetes.io/docs/home/
+  url: https://kubernetes.io
 - fullversion: "v1.8.4"
   version: "v1.8"
   githubbranch: "v1.8.4"
   docsbranch: "release-1.8"
-  url: https://v1-8.docs.kubernetes.io/docs/home/
+  url: https://v1-8.docs.kubernetes.io
 - fullversion: "v1.7.6"
   version: "v1.7"
   githubbranch: "v1.7.6"
   docsbranch: "release-1.7"
-  url: https://v1-7.docs.kubernetes.io/docs/home/
+  url: https://v1-7.docs.kubernetes.io
 - fullversion: "v1.6.8"
   version: "v1.6"
   githubbranch: "v1.6.8"
   docsbranch: "release-1.6"
-  url: https://v1-6.docs.kubernetes.io/docs/home/
+  url: https://v1-6.docs.kubernetes.io
 - fullversion: "v1.5.7"
   version: "v1.5"
   githubbranch: "v1.5.7"
   docsbranch: "release-1.5"
-  url: https://v1-5.docs.kubernetes.io/docs/
+  url: https://v1-5.docs.kubernetes.io
 deprecated: false
 currentUrl: https://kubernetes.io/docs/home/
 nextUrl: http://kubernetes-io-vnext-staging.netlify.com/

@@ -1,9 +1,10 @@
-id: labels
-name: Labels
+id: label
+name: Label
 full-link: /docs/concepts/overview/working-with-objects/labels
 tags:
 - fundamental
 short-description: >
-  Used to tag objects with identifying attributes that are meaningful and relevant to users.
+  Tags objects with identifying attributes that are meaningful and relevant to users.
 long-description: >
-  Labels are key/value pairs that are attached to objects, such as pods. They can be used to organize and to select subsets of objects.
+  Labels are key/value pairs that are attached to objects such as {% glossary_tooltip text="Pods" term_id="pod" %}.
+  They are used to organize and to select subsets of objects.

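To make the revised label description concrete, a minimal sketch of an object carrying labels and being selected by them; the `app` and `environment` keys and all names here are illustrative, not part of this change:

```yaml
# Hypothetical Pod tagged with two identifying key/value pairs.
apiVersion: v1
kind: Pod
metadata:
  name: label-demo
  labels:
    app: nginx
    environment: production
spec:
  containers:
  - name: nginx
    image: nginx:1.7.9
```

A label selector such as `kubectl get pods -l environment=production` then selects the subset of objects carrying that pair.
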
@@ -0,0 +1,17 @@
+id: persistent-volume
+name: Persistent Volume
+full-link: /docs/concepts/storage/persistent-volumes/
+related:
+- statefulset
+- deployment
+- persistent-volume-claim
+- pod
+tags:
+- core-object
+- storage
+short-description: >
+  An API object that represents a piece of storage in the cluster. Available as a general, pluggable resource that persists beyond the lifecycle of any individual {% glossary_tooltip term_id="pod" %}.
+long-description: |
+  PersistentVolumes (PVs) provide an API that abstracts details of how storage is provided from how it is consumed.
+  PVs are used directly in scenarios where storage can be created ahead of time (static provisioning).
+  For scenarios that require on-demand storage (dynamic provisioning), PersistentVolumeClaims (PVCs) are used instead.

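For context on the new glossary entry, a sketch of a statically provisioned PersistentVolume; the capacity, access mode, and `hostPath` backing are illustrative assumptions:

```yaml
# Hypothetical pre-created PV (static provisioning).
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-demo
spec:
  capacity:
    storage: 5Gi        # size offered to claims
  accessModes:
  - ReadWriteOnce       # mountable read-write by a single node
  hostPath:
    path: /tmp/pv-demo  # single-node backing store, for illustration only
```

A PersistentVolumeClaim requesting up to 5Gi with a compatible access mode could then bind to this volume.
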
@@ -28,7 +28,7 @@
 © {{ 'now' | date: "%Y" }} The Kubernetes Authors | Documentation Distributed under <a href="https://git.k8s.io/website/LICENSE" class="light-text">CC BY 4.0</a>
 </div>
 <div id="miceType" class="center">
-Copyright © {{ 'now' | date: "%Y" }} The Linux Foundation®. All rights reserved. The Linux Foundation has registered trademarks and uses trademarks. For a list of trademarks of The Linux Foundation, please see our Trademark Usage page: <a href="https://www.linuxfoundation.org/trademark-usage" class="light-text">https://www.linuxfoundation.org/trademark-usage</a>
+Copyright © {{ 'now' | date: "%Y" }} The Linux Foundation®. All rights reserved. The Linux Foundation has registered trademarks and uses trademarks. For a list of trademarks of The Linux Foundation, please see our <a href="https://www.linuxfoundation.org/trademark-usage" class="light-text">Trademark Usage page</a>
 </div>
 </main>
 </footer>

@@ -16,7 +16,11 @@
 </a>
 <ul>
 {% for version in page.versions %}
-<li><a href="{{ version.url }}">{{ version.version }}</a></li>
+{% if page.versionedurl contains version.version %}
+<li><a href="{{ version.url }}{{ page.versionedurl[version.version] }}">{{ version.version }}</a></li>
+{% else %}
+<li><a href="{{ version.url }}{{ page.url }}">{{ version.version }}</a></li>
+{% endif %}
 {% endfor %}
 </ul>
 </li>

@@ -6415,7 +6415,7 @@ The resulting set of endpoints can be viewed as:<br>
 <tbody>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">names</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">string array</p></td>
 <td class="tableblock halign-left valign-top"></td>

@@ -6671,7 +6671,7 @@ The resulting set of endpoints can be viewed as:<br>
 <tbody>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">names</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">string array</p></td>
 <td class="tableblock halign-left valign-top"></td>

@@ -6850,7 +6850,7 @@ The resulting set of endpoints can be viewed as:<br>
 <tbody>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">names</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">string array</p></td>
 <td class="tableblock halign-left valign-top"></td>

@@ -443,3 +443,4 @@ https://kubernetes-io-v1-7.netlify.com/* https://v1-7.docs.kubernetes.io/:spl
 /docs/admin/kubefed_unjoin/ /docs/reference/generated/kubefed_unjoin/ 301
 /docs/admin/kubefed_version/ /docs/reference/generated/kubefed_version/ 301
 
+/docs/reference/generated/kubeadm/ /docs/reference/setup-tools/kubeadm/kubeadm/ 301

@@ -85,7 +85,7 @@ The spec used on AWS is:
 ```yaml
 containers:
 - name: fluentd-cloud-logging
-  image: gcr.io/google_containers/fluentd-gcp:1.16
+  image: k8s.gcr.io/fluentd-gcp:1.16
   resources:
     limits:
       cpu: 100m

@@ -40,7 +40,7 @@ title: Validate Node Setup
 # $LOG_DIR is the path where test results are output.
 sudo docker run -it --rm --privileged --net=host \
   -v /:/rootfs -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
-  gcr.io/google_containers/node-test:0.2
+  k8s.gcr.io/node-test:0.2
 ```
 
 ## Running Node Conformance Test for Other Architectures

@@ -61,7 +61,7 @@ Kubernetes also provides node conformance test images for systems of other hardware architectures
 sudo docker run -it --rm --privileged --net=host \
   -v /:/rootfs:ro -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
   -e FOCUS=MirrorPod \ # Only run the MirrorPod test
-  gcr.io/google_containers/node-test:0.2
+  k8s.gcr.io/node-test:0.2
 ```
 
 To skip specific tests, overwrite the `SKIP` environment variable with a regular expression describing the tests you want to skip.

@@ -70,7 +70,7 @@ sudo docker run -it --rm --privileged --net=host \
 sudo docker run -it --rm --privileged --net=host \
   -v /:/rootfs:ro -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
   -e SKIP=MirrorPod \ # Run all tests except the MirrorPod test
-  gcr.io/google_containers/node-test:0.2
+  k8s.gcr.io/node-test:0.2
 ```
 
 Node conformance test is a containerized version of the [node e2e test](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/devel/e2e-node-tests.md).

@@ -172,7 +172,7 @@ v1/ServiceAccount:
 The RBAC ClusterRole for the CCM looks like this:
 
 ```yaml
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: cloud-controller-manager

@@ -216,7 +216,7 @@ metadata:
 spec:
   containers:
   - name: sleep-forever
-    image: gcr.io/google_containers/pause:0.8.0
+    image: k8s.gcr.io/pause:0.8.0
     resources:
       requests:
         cpu: 100m

@@ -199,7 +199,7 @@ Conditions:
 Events:
   FirstSeen LastSeen Count From SubobjectPath Reason Message
   Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {scheduler } scheduled Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f
-  Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "gcr.io/google_containers/pause:0.8.0" already present on machine
+  Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "k8s.gcr.io/pause:0.8.0" already present on machine
   Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD created Created with docker id 6a41280f516d
   Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD started Started with docker id 6a41280f516d
   Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} spec.containers{simmemleak} created Created with docker id 87348f12526a

@@ -23,4 +23,4 @@ spec:
         - another-node-label-value
   containers:
   - name: with-node-affinity
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -26,4 +26,4 @@ spec:
         topologyKey: kubernetes.io/hostname
   containers:
   - name: with-pod-affinity
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -518,7 +518,7 @@ spec:
     secretName: dotfile-secret
   containers:
   - name: dotfile-test-container
-    image: gcr.io/google_containers/busybox
+    image: k8s.gcr.io/busybox
     command:
     - ls
     - "-l"

@@ -108,7 +108,7 @@ spec:
   containers:
   - args:
     - /server
-    image: gcr.io/google_containers/liveness
+    image: k8s.gcr.io/liveness
     livenessProbe:
       httpGet:
         # when "host" is not defined, "PodIP" will be used

@@ -9,7 +9,7 @@ metadata:
 spec:
   containers:
   - name: master
-    image: gcr.io/google_containers/redis:v1
+    image: k8s.gcr.io/redis:v1
     env:
     - name: MASTER
       value: "true"

@@ -178,7 +178,7 @@ $ kubectl get pods valid-pod --namespace=limit-example -o yaml | grep -C 6 resou
 uid: 3b1bfd7a-f53c-11e5-b066-64510658e388
 spec:
   containers:
-  - image: gcr.io/google_containers/serve_hostname
+  - image: k8s.gcr.io/serve_hostname
     imagePullPolicy: Always
     name: kubernetes-serve-hostname
     resources:

@@ -13,7 +13,7 @@ spec:
 spec:
   containers:
   - name: autoscaler
-    image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
+    image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.0.0
     resources:
       requests:
         cpu: "20m"

@@ -7,4 +7,4 @@ metadata:
 spec:
   containers:
   - name: pod-with-no-annotation-container
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -8,4 +8,4 @@ spec:
   schedulerName: default-scheduler
   containers:
   - name: pod-with-default-annotation-container
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -8,4 +8,4 @@ spec:
   schedulerName: my-scheduler
   containers:
   - name: pod-with-second-annotation-container
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -15,7 +15,7 @@ spec:
     - -c
    - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
 
-    image: gcr.io/google_containers/busybox
+    image: k8s.gcr.io/busybox
 
     livenessProbe:
       exec:

@@ -9,7 +9,7 @@ spec:
   - name: liveness
     args:
     - /server
-    image: gcr.io/google_containers/liveness
+    image: k8s.gcr.io/liveness
     livenessProbe:
       httpGet:
         path: /healthz

@@ -7,7 +7,7 @@ metadata:
 spec:
   containers:
   - name: goproxy
-    image: gcr.io/google_containers/goproxy:0.1
+    image: k8s.gcr.io/goproxy:0.1
     ports:
     - containerPort: 8080
     readinessProbe:

@@ -5,7 +5,7 @@ metadata:
 spec:
   containers:
   - name: test-container
-    image: gcr.io/google_containers/busybox:1.24
+    image: k8s.gcr.io/busybox:1.24
     command: [ "sh", "-c"]
     args:
     - while true; do

@@ -5,7 +5,7 @@ metadata:
 spec:
   containers:
   - name: test-container
-    image: gcr.io/google_containers/busybox
+    image: k8s.gcr.io/busybox
     command: [ "sh", "-c"]
     args:
     - while true; do

@@ -5,7 +5,7 @@ metadata:
 spec:
   containers:
   - name: client-container
-    image: gcr.io/google_containers/busybox:1.24
+    image: k8s.gcr.io/busybox:1.24
     command: ["sh", "-c"]
     args:
     - while true; do

@@ -12,7 +12,7 @@ metadata:
 spec:
   containers:
   - name: client-container
-    image: gcr.io/google_containers/busybox
+    image: k8s.gcr.io/busybox
     command: ["sh", "-c"]
     args:
     - while true; do

@@ -41,13 +41,13 @@ spec:
   containers:
   -
     name: gpu-container-1
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0
     resources:
       limits:
         alpha.kubernetes.io/nvidia-gpu: 2 # requesting 2 GPUs
   -
     name: gpu-container-2
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0
     resources:
       limits:
         alpha.kubernetes.io/nvidia-gpu: 3 # requesting 3 GPUs

@@ -141,7 +141,7 @@ metadata:
 spec:
   containers:
   - name: gpu-container-1
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0
     resources:
       limits:
         alpha.kubernetes.io/nvidia-gpu: 1

@@ -33,7 +33,7 @@ Applications running in a Kubernetes cluster find and communicate with each other through the Service abstraction
 You must have a working Kubernetes 1.5 cluster to run the examples in this document. The examples use a simple nginx webserver that echoes back the source IP of the requests it receives via an HTTP header. You can create it as follows:
 
 ```console
-$ kubectl run source-ip-app --image=gcr.io/google_containers/echoserver:1.4
+$ kubectl run source-ip-app --image=k8s.gcr.io/echoserver:1.4
 deployment "source-ip-app" created
 ```
 

@@ -434,7 +434,7 @@ The StatefulSet controller in Kubernetes 1.7 supports automated updates. The update strategy
 Patch the container image of the `web` StatefulSet.
 
 ```shell
-kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"gcr.io/google_containers/nginx-slim:0.7"}]'
+kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"k8s.gcr.io/nginx-slim:0.7"}]'
 "web" patched
 ```
 

@@ -470,9 +470,9 @@ web-0 1/1 Running 0 3s
 
 ```shell{% raw %}
 kubectl get pod -l app=nginx -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
-web-0 gcr.io/google_containers/nginx-slim:0.7
-web-1 gcr.io/google_containers/nginx-slim:0.8
-web-2 gcr.io/google_containers/nginx-slim:0.8
+web-0 k8s.gcr.io/nginx-slim:0.7
+web-1 k8s.gcr.io/nginx-slim:0.8
+web-2 k8s.gcr.io/nginx-slim:0.8
 {% endraw %}```
 
 `web-0` has had its image updated, but `web-1` and `web-2` still have the original

@@ -513,9 +513,9 @@ web-2 1/1 Running 0 36s
 
 ```shell{% raw %}
 kubectl get pod -l app=nginx -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
-web-0 gcr.io/google_containers/nginx-slim:0.7
-web-1 gcr.io/google_containers/nginx-slim:0.7
-web-2 gcr.io/google_containers/nginx-slim:0.7
+web-0 k8s.gcr.io/nginx-slim:0.7
+web-1 k8s.gcr.io/nginx-slim:0.7
+web-2 k8s.gcr.io/nginx-slim:0.7
 {% endraw %}
 ```
 

@@ -539,7 +539,7 @@ statefulset "web" patched
 In one terminal window, patch the `web` StatefulSet to change the container image again.
 
 ```shell
-kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"gcr.io/google_containers/nginx-slim:0.8"}]'
+kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"k8s.gcr.io/nginx-slim:0.8"}]'
 statefulset "web" patched
 ```
 

@@ -589,9 +589,9 @@ Pods in a StatefulSet are updated in reverse ordinal order. Before updating the next Pod
 
 ```shell{% raw %}
 for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done
-gcr.io/google_containers/nginx-slim:0.8
-gcr.io/google_containers/nginx-slim:0.8
-gcr.io/google_containers/nginx-slim:0.8
+k8s.gcr.io/nginx-slim:0.8
+k8s.gcr.io/nginx-slim:0.8
+k8s.gcr.io/nginx-slim:0.8
 {% endraw %}
 ```
 

@@ -617,7 +617,7 @@ statefulset "web" patched
 Patch the StatefulSet again to change the container image.
 
 ```shell
-kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"gcr.io/google_containers/nginx-slim:0.7"}]'
+kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"k8s.gcr.io/nginx-slim:0.7"}]'
 statefulset "web" patched
 ```
 

@@ -646,7 +646,7 @@ web-2 1/1 Running 0 18s
 
 ```shell{% raw %}
 get po web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
-gcr.io/google_containers/nginx-slim:0.8
+k8s.gcr.io/nginx-slim:0.8
 {% endraw %}
 ```
 

@@ -683,7 +683,7 @@ web-2 1/1 Running 0 18s
 
 ```shell{% raw %}
 kubectl get po web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
-gcr.io/google_containers/nginx-slim:0.7
+k8s.gcr.io/nginx-slim:0.7
 {% endraw %}
 ```
 

@@ -721,7 +721,7 @@ web-1 1/1 Running 0 18s
 
 ```shell{% raw %}
 get po web-1 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
-gcr.io/google_containers/nginx-slim:0.8
+k8s.gcr.io/nginx-slim:0.8
 {% endraw %}
 ```
 

@@ -767,9 +767,9 @@ web-0 1/1 Running 0 3s
 
 ```shell{% raw %}
 for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done
-gcr.io/google_containers/nginx-slim:0.7
-gcr.io/google_containers/nginx-slim:0.7
-gcr.io/google_containers/nginx-slim:0.7
+k8s.gcr.io/nginx-slim:0.7
+k8s.gcr.io/nginx-slim:0.7
+k8s.gcr.io/nginx-slim:0.7
 {% endraw %}
 ```
 

@@ -26,7 +26,7 @@ spec:
 spec:
   containers:
   - name: nginx
-    image: gcr.io/google_containers/nginx-slim:0.8
+    image: k8s.gcr.io/nginx-slim:0.8
     ports:
     - containerPort: 80
       name: web

@@ -27,7 +27,7 @@ spec:
 spec:
   containers:
   - name: nginx
-    image: gcr.io/google_containers/nginx-slim:0.8
+    image: k8s.gcr.io/nginx-slim:0.8
     ports:
     - containerPort: 80
       name: web

@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)

@@ -196,10 +196,10 @@ spec:
 metadata:
   # ...
 spec:
+  serviceAccountName: bob-the-bot
   containers:
   - name: nginx
     image: nginx:1.7.9
-  serviceAccountName: bob-the-bot
 ```
 
 Service account bearer tokens are perfectly valid to use outside the cluster and

@@ -36,74 +36,86 @@ information. It is used when referring to a token without leaking the secret
 part used for authentication. The second part is the "Token Secret" and should
 only be shared with trusted parties.
 
-## Enabling Bootstrap Tokens
+## Enabling Bootstrap Token Authentication
 
-All features for Bootstrap Tokens are disabled by default in Kubernetes v1.8.
-The Bootstrap Token authenticator can be enabled using the following flag on the
-API server:
+You can enable the Bootstrap Token authenticator with the
+`--enable-bootstrap-token-auth` flag on the API server. You can enable
+the Bootstrap controllers by specifying them with the `--controllers` flag on the
+controller manager with something like
+`--controllers=*,tokencleaner,bootstrapsigner`. This is done automatically when
+using `kubeadm`.
 
-```
---enable-bootstrap-token-auth
-```
-
-Tokens are used in an HTTPS call as follows:
+When enabled, bootstrapping tokens can be used as bearer token credentials to
+authenticate requests against the API server.
 
 ```http
 Authorization: Bearer 07401b.f395accd246ae52d
 ```
 
+Tokens authenticate as the username `system:bootstrap:<token id>` and are members
+of the group `system:bootstrappers`. Additional groups may be specified in the
+token's Secret.
+
-Expired tokens can be deleted automatically by enabling the `tokencleaner`
-controller on the controller manager.
-
-```
---controllers=*,tokencleaner
-```
-
 ## Bootstrap Token Secret Format
 
 Each valid token is backed by a secret in the `kube-system` namespace. You can
 find the full design doc
 [here](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/design-proposals/cluster-lifecycle/bootstrap-discovery.md).
 
-Here is what the secret looks like. Note that `base64(string)` indicates the
-value should be base64 encoded. The undecoded version is provided here for
-readability.
+Here is what the secret looks like.
 
 ```yaml
 apiVersion: v1
 kind: Secret
 metadata:
   # Name MUST be of form "bootstrap-token-<token id>"
   name: bootstrap-token-07401b
   namespace: kube-system
 
 # Type MUST be 'bootstrap.kubernetes.io/token'
 type: bootstrap.kubernetes.io/token
-data:
-  description: base64(The default bootstrap token generated by 'kubeadm init'.)
-  token-id: base64(07401b)
-  token-secret: base64(f395accd246ae52d)
-  expiration: base64(2017-03-10T03:22:11Z)
-  usage-bootstrap-authentication: base64(true)
-  usage-bootstrap-signing: base64(true)
-  auth-extra-groups: base64(system:bootstrappers:group1,system:bootstrappers:group2)
+stringData:
+  # Human readable description. Optional.
+  description: "The default bootstrap token generated by 'kubeadm init'."
+
+  # Token ID and secret. Required.
+  token-id: 07401b
+  token-secret: f395accd246ae52d
+
+  # Expiration. Optional.
+  expiration: 2017-03-10T03:22:11Z
+
+  # Allowed usages.
+  usage-bootstrap-authentication: true
+  usage-bootstrap-signing: true
+
+  # Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
+  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress
 ```
 
 The type of the secret must be `bootstrap.kubernetes.io/token` and the name must
 be `bootstrap-token-<token id>`. It must also exist in the `kube-system`
-namespace. `description` is a human readable description that should not be
-used for machine readable information. The Token ID and Secret are included in
-the data dictionary.
+namespace.
 
 The `usage-bootstrap-*` members indicate what this secret is intended to be used
 for. A value must be set to `true` to be enabled.
 
-`usage-bootstrap-authentication` indicates that the token can be used to
-authenticate to the API server. The authenticator authenticates as
-`system:bootstrap:<Token ID>`. It is included in the `system:bootstrappers`
-group. `auth-extra-groups` indicates that it will also be included in the
-`system:bootstrappers:group1`, and `system:bootstrappers:group2` groups. The
-naming and groups are intentionally limited to discourage users from using these
-tokens past bootstrapping. Extra bootstrap token groups must start with
-`system:bootstrappers:`.
-
-`usage-bootstrap-signing` indicates that the token should be used to sign the
+* `usage-bootstrap-authentication` indicates that the token can be used to
+  authenticate to the API server as a bearer token.
+* `usage-bootstrap-signing` indicates that the token may be used to sign the
 `cluster-info` ConfigMap as described below.
 
-The `expiration` data member lists a time after which the token is no longer
-valid. This is encoded as an absolute UTC time using RFC3339. The TokenCleaner
-controller will delete expired tokens.
+The `expiration` field controls the expiry of the token. Expired tokens are
+rejected when used for authentication and ignored during ConfigMap signing.
+The expiry value is encoded as an absolute UTC time using RFC3339. Enable the
+`tokencleaner` controller to automatically delete expired tokens.
 
 ## Token Management with `kubeadm`

@@ -116,6 +128,13 @@ In addition to authentication, the tokens can be used to sign a ConfigMap. This
 is used early in a cluster bootstrap process before the client trusts the API
 server. The signed ConfigMap can be authenticated by the shared token.
 
+Enable ConfigMap signing by enabling the `bootstrapsigner` controller on the
+Controller Manager.
+
+```
+--controllers=*,bootstrapsigner
+```
+
 The ConfigMap that is signed is `cluster-info` in the `kube-public` namespace.
 The typical flow is that a client reads this ConfigMap while unauthenticated and
 ignoring TLS errors. It then validates the payload of the ConfigMap by looking

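The signed ConfigMap carries one JWS entry per signing token, keyed by token ID; a sketch of its shape assuming the `07401b` token from the example above, with the signature and kubeconfig payload shown as abbreviated placeholders:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-info
  namespace: kube-public
data:
  # Detached JWS computed over the kubeconfig value, using the full token as the HS256 key.
  jws-kubeconfig-07401b: <detached-JWS-signature>
  kubeconfig: |
    apiVersion: v1
    kind: Config
    # clusters, CA data, and the API server URL go here
```
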
@@ -156,3 +175,11 @@ is then used to form a whole JWS by inserting it between the 2 dots. You can
 verify the JWS using the `HS256` scheme (HMAC-SHA256) with the full token (e.g.
 `07401b.f395accd246ae52d`) as the shared secret. Users _must_ verify that HS256
 is used.
+
+WARNING: Any party with a bootstrapping token can create a valid signature for that
+token. When using ConfigMap signing it's discouraged to share the same token with
+many clients, since a compromised client can potentially man-in-the-middle another
+client relying on the signature to bootstrap TLS trust.
+
+Consult the [kubeadm security model](/docs/reference/generated/kubeadm/#security-model)
+section for more information.

@@ -86,7 +86,7 @@ For example:
 ```yaml
 containers:
 - name: fluentd-cloud-logging
-  image: gcr.io/google_containers/fluentd-gcp:1.16
+  image: k8s.gcr.io/fluentd-gcp:1.16
   resources:
     limits:
       cpu: 100m

@@ -87,9 +87,9 @@ images or you can build them yourself from HEAD.
 ### Using official release images
 
 As part of every Kubernetes release, official release images are pushed to
-`gcr.io/google_containers`. To use the images in this repository, you can
+`k8s.gcr.io`. To use the images in this repository, you can
 set the container image fields in the following configs to point to the
-images in this repository. `gcr.io/google_containers/hyperkube` image
+images in this repository. `k8s.gcr.io/hyperkube` image
 includes the federation-apiserver and federation-controller-manager
 binaries, so you can point the corresponding configs for those components
 to the hyperkube image.

@@ -315,8 +315,8 @@ official release images or you can build from HEAD.
 
 #### Using official release images
 
-As part of every release, images are pushed to `gcr.io/google_containers`. To use
-these images, set env var `FEDERATION_PUSH_REPO_BASE=gcr.io/google_containers`
+As part of every release, images are pushed to `k8s.gcr.io`. To use
+these images, set env var `FEDERATION_PUSH_REPO_BASE=k8s.gcr.io`
 This will always use the latest image.
 To use the hyperkube image which includes federation-apiserver and
 federation-controller-manager from a specific release, set the

@@ -345,7 +345,7 @@ Once you have the images, you can run these as pods on your existing kubernetes
 The command to run these pods on an existing GCE cluster will look like:
 
 ```shell
-$ KUBERNETES_PROVIDER=gce FEDERATION_DNS_PROVIDER=google-clouddns FEDERATION_NAME=myfederation DNS_ZONE_NAME=myfederation.example FEDERATION_PUSH_REPO_BASE=gcr.io/google_containers ./federation/cluster/federation-up.sh
+$ KUBERNETES_PROVIDER=gce FEDERATION_DNS_PROVIDER=google-clouddns FEDERATION_NAME=myfederation DNS_ZONE_NAME=myfederation.example FEDERATION_PUSH_REPO_BASE=k8s.gcr.io ./federation/cluster/federation-up.sh
 ```
 
 `KUBERNETES_PROVIDER` is the cloud provider.

@@ -5,7 +5,7 @@ metadata:
 spec:
   hostNetwork: true
   containers:
-  - image: gcr.io/google_containers/etcd:3.0.17
+  - image: k8s.gcr.io/etcd:3.0.17
     name: etcd-container
     command:
     - /usr/local/bin/etcd

@@ -6,7 +6,7 @@ spec:
   hostNetwork: true
   containers:
   - name: kube-apiserver
-    image: gcr.io/google_containers/kube-apiserver:9680e782e08a1a1c94c656190011bd02
+    image: k8s.gcr.io/kube-apiserver:9680e782e08a1a1c94c656190011bd02
     command:
     - /bin/sh
     - -c

@@ -10,7 +10,7 @@ spec:
     - /usr/local/bin/kube-controller-manager --master=127.0.0.1:8080 --cluster-name=e2e-test-bburns
       --cluster-cidr=10.245.0.0/16 --allocate-node-cidrs=true --cloud-provider=gce --service-account-private-key-file=/srv/kubernetes/server.key
       --v=2 --leader-elect=true 1>>/var/log/kube-controller-manager.log 2>&1
-    image: gcr.io/google_containers/kube-controller-manager:fda24638d51a48baa13c35337fcd4793
+    image: k8s.gcr.io/kube-controller-manager:fda24638d51a48baa13c35337fcd4793
     livenessProbe:
       httpGet:
         path: /healthz

@@ -6,7 +6,7 @@ spec:
   hostNetwork: true
   containers:
   - name: kube-scheduler
-    image: gcr.io/google_containers/kube-scheduler:34d0b8f8b31e27937327961528739bc9
+    image: k8s.gcr.io/kube-scheduler:34d0b8f8b31e27937327961528739bc9
     command:
     - /bin/sh
     - -c

@@ -6,7 +6,7 @@ spec:
   hostNetwork: true
   containers:
   - name: scheduler-elector
-    image: gcr.io/google_containers/podmaster:1.1
+    image: k8s.gcr.io/podmaster:1.1
     command:
     - /podmaster
     - --etcd-servers=http://127.0.0.1:4001

@@ -20,7 +20,7 @@ spec:
     - mountPath: /manifests
       name: manifests
   - name: controller-manager-elector
-    image: gcr.io/google_containers/podmaster:1.1
+    image: k8s.gcr.io/podmaster:1.1
     command:
     - /podmaster
     - --etcd-servers=http://127.0.0.1:4001

@@ -5,7 +5,7 @@ metadata:
 spec:
   containers:
   - name: kubernetes-serve-hostname
-    image: gcr.io/google_containers/serve_hostname
+    image: k8s.gcr.io/serve_hostname
     resources:
       limits:
         cpu: "3"

@@ -7,7 +7,7 @@ metadata:
 spec:
   containers:
   - name: kubernetes-serve-hostname
-    image: gcr.io/google_containers/serve_hostname
+    image: k8s.gcr.io/serve_hostname
     resources:
       limits:
         cpu: "1"

@@ -7,4 +7,4 @@ metadata:
 spec:
   containers:
   - name: pod-with-no-annotation-container
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -8,4 +8,4 @@ spec:
   schedulerName: default-scheduler
   containers:
   - name: pod-with-default-annotation-container
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -8,4 +8,4 @@ spec:
   schedulerName: my-scheduler
   containers:
   - name: pod-with-second-annotation-container
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -43,7 +43,7 @@ placement, and so if the zones in your cluster are heterogeneous
 (e.g. different numbers of nodes, different types of nodes, or
 different pod resource requirements), this might prevent perfectly
 even spreading of your pods across zones. If desired, you can use
-homogenous zones (same number and types of nodes) to reduce the
+homogeneous zones (same number and types of nodes) to reduce the
 probability of unequal spreading.
 
 When persistent volumes are created, the `PersistentVolumeLabel`

@@ -1,4 +0,0 @@
-approvers:
-- derekwaynecarr
-- janetkuo
-

@@ -1,10 +0,0 @@
-{
-  "kind": "Namespace",
-  "apiVersion": "v1",
-  "metadata": {
-    "name": "development",
-    "labels": {
-      "name": "development"
-    }
-  }
-}

@@ -48,7 +48,7 @@ other Kubelet flags you may care:
 # $LOG_DIR is the test output path.
 sudo docker run -it --rm --privileged --net=host \
   -v /:/rootfs -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
-  gcr.io/google_containers/node-test:0.2
+  k8s.gcr.io/node-test:0.2
 ```
 
 ## Running Node Conformance Test for Other Architectures

@@ -71,7 +71,7 @@ regular expression of tests you want to run.
 sudo docker run -it --rm --privileged --net=host \
   -v /:/rootfs:ro -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
   -e FOCUS=MirrorPod \ # Only run MirrorPod test
-  gcr.io/google_containers/node-test:0.2
+  k8s.gcr.io/node-test:0.2
 ```
 
 To skip specific tests, overwrite the environment variable `SKIP` with the

@@ -81,7 +81,7 @@ regular expression of tests you want to skip.
 sudo docker run -it --rm --privileged --net=host \
   -v /:/rootfs:ro -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
   -e SKIP=MirrorPod \ # Run all conformance tests but skip MirrorPod test
-  gcr.io/google_containers/node-test:0.2
+  k8s.gcr.io/node-test:0.2
 ```
 
 Node conformance test is a containerized version of [node e2e test](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/devel/e2e-node-tests.md).

@@ -56950,7 +56950,7 @@ Appears In:
 <tbody>
 <tr>
 <td>names <br /> <em>string array</em></td>
-<td>Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</td>
+<td>Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</td>
 </tr>
 <tr>
 <td>sizeBytes <br /> <em>integer</em></td>

@@ -97,7 +97,7 @@ The Node controller contains the cloud-dependent functionality of the kubelet. P
 
 In this new model, the kubelet initializes a node without cloud-specific information. However, it adds a taint to the newly created node that makes the node unschedulable until the CCM initializes the node with cloud-specific information, and then removes this taint.
 
-### 3. Kubernets API server
+### 3. Kubernetes API server
 
 The PersistentVolumeLabels controller moves the cloud-dependent functionality of the Kubernetes API server to the CCM as described in the preceding sections.
 

@@ -174,7 +174,7 @@ v1/ServiceAccount:
 The RBAC ClusterRole for the CCM looks like this:
 
 ```yaml
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: cloud-controller-manager

@@ -265,7 +265,7 @@ metadata:
 spec:
   containers:
   - name: sleep-forever
-    image: gcr.io/google_containers/pause:0.8.0
+    image: k8s.gcr.io/pause:0.8.0
     resources:
       requests:
         cpu: 100m

@@ -13,10 +13,10 @@ the controller manager.
 
 Controller manager metrics provide important insight into the performance and health of the controller manager.
 These metrics include common Go language runtime metrics such as go_routine count and controller specific metrics such as
-etcd request latencies or Cloudprovider (AWS, GCE, Openstack) API latencies that can be used
+etcd request latencies or Cloudprovider (AWS, GCE, OpenStack) API latencies that can be used
 to gauge the health of a cluster.
 
-Starting from Kubernetes 1.7, detailed Cloudprovider metrics are available for storage operations for GCE, AWS, Vsphere and Openstack.
+Starting from Kubernetes 1.7, detailed Cloudprovider metrics are available for storage operations for GCE, AWS, Vsphere and OpenStack.
 These metrics can be used to monitor health of persistent volume operations.
 
 For example, for GCE these metrics are called:

@@ -65,7 +65,7 @@ The general workflow of a device plugin includes the following steps:
 ```gRPC
 service DevicePlugin {
       // ListAndWatch returns a stream of List of Devices
-      // Whenever a Device state change or a Device disapears, ListAndWatch
+      // Whenever a Device state change or a Device disappears, ListAndWatch
       // returns the new list
       rpc ListAndWatch(Empty) returns (stream ListAndWatchResponse) {}
 

@@ -107,8 +107,10 @@ in the plugin's
 
 ## Examples
 
-For an example device plugin implementation, see
-[nvidia GPU device plugin for COS base OS](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/cmd/nvidia_gpu).
+For examples of device plugin implementations, see:
+* The official [NVIDIA GPU device plugin](https://github.com/NVIDIA/k8s-device-plugin)
+    * it requires using [nvidia-docker 2.0](https://github.com/NVIDIA/nvidia-docker) which allows you to run GPU enabled docker containers
+* The [NVIDIA GPU device plugin for COS base OS](https://github.com/GoogleCloudPlatform/container-engine-accelerators/tree/master/cmd/nvidia_gpu).
 
 {% endcapture %}
 

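Once a device plugin has advertised devices to the kubelet, Pods consume them as extended resources via `resources.limits`; a minimal sketch, assuming a plugin that registers the `nvidia.com/gpu` resource name:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gpu-pod
spec:
  containers:
  - name: cuda-container
    image: k8s.gcr.io/pause:2.0   # placeholder image for illustration
    resources:
      limits:
        nvidia.com/gpu: 1         # schedules onto a node with a free advertised device
```
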
@@ -22,7 +22,7 @@ spec:
     - name: varlog
       mountPath: /var/log
   - name: count-agent
-    image: gcr.io/google_containers/fluentd-gcp:1.30
+    image: k8s.gcr.io/fluentd-gcp:1.30
     env:
     - name: FLUENTD_ARGS
       value: -c /etc/fluentd-config/fluentd.conf

@@ -285,7 +285,7 @@ Conditions:
 Events:
   FirstSeen LastSeen Count From SubobjectPath Reason Message
   Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {scheduler } scheduled Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f
-  Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "gcr.io/google_containers/pause:0.8.0" already present on machine
+  Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "k8s.gcr.io/pause:0.8.0" already present on machine
   Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD created Created with docker id 6a41280f516d
   Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD started Started with docker id 6a41280f516d
   Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} spec.containers{simmemleak} created Created with docker id 87348f12526a

@@ -312,7 +312,7 @@ Kubernetes version 1.8 introduces a new resource, _ephemeral-storage_ for managi
 
 This partition is “ephemeral” and applications cannot expect any performance SLAs (Disk IOPS for example) from this partition. Local ephemeral storage management only applies for the root partition; the optional partition for image layer and writable layer is out of scope.
 
-**Note:** If an optional runntime partition is used, root partition will not hold any image layer or writable layers.
+**Note:** If an optional runtime partition is used, root partition will not hold any image layer or writable layers.
 {: .note}
 
 ### Requests and limits setting for local ephemeral storage

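For the requests-and-limits section referenced above, a sketch of how a container declares local ephemeral storage against the root partition; the sizes are illustrative:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: ephemeral-storage-demo
spec:
  containers:
  - name: app
    image: k8s.gcr.io/busybox
    resources:
      requests:
        ephemeral-storage: "2Gi"  # scheduling request against the node's root partition
      limits:
        ephemeral-storage: "4Gi"  # exceeding this can make the pod subject to eviction
```
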
@@ -23,4 +23,4 @@ spec:
         - another-node-label-value
   containers:
   - name: with-node-affinity
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -26,4 +26,4 @@ spec:
         topologyKey: kubernetes.io/hostname
   containers:
   - name: with-pod-affinity
-    image: gcr.io/google_containers/pause:2.0
+    image: k8s.gcr.io/pause:2.0

@@ -618,7 +618,7 @@ spec:
     secretName: dotfile-secret
   containers:
   - name: dotfile-test-container
-    image: gcr.io/google_containers/busybox
+    image: k8s.gcr.io/busybox
     command:
     - ls
     - "-l"

@@ -58,8 +58,8 @@ The following controllers have cloud provider dependencies:
 
 ### kube-scheduler
 
-[kube-scheduler](/docs/admin/kube-scheduler/) watches newly created pods that have no node assigned, and
-selects a node for them to run on.
+[kube-scheduler](/docs/admin/kube-scheduler/) watches newly created pods that
+are not assigned to any node, and selects a node for them to run on.
 
 ### addons
 

@@ -224,7 +224,7 @@ Note that there is a gap between TLS features supported by various Ingress contr
 
 ### Loadbalancing
 
-An Ingress controller is bootstrapped with some load balancing policy settings that it applies to all Ingress, such as the load balancing algorithm, backend weight scheme, and others. More advanced load balancing concepts (e.g.: persistent sessions, dynamic weights) are not yet exposed through the Ingress. You can still get these features through the [service loadbalancer](https://git.k8s.io/contrib/service-loadbalancer). With time, we plan to distill load balancing patterns that are applicable cross platform into the Ingress resource.
+An Ingress controller is bootstrapped with some load balancing policy settings that it applies to all Ingress, such as the load balancing algorithm, backend weight scheme, and others. More advanced load balancing concepts (e.g.: persistent sessions, dynamic weights) are not yet exposed through the Ingress. You can still get these features through the [service loadbalancer](https://github.com/kubernetes/ingress-nginx/blob/master/docs/catalog.md). With time, we plan to distill load balancing patterns that are applicable cross platform into the Ingress resource.
 
 It's also worth noting that even though health checks are not exposed directly through the Ingress, there exist parallel concepts in Kubernetes such as [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) which allow you to achieve the same end result. Please review the controller specific docs to see how they handle health checks ([nginx](https://git.k8s.io/ingress-nginx/README.md), [GCE](https://git.k8s.io/ingress-gce/README.md#health-checks)).
 

@@ -193,15 +193,15 @@ having working [readiness probes](/docs/tasks/configure-pod-container/configure-
 {% assign for_k8s_version="v1.9" %}{% include feature-state-beta.md %}
 
 In this mode, kube-proxy watches Kubernetes `services` and `endpoints`,
-call `netlink` interface create ipvs rules accordingly and sync ipvs rules with Kubernetes
+calls `netlink` interface to create ipvs rules accordingly and syncs ipvs rules with Kubernetes
 `services` and `endpoints` periodically, to make sure ipvs status is
-consistent with the expectation. When access the `service`, traffic will
-be redirect to one of the backend `pod`.
+consistent with the expectation. When `service` is accessed, traffic will
+be redirected to one of the backend `pod`s.
 
-Similar to iptables, Ipvs is based on netfilter hook function, but use hash
-table as the underlying data structure and work in the kernal state.
-That means ipvs redirects traffic can be much faster, and have much
-better performance when sync proxy rules. Furthermore, ipvs provides more
+Similar to iptables, Ipvs is based on netfilter hook function, but uses hash
+table as the underlying data structure and works in the kernel space.
+That means ipvs redirects traffic much faster, and has much
+better performance when syncing proxy rules. Furthermore, ipvs provides more
 options for load balancing algorithm, such as:
 
 - rr: round-robin

@@ -211,7 +211,7 @@ options for load balancing algorithm, such as:
 - sed: shortest expected delay
 - nq: never queue
 
-**Note:** ipvs mode assumed IPVS kernel modules are installed on the node
+**Note:** ipvs mode assumes IPVS kernel modules are installed on the node
 before running kube-proxy. When kube-proxy starts with ipvs proxy mode,
 kube-proxy would validate if IPVS modules are installed on the node, if
 it's not installed kube-proxy will fall back to iptables proxy mode.

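A sketch of selecting the ipvs mode and one of the algorithms above through the kube-proxy component configuration; the exact field shape under the `v1alpha1` config group is an assumption to verify against your kube-proxy version:

```yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"       # falls back to iptables if the IPVS kernel modules are missing
ipvs:
  scheduler: "rr"  # round-robin, per the algorithm list above
```
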
@@ -125,7 +125,7 @@ spec:
     path: /any/path/it/will/be/replaced
   containers:
   - name: pv-recycler
-    image: "gcr.io/google_containers/busybox"
+    image: "k8s.gcr.io/busybox"
     command: ["/bin/sh", "-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"]
     volumeMounts:
     - name: vol

@@ -131,7 +131,7 @@ metadata:
   name: test-ebs
 spec:
   containers:
-  - image: gcr.io/google_containers/test-webserver
+  - image: k8s.gcr.io/test-webserver
     name: test-container
     volumeMounts:
     - mountPath: /test-ebs

@@ -246,7 +246,7 @@ metadata:
   name: test-pd
 spec:
   containers:
-  - image: gcr.io/google_containers/test-webserver
+  - image: k8s.gcr.io/test-webserver
     name: test-container
     volumeMounts:
     - mountPath: /cache

@@ -326,7 +326,7 @@ metadata:
   name: test-pd
 spec:
   containers:
-  - image: gcr.io/google_containers/test-webserver
+  - image: k8s.gcr.io/test-webserver
     name: test-container
     volumeMounts:
     - mountPath: /test-pd

@@ -432,7 +432,7 @@ metadata:
   name: test-pd
 spec:
   containers:
-  - image: gcr.io/google_containers/test-webserver
+  - image: k8s.gcr.io/test-webserver
     name: test-container
     volumeMounts:
     - mountPath: /test-pd

@@ -665,7 +665,7 @@ metadata:
   name: test-portworx-volume-pod
 spec:
   containers:
-  - image: gcr.io/google_containers/test-webserver
+  - image: k8s.gcr.io/test-webserver
     name: test-container
     volumeMounts:
     - mountPath: /mnt

@@ -736,7 +736,7 @@ metadata:
   name: pod-0
 spec:
   containers:
-  - image: gcr.io/google_containers/test-webserver
+  - image: k8s.gcr.io/test-webserver
     name: pod-0
     volumeMounts:
     - mountPath: /test-pd

@@ -866,7 +866,7 @@ metadata:
   name: test-vmdk
 spec:
   containers:
-  - image: gcr.io/google_containers/test-webserver
+  - image: k8s.gcr.io/test-webserver
     name: test-container
     volumeMounts:
     - mountPath: /test-vmdk

|
|
@ -87,7 +87,7 @@ spec:
|
|||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: nginx
|
||||
image: gcr.io/google_containers/nginx-slim:0.8
|
||||
image: k8s.gcr.io/nginx-slim:0.8
|
||||
ports:
|
||||
- containerPort: 80
|
||||
name: web
|
||||
|
|
|
@@ -198,7 +198,7 @@ spec:
   containers:
   - args:
     - /server
-    image: gcr.io/google_containers/liveness
+    image: k8s.gcr.io/liveness
     livenessProbe:
       httpGet:
         # when "host" is not defined, "PodIP" will be used

@@ -32,7 +32,9 @@ Containers within a pod share an IP address and port space, and
 can find each other via `localhost`. They can also communicate with each
 other using standard inter-process communications like SystemV semaphores or
 POSIX shared memory. Containers in different pods have distinct IP addresses
-and can not communicate by IPC.
+and can not communicate by IPC without
+[special configuration](/docs/concepts/policy/pod-security-policy/).
+These containers usually communicate with each other via Pod IP addresses.
 
 Applications within a pod also have access to shared volumes, which are defined
 as part of a pod and are made available to be mounted into each application's

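A sketch of the shared-volume pattern described here: two containers in one pod mounting the same `emptyDir` volume (all names are illustrative; ordering between the two containers is not guaranteed):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: shared-volume-demo
spec:
  volumes:
  - name: shared-data
    emptyDir: {}          # scratch space shared by both containers
  containers:
  - name: writer
    image: k8s.gcr.io/busybox
    command: ["sh", "-c", "echo hello > /pod-data/msg && sleep 3600"]
    volumeMounts:
    - name: shared-data
      mountPath: /pod-data
  - name: reader
    image: k8s.gcr.io/busybox
    command: ["sh", "-c", "sleep 5 && cat /pod-data/msg && sleep 3600"]
    volumeMounts:
    - name: shared-data
      mountPath: /pod-data
```
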
@@ -14,7 +14,7 @@ spec:
   dnsPolicy: Default
   containers:
   - name: fluentd-cloud-logging
-    image: gcr.io/google_containers/fluentd-gcp:2.0.2
+    image: k8s.gcr.io/fluentd-gcp:2.0.2
     # If fluentd consumes its own logs, the following situation may happen:
     # fluentd fails to send a chunk to the server => writes it to the log =>
     # tries to send this message to the server => fails to send a chunk and so on.

@@ -46,7 +46,7 @@ Running pre-create checks...
 Creating machine...
 Starting local Kubernetes cluster...
 
-$ kubectl run hello-minikube --image=gcr.io/google_containers/echoserver:1.4 --port=8080
+$ kubectl run hello-minikube --image=k8s.gcr.io/echoserver:1.4 --port=8080
 deployment "hello-minikube" created
 $ kubectl expose deployment hello-minikube --type=NodePort
 service "hello-minikube" exposed

@@ -27,7 +27,7 @@ is the best fit for your content:
 
 <tr>
 <td>Tutorial</td>
-<td>A tutorial page shows how to accomplish a goal that ties together several Kubernetes features. A tutorial might provide several sequences of steps that readers can actually do as they read the page. Or it might provide explanations of related pieces of code. For example, a tutorial could provide a walkthrough of a code sample. A tutorial can include brief explanations of the Kubernetes features that are being tied togeter, but should link to related concept topics for deep explanations of individual features.</td>
+<td>A tutorial page shows how to accomplish a goal that ties together several Kubernetes features. A tutorial might provide several sequences of steps that readers can actually do as they read the page. Or it might provide explanations of related pieces of code. For example, a tutorial could provide a walkthrough of a code sample. A tutorial can include brief explanations of the Kubernetes features that are being tied together, but should link to related concept topics for deep explanations of individual features.</td>
 </tr>
 
 <tr>

@@ -40,13 +40,13 @@ kubefed init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT
       --dns-provider-config string Config file path on local file system for configuring DNS provider.
       --dns-zone-name string DNS suffix for this federation. Federated Service DNS names are published with this suffix.
       --dry-run dry run without sending commands to server.
-      --etcd-image string Image to use for etcd server. (default "gcr.io/google_containers/etcd:3.1.10")
+      --etcd-image string Image to use for etcd server. (default "k8s.gcr.io/etcd:3.1.10")
       --etcd-persistent-storage Use persistent volume for etcd. Defaults to 'true'. (default true)
       --etcd-pv-capacity string Size of persistent volume claim to be used for etcd. (default "10Gi")
       --etcd-pv-storage-class string The storage class of the persistent volume claim used for etcd. Must be provided if a default storage class is not enabled for the host cluster.
       --federation-system-namespace string Namespace in the host cluster where the federation system components are installed (default "federation-system")
       --host-cluster-context string Host cluster context
-      --image string Image to use for federation API server and controller manager binaries. (default "gcr.io/google_containers/hyperkube-amd64:v0.0.0-master_$Format:%h$")
+      --image string Image to use for federation API server and controller manager binaries. (default "k8s.gcr.io/hyperkube-amd64:v0.0.0-master_$Format:%h$")
       --image-pull-policy string PullPolicy describes a policy for if/when to pull a container image. The default pull policy is IfNotPresent which will not pull an image if it already exists. (default "IfNotPresent")
       --image-pull-secrets string Provide secrets that can access the private registry.
       --kubeconfig string Path to the kubeconfig file to use for CLI requests.

@ -167,7 +167,7 @@ VolumeScheduling=true|false (ALPHA - default=false)
--node-status-update-frequency duration Specifies how often kubelet posts node status to master. Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. (default 10s)
--oom-score-adj int32 The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000] (default -999)
--pod-cidr string The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.
--pod-infra-container-image string The image whose network/ipc namespaces containers in each pod will use. (default "gcr.io/google_containers/pause-amd64:3.0")
--pod-infra-container-image string The image whose network/ipc namespaces containers in each pod will use. (default "k8s.gcr.io/pause-amd64:3.0")
--pod-manifest-path string Path to the directory containing pod manifest files to run, or the path to a single pod manifest file. Files starting with dots will be ignored.
--pods-per-core int32 Number of Pods per core that can run on this Kubelet. The total number of Pods on this Kubelet cannot exceed max-pods, so max-pods will be used if this calculation results in a larger number of Pods allowed on the Kubelet. A value of 0 disables this limit.
--port int32 The port for the Kubelet to serve on. (default 10250)
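
As a sketch of how these flags compose in standalone mode, a kubelet might be started like this (the paths and values are illustrative, not recommendations):

```shell
# Illustrative standalone-mode invocation; all values are placeholders.
kubelet --pod-manifest-path=/etc/kubernetes/manifests \
    --pod-cidr=10.244.0.0/24 \
    --pods-per-core=10 \
    --port=10250
```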
@ -62,3 +62,13 @@ text | the plain text | kind is {.kind} | kind is List
?() | filter | {.users[?(@.name=="e2e")].user.password} | secret
range, end | iterate list | {range .items[*]}[{.metadata.name}, {.status.capacity}] {end} | [127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]]
"" | quote interpreted string | {range .items[*]}{.metadata.name}{"\t"}{end} | 127.0.0.1 127.0.0.2

Below are some examples using jsonpath:

```shell
$ kubectl get pods -o json
$ kubectl get pods -o=jsonpath='{@}'
$ kubectl get pods -o=jsonpath='{.items[0]}'
$ kubectl get pods -o=jsonpath='{.items[0].metadata.name}'
$ kubectl get pods -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.startTime}{"\n"}{end}'
```
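
On a hypothetical cluster, the last command might print something like the following (the pod names and timestamps are made up):

```shell
# Hypothetical output of the range example above:
kube-dns-1187388186-wn0t5    2018-01-10T09:15:21Z
kube-proxy-x8zm4             2018-01-10T09:14:03Z
```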
@ -35,7 +35,7 @@ HighAvailability=true|false (ALPHA - default=false)
SelfHosting=true|false (BETA - default=false)
StoreCertsInSecrets=true|false (ALPHA - default=false)
SupportIPVSProxyMode=true|false (ALPHA - default=false)
--image-repository string Choose a container registry to pull control plane images from (default "gcr.io/google_containers")
--image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io")
--kubeconfig string The KubeConfig file to use when talking to the cluster (default "/etc/kubernetes/admin.conf")
--kubernetes-version string Choose a specific Kubernetes version for the control plane (default "stable-1.8")
--pod-network-cidr string The range of IP addresses used for the Pod network
@ -24,7 +24,7 @@ HighAvailability=true|false (ALPHA - default=false)
SelfHosting=true|false (BETA - default=false)
StoreCertsInSecrets=true|false (ALPHA - default=false)
SupportIPVSProxyMode=true|false (ALPHA - default=false)
--image-repository string Choose a container registry to pull control plane images from (default "gcr.io/google_containers")
--image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io")
--kubeconfig string The KubeConfig file to use when talking to the cluster (default "/etc/kubernetes/admin.conf")
--kubernetes-version string Choose a specific Kubernetes version for the control plane (default "stable-1.8")
--service-cidr string The range of IP addresses used for service VIPs (default "10.96.0.0/12")
@ -18,7 +18,7 @@ kubeadm alpha phase addon kube-proxy
--apiserver-advertise-address string The IP address or DNS name the API server is accessible on
--apiserver-bind-port int32 The port the API server is accessible on (default 6443)
--config string Path to a kubeadm config file. WARNING: Usage of a configuration file is experimental!
--image-repository string Choose a container registry to pull control plane images from (default "gcr.io/google_containers")
--image-repository string Choose a container registry to pull control plane images from (default "k8s.gcr.io")
--kubeconfig string The KubeConfig file to use when talking to the cluster (default "/etc/kubernetes/admin.conf")
--kubernetes-version string Choose a specific Kubernetes version for the control plane (default "stable-1.8")
--pod-network-cidr string The range of IP addresses used for the Pod network
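
A sketch of how these flags might be combined (the version and CIDR below are illustrative):

```shell
# Illustrative invocation; values are placeholders.
kubeadm alpha phase addon kube-proxy \
    --kubeconfig=/etc/kubernetes/admin.conf \
    --kubernetes-version=v1.9.0 \
    --pod-network-cidr=10.244.0.0/16
```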
@ -224,6 +224,8 @@ Alternatively, you can use [kubeadm config](kubeadm-config.md).
You can install all the available addons with the `all` subcommand, or
install them selectively.

Please note that if kubeadm is invoked with `--feature-gates=CoreDNS`, CoreDNS is installed instead of `kube-dns`.
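
For example, assuming the usual `key=value` feature-gate syntax, that invocation would look like:

```shell
# Installs CoreDNS instead of kube-dns as part of cluster bring-up.
kubeadm init --feature-gates=CoreDNS=true
```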

{% capture addon-all %}
{% include_relative generated/kubeadm_alpha_phase_addon_all.md %}
{% endcapture %}
@ -31,6 +31,13 @@ following steps:
API server, each with its own identity, as well as an additional
kubeconfig file for administration named `admin.conf`.

1. If kubeadm is invoked with `--feature-gates=DynamicKubeletConfig` enabled,
   it writes the kubelet init configuration into the `/var/lib/kubelet/config/init/kubelet` file.
   See [Set Kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file.md)
   and [Reconfigure a Node's Kubelet in a Live Cluster](/docs/tasks/administer-cluster/reconfigure-kubelet.md)
   for more information about Dynamic Kubelet Configuration.
   This functionality is disabled by default because it is behind a feature gate, but it is expected to become the default in future versions.

1. Generates static Pod manifests for the API server,
   controller manager and scheduler. In case an external etcd is not provided,
   an additional static Pod manifest is generated for etcd.
@ -40,6 +47,12 @@ following steps:

Once control plane Pods are up and running, the `kubeadm init` sequence can continue.

1. If kubeadm is invoked with `--feature-gates=DynamicKubeletConfig` enabled,
   it completes the kubelet dynamic configuration by creating a ConfigMap and some RBAC rules that enable
   kubelets to access it, and updates the node by pointing `Node.spec.configSource` to the
   newly-created ConfigMap.
   This functionality is disabled by default because it is behind a feature gate, but it is expected to become the default in future versions.

1. Apply labels and taints to the master node so that no additional workloads will
   run there.
@ -120,6 +133,18 @@ controllerManagerExtraArgs:
schedulerExtraArgs:
  <argument>: <value|string>
  <argument>: <value|string>
apiServerExtraVolumes:
- name: <value|string>
  hostPath: <value|string>
  mountPath: <value|string>
controllerManagerExtraVolumes:
- name: <value|string>
  hostPath: <value|string>
  mountPath: <value|string>
schedulerExtraVolumes:
- name: <value|string>
  hostPath: <value|string>
  mountPath: <value|string>
apiServerCertSANs:
- <name1|string>
- <name2|string>
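
Assuming the snippet above is part of a complete configuration file (the file name here is arbitrary), it would be passed to kubeadm like this:

```shell
# kubeadm-config.yaml is an arbitrary file name.
kubeadm init --config kubeadm-config.yaml
```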
@ -156,7 +181,7 @@ More information on custom arguments can be found here:

### Using custom images {#custom-images}

By default, kubeadm pulls images from `gcr.io/google_containers`, unless
By default, kubeadm pulls images from `k8s.gcr.io`, unless
the requested Kubernetes version is a CI version. In this case,
`gcr.io/kubernetes-ci-images` is used.
@ -164,9 +189,9 @@ You can override this behavior by using [kubeadm with a configuration file](#con
Allowed customizations are:

* To provide an alternative `imageRepository` to be used instead of
  `gcr.io/google_containers`.
  `k8s.gcr.io`.
* To provide a `unifiedControlPlaneImage` to be used instead of different images for control plane components.
* To provide a specific `etcd.image` to be used instead of the image available at `gcr.io/google_containers`.
* To provide a specific `etcd.image` to be used instead of the image available at `k8s.gcr.io`.
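
As a minimal sketch of such a configuration, assuming a private registry at `registry.example.com` (a made-up host) and the `MasterConfiguration` kind accepted by `kubeadm init --config`:

```shell
# registry.example.com is a placeholder; substitute your own registry.
cat <<EOF > kubeadm-custom-images.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
imageRepository: registry.example.com/kubernetes
EOF
kubeadm init --config kubeadm-custom-images.yaml
```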

### Using custom certificates {#custom-certificates}
@ -370,15 +395,15 @@ For running kubeadm without an internet connection you have to pre-pull the requ

| Image Name | v1.8 release branch version | v1.9 release branch version |
|----------------------------------------------------------|-----------------------------|-----------------------------|
| gcr.io/google_containers/kube-apiserver-${ARCH} | v1.8.x | v1.9.x |
| gcr.io/google_containers/kube-controller-manager-${ARCH} | v1.8.x | v1.9.x |
| gcr.io/google_containers/kube-scheduler-${ARCH} | v1.8.x | v1.9.x |
| gcr.io/google_containers/kube-proxy-${ARCH} | v1.8.x | v1.9.x |
| gcr.io/google_containers/etcd-${ARCH} | 3.0.17 | 3.1.10 |
| gcr.io/google_containers/pause-${ARCH} | 3.0 | 3.0 |
| gcr.io/google_containers/k8s-dns-sidecar-${ARCH} | 1.14.5 | 1.14.7 |
| gcr.io/google_containers/k8s-dns-kube-dns-${ARCH} | 1.14.5 | 1.14.7 |
| gcr.io/google_containers/k8s-dns-dnsmasq-nanny-${ARCH} | 1.14.5 | 1.14.7 |
| k8s.gcr.io/kube-apiserver-${ARCH} | v1.8.x | v1.9.x |
| k8s.gcr.io/kube-controller-manager-${ARCH} | v1.8.x | v1.9.x |
| k8s.gcr.io/kube-scheduler-${ARCH} | v1.8.x | v1.9.x |
| k8s.gcr.io/kube-proxy-${ARCH} | v1.8.x | v1.9.x |
| k8s.gcr.io/etcd-${ARCH} | 3.0.17 | 3.1.10 |
| k8s.gcr.io/pause-${ARCH} | 3.0 | 3.0 |
| k8s.gcr.io/k8s-dns-sidecar-${ARCH} | 1.14.5 | 1.14.7 |
| k8s.gcr.io/k8s-dns-kube-dns-${ARCH} | 1.14.5 | 1.14.7 |
| k8s.gcr.io/k8s-dns-dnsmasq-nanny-${ARCH} | 1.14.5 | 1.14.7 |

Here `v1.8.x` means the "latest patch release of the v1.8 branch".
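
One way to pre-pull them, assuming Docker as the container runtime and `amd64` machines (adjust the architecture and tags to match the table above):

```bash
# Assumes Docker and amd64; tags follow the v1.9 column of the table.
ARCH=amd64
VERSION=v1.9.0
for IMAGE in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
  docker pull k8s.gcr.io/${IMAGE}-${ARCH}:${VERSION}
done
docker pull k8s.gcr.io/etcd-${ARCH}:3.1.10
docker pull k8s.gcr.io/pause-${ARCH}:3.0
for IMAGE in k8s-dns-sidecar k8s-dns-kube-dns k8s-dns-dnsmasq-nanny; do
  docker pull k8s.gcr.io/${IMAGE}-${ARCH}:1.14.7
done
```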
@ -21,6 +21,13 @@ This action consists of the following steps:
authenticity of that data. The root CA can also be discovered directly via a
file or URL.

1. If kubeadm is invoked with `--feature-gates=DynamicKubeletConfig` enabled,
   it first retrieves the kubelet init configuration from the master and writes it to
   the disk. When the kubelet starts up, kubeadm updates the `Node.spec.configSource` property of the node.
   See [Set Kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file.md)
   and [Reconfigure a Node's Kubelet in a Live Cluster](/docs/tasks/administer-cluster/reconfigure-kubelet.md)
   for more information about Dynamic Kubelet Configuration.

1. Once the cluster information is known, kubelet can start the TLS bootstrapping
   process.
@ -5,7 +5,7 @@ approvers:
- jbeda
title: Overview of kubeadm
---
Kubeadm is a tool built to provide `kubeadm init` and `kubeadm join` as best-practice “fast paths” for creating Kubernetes clusters.
<img src="https://raw.githubusercontent.com/cncf/artwork/master/kubernetes/certified-kubernetes/versionless/color/certified_kubernetes_color.png" align="right" width="150px">Kubeadm is a tool built to provide `kubeadm init` and `kubeadm join` as best-practice “fast paths” for creating Kubernetes clusters.

kubeadm performs the actions necessary to get a minimum viable cluster up and running. By design, it cares only about bootstrapping, not about provisioning machines. Likewise, installing various nice-to-have addons, like the Kubernetes Dashboard, monitoring solutions, and cloud-specific addons, is not in scope.
@ -5643,7 +5643,7 @@ Appears In <a href="#nodestatus-v1">NodeStatus</a> </aside>
<tbody>
<tr>
<td>names <br /> <em>string array</em></td>
<td>Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</td>
<td>Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</td>
</tr>
<tr>
<td>sizeBytes <br /> <em>integer</em></td>
@ -6169,7 +6169,7 @@ Appears In <a href="#nodestatus-v1-core">NodeStatus</a> </aside>
<tbody>
<tr>
<td>names <br /> <em>string array</em></td>
<td>Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</td>
<td>Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</td>
</tr>
<tr>
<td>sizeBytes <br /> <em>integer</em></td>
@ -7181,7 +7181,7 @@ Appears In:
<tbody>
<tr>
<td>names <br /> <em>string array</em></td>
<td>Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</td>
<td>Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</td>
</tr>
<tr>
<td>sizeBytes <br /> <em>integer</em></td>
@ -9,12 +9,12 @@ title: Using kubeadm to Create a Cluster

{% capture overview %}

**kubeadm** is a toolkit that help you bootstrap a best-practice Kubernetes
<img src="https://raw.githubusercontent.com/cncf/artwork/master/kubernetes/certified-kubernetes/versionless/color/certified_kubernetes_color.png" align="right" width="150px">**kubeadm** is a toolkit that helps you bootstrap a best-practice Kubernetes
cluster in an easy, reasonably secure and extensible way. It also supports
managing [Bootstrap Tokens](#TODO) for you and upgrading/downgrading clusters.
managing [Bootstrap Tokens](/docs/admin/bootstrap-tokens/) for you and upgrading/downgrading clusters.

kubeadm aims to set up a minimum viable cluster that passes the
[Kubernetes Conformance tests](#TODO), but installing other addons than
[Kubernetes Conformance tests](http://blog.kubernetes.io/2017/10/software-conformance-certification.html), but installing other addons than
those really necessary for a functional cluster is out of scope.

By design, it does not install a networking solution for you, which means you
@ -26,27 +26,29 @@ matter, can be a Linux laptop, virtual machine, physical/cloud server or
Raspberry Pi. This makes kubeadm well suited to integrate with provisioning
systems of different kinds (e.g. Terraform, Ansible, etc.).

kubeadm is designed to be a good way for new users to start trying
Kubernetes out, possibly for the first time, an way for existing users to
test their application on and stich together a cluster easily and to be
a building block in a larger ecosystem and/or installer tool with a larger
kubeadm is designed to be a simple way for new users to start trying
Kubernetes out, possibly for the first time, a way for existing users to
test their applications on and stitch together a cluster easily, and also to be
a building block in other ecosystems and/or installer tools with a larger
scope.

You can install _kubeadm_ very easily on operating systems that support
installing deb or rpm packages. The responsible SIG for kubeadm,
[SIG Cluster Lifecycle](#TODO), provides these packages pre-built for you,
[SIG Cluster Lifecycle](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle), provides these packages pre-built for you,
but you may also build them for other OSes.

### kubeadm Maturity

| Area | Maturity Level |
|-----------------|--------------- |
|---------------------------|--------------- |
| Command line UX | beta |
| Implementation | beta |
| Config file API | alpha |
| Self-hosting | alpha |
| `kubeadm alpha` | alpha |
| kubeadm alpha subcommands | alpha |
| CoreDNS | alpha |
| DynamicKubeletConfig | alpha |

kubeadm's overall feature state is **Beta** and will soon be graduated to
@ -64,12 +66,12 @@ period a patch release may be issued from the release branch if a severe bug or
security issue is found. Here are the latest Kubernetes releases and the support
timeframe, which also applies to `kubeadm`.

| Kubernetes version | Release date | End-of-life-month |
|--------------------|--------------|-------------------|
| v1.6.x | TODO | December 2017 |
| v1.7.x | TODO | March 2018 |
| v1.8.x | TODO | June 2018 |
| v1.9.x | TODO | September 2018 |
| Kubernetes version | Release month | End-of-life-month |
|--------------------|----------------|-------------------|
| v1.6.x | March 2017 | December 2017 |
| v1.7.x | June 2017 | March 2018 |
| v1.8.x | September 2017 | June 2018 |
| v1.9.x | December 2017 | September 2018 |

{% endcapture %}
@ -120,7 +122,7 @@ kubeadm init

**Notes:**

- Please refer to the [kubeadm reference guide](/docs/reference/setup-tools/kubeadm/) if you want to
- Please refer to the [kubeadm reference guide](/docs/reference/setup-tools/kubeadm/kubeadm/) if you want to
  read more about the flags `kubeadm init` provides. You can also specify a
  [configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file) instead of using flags.
- You need to choose a Pod Network Plugin in the next step. Depending on what
@ -330,7 +332,7 @@ please see [here](https://kubernetes.io/docs/concepts/cluster-administration/net

The official Weave Net set-up guide is [here](https://www.weave.works/docs/net/latest/kube-addon/).

**Note:** Weave Net works on `amd64`, `arm` and `arm64` without any extra action required.
**Note:** Weave Net works on `amd64`, `arm`, `arm64` and `ppc64le` without any extra action required.
Weave Net sets hairpin mode by default. This allows Pods to access themselves via their Service IP address
if they don't know their PodIP.
@ -386,7 +388,7 @@ The nodes are where your workloads (containers and pods, etc) run. To add new no
kubeadm join --token <token> <master-ip>:<master-port> --discovery-token-ca-cert-hash sha256:<hash>
```

**Note:** To specify an IPv6 tuple for <master-ip>:<master-port>, IPv6 address must be enclosed in square brackets, for example: `[fd00::101]:2073`.
**Note:** To specify an IPv6 tuple for `<master-ip>:<master-port>`, the IPv6 address must be enclosed in square brackets, for example: `[fd00::101]:2073`.
{: .note}
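
For instance, reusing the example address from the note, a hypothetical IPv6 join would be:

```shell
# Hypothetical: token, hash and address are placeholders.
kubeadm join --token <token> [fd00::101]:2073 --discovery-token-ca-cert-hash sha256:<hash>
```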

The output should look something like:
@ -4,7 +4,7 @@ title: Installing kubeadm

{% capture overview %}

This page shows how to use install the `kubeadm` toolbox.
<img src="https://raw.githubusercontent.com/cncf/artwork/master/kubernetes/certified-kubernetes/versionless/color/certified_kubernetes_color.png" align="right" width="150px">This page shows how to install the `kubeadm` toolbox.
For information on how to create a cluster with kubeadm once you have performed this installation process,
see the [Using kubeadm to Create a Cluster](/docs/setup/independent/create-cluster-kubeadm/) page.
@ -19,6 +19,7 @@ see the [Using kubeadm to Create a Cluster](/docs/setup/independent/create-clust
- RHEL 7
- Fedora 25/26 (best-effort)
- HypriotOS v1.0.1+
- Container Linux (tested with 1576.4.0)
* 2 GB or more of RAM per machine (any less will leave little room for your apps)
* 2 CPUs or more
* Full network connectivity between all machines in the cluster (public or private network is fine)
@ -127,6 +128,16 @@ systemctl enable docker && systemctl start docker

{% endcapture %}

{% capture docker_coreos %}

Enable and start Docker:

```bash
systemctl enable docker && systemctl start docker
```

{% endcapture %}

**Note**: Make sure that the cgroup driver used by kubelet is the same as the one used by
Docker. To ensure compatibility you can either update Docker, like so:
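
A minimal sketch of one way to do that, assuming Docker reads its options from `/etc/docker/daemon.json` and the kubelet is using the `systemd` driver:

```bash
# Align Docker's cgroup driver with the kubelet's (here: systemd),
# then restart Docker for the change to take effect.
cat <<EOF > /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker
```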
|
@ -142,8 +153,8 @@ and restart Docker. Or ensure the `--cgroup-driver` kubelet flag is set to the s
|
|||
as Docker (e.g. `cgroupfs`).
|
||||
|
||||
{% assign tab_set_name = "docker_install" %}
|
||||
{% assign tab_names = "Ubuntu, Debian or HypriotOS;CentOS, RHEL or Fedora" | split: ';' | compact %}
|
||||
{% assign tab_contents = site.emptyArray | push: docker_ubuntu | push: docker_centos %}
|
||||
{% assign tab_names = "Ubuntu, Debian or HypriotOS;CentOS, RHEL or Fedora; Container Linux" | split: ';' | compact %}
|
||||
{% assign tab_contents = site.emptyArray | push: docker_ubuntu | push: docker_centos | push: docker_coreos %}
|
||||
|
||||
{% include tabs.md %}
|
||||
|
||||
|
@ -220,9 +231,42 @@ systemctl enable kubelet && systemctl start kubelet

{% endcapture %}

{% capture coreos %}

Install CNI plugins (required for most pod networks):

```bash
CNI_VERSION="v0.6.0"
mkdir -p /opt/cni/bin
curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-amd64-${CNI_VERSION}.tgz" | tar -C /opt/cni/bin -xz
```

Install `kubeadm`, `kubelet`, `kubectl` and add a `kubelet` systemd service:

```bash
RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)"

mkdir -p /opt/bin
cd /opt/bin
curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl}
chmod +x {kubeadm,kubelet,kubectl}

curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service
mkdir -p /etc/systemd/system/kubelet.service.d
curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
```

Enable and start `kubelet`:

```bash
systemctl enable kubelet && systemctl start kubelet
```

{% endcapture %}

{% assign tab_set_name = "k8s_install" %}
{% assign tab_names = "Ubuntu, Debian or HypriotOS;CentOS, RHEL or Fedora" | split: ';' | compact %}
{% assign tab_contents = site.emptyArray | push: ubuntu | push: centos %}
{% assign tab_names = "Ubuntu, Debian or HypriotOS;CentOS, RHEL or Fedora;Container Linux" | split: ';' | compact %}
{% assign tab_contents = site.emptyArray | push: ubuntu | push: centos | push: coreos %}

{% include tabs.md %}
@ -34,13 +34,32 @@ If you see the following warnings while running `kubeadm init`

Then you may be missing `ebtables`, `ethtool` or a similar executable on your Linux machine. You can install them with the following commands:

```
# For ubuntu/debian users, try
apt install ebtables ethtool
- For ubuntu/debian users, run `apt install ebtables ethtool`.
- For CentOS/Fedora users, run `yum install ebtables ethtool`.

#### kubeadm blocks waiting for control plane during installation

If you notice that `kubeadm init` hangs after printing out the following line:

# For CentOS/Fedora users, try
yum install ebtables ethtool
```
[apiclient] Created API client, waiting for the control plane to become ready
```

This may be caused by a number of problems. The most common are:

- network connection problems. Check that your machine has full network connectivity before continuing.
- the default cgroup driver configuration for the kubelet differs from that used by Docker.
  Check the system log file (e.g. `/var/log/messages`) or examine the output from `journalctl -u kubelet`. If you see something like the following:

  ```shell
  error: failed to run Kubelet: failed to create kubelet:
  misconfiguration: kubelet cgroup driver: "systemd" is different from docker cgroup driver: "cgroupfs"
  ```

  you will need to fix the cgroup driver problem by following the instructions
  [here](/docs/setup/independent/install-kubeadm/#installing-docker).
- control plane Docker containers are crashlooping or hanging. You can check this by running `docker ps` and investigating each container by running `docker logs`, as sketched below.
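
A quick sketch of that inspection (the `grep` filter and container ID are illustrative):

```shell
# List all containers, including exited ones, that look control-plane related.
docker ps -a | grep kube
# Then inspect the logs of any suspect container by ID or name.
docker logs <container-id>
```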
#### Pods in `RunContainerError`, `CrashLoopBackOff` or `Error` state
@ -9,7 +9,7 @@ metadata:
spec:
  containers:
  - name: master
    image: gcr.io/google_containers/redis:v1
    image: k8s.gcr.io/redis:v1
    env:
    - name: MASTER
      value: "true"
@ -42,9 +42,9 @@ spec:
serviceAccountName: cloud-controller-manager
containers:
- name: cloud-controller-manager
  # for in-tree providers we use gcr.io/google_containers/cloud-controller-manager
  # for in-tree providers we use k8s.gcr.io/cloud-controller-manager
  # this can be replaced with any other image for out-of-tree providers
  image: gcr.io/google_containers/cloud-controller-manager:v1.8.0
  image: k8s.gcr.io/cloud-controller-manager:v1.8.0
  command:
  - /usr/local/bin/cloud-controller-manager
  - --cloud-provider=<YOUR_CLOUD_PROVIDER> # Add your own cloud provider here!
@ -13,7 +13,7 @@ spec:
spec:
  containers:
  - name: autoscaler
    image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
    image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.0.0
    resources:
      requests:
        cpu: "20m"
@ -1,7 +1,7 @@
---
approvers:
- smarterclayton
title: Encrypting data at rest
title: Encrypting Secret Data at Rest
---

{% capture overview %}
@ -66,7 +66,7 @@ $ kubectl create -f docs/admin/namespaces/namespace-dev.json

And then let's create the production namespace using kubectl.

```shell
$ kubectl create -f docs/admin/namespaces/namespace-prod.json
$ kubectl create -f docs/tasks/administer-cluster/namespace-prod.json
```

To be sure things are right, let's list all of the namespaces in our cluster.
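
For example, assuming the two namespaces were named `development` and `production`, the listing might look like this (the AGE values will differ):

```shell
$ kubectl get namespaces
NAME          STATUS    AGE
default       Active    13m
development   Active    1m
production    Active    1m
```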
@ -152,7 +152,7 @@ $ kubectl create -f docs/admin/namespaces/namespace-dev.json

And then let's create the production namespace using kubectl.

```shell
$ kubectl create -f docs/admin/namespaces/namespace-prod.json
$ kubectl create -f docs/tasks/administer-cluster/namespace-prod.json
```

To be sure things are right, list all of the namespaces in our cluster.