Merge pull request #39247 from sftim/20230303_new_reg_ja

[ja] Replace k8s.gcr.io with registry.k8s.io
Kubernetes Prow Robot 2023-02-06 04:15:00 -08:00 committed by GitHub
commit c7a0cb0265
45 changed files with 150 additions and 95 deletions


@@ -601,7 +601,7 @@ Conditions:
Events:
FirstSeen LastSeen Count From SubobjectPath Reason Message
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {scheduler } scheduled Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "k8s.gcr.io/pause:0.8.0" already present on machine
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "registry.k8s.io/pause:0.8.0" already present on machine
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD created Created with docker id 6a41280f516d
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD started Started with docker id 6a41280f516d
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} spec.containers{simmemleak} created Created with docker id 87348f12526a


@@ -779,7 +779,7 @@ metadata:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
command: [ "/bin/sh", "-c", "env" ]
envFrom:
- secretRef:
@@ -994,7 +994,7 @@ spec:
secretName: dotfile-secret
containers:
- name: dotfile-test-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
command:
- ls
- "-l"


@@ -130,7 +130,7 @@ metadata:
spec:
containers:
- name: cuda-test
image: "k8s.gcr.io/cuda-vector-add:v0.1"
image: "registry.k8s.io/cuda-vector-add:v0.1"
resources:
limits:
nvidia.com/gpu: 1


@@ -154,7 +154,7 @@ spec:
path: /any/path/it/will/be/replaced
containers:
- name: pv-recycler
image: "k8s.gcr.io/busybox"
image: "registry.k8s.io/busybox"
command: ["/bin/sh", "-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"]
volumeMounts:
- name: vol


@@ -76,7 +76,7 @@ metadata:
name: test-ebs
spec:
containers:
- image: k8s.gcr.io/test-webserver
- image: registry.k8s.io/test-webserver
name: test-container
volumeMounts:
- mountPath: /test-ebs
@@ -163,7 +163,7 @@ metadata:
name: test-cinder
spec:
containers:
- image: k8s.gcr.io/test-webserver
- image: registry.k8s.io/test-webserver
name: test-cinder-container
volumeMounts:
- mountPath: /test-cinder
@@ -274,7 +274,7 @@ metadata:
name: test-pd
spec:
containers:
- image: k8s.gcr.io/test-webserver
- image: registry.k8s.io/test-webserver
name: test-container
volumeMounts:
- mountPath: /cache
@@ -349,7 +349,7 @@ metadata:
name: test-pd
spec:
containers:
- image: k8s.gcr.io/test-webserver
- image: registry.k8s.io/test-webserver
name: test-container
volumeMounts:
- mountPath: /test-pd
@@ -507,7 +507,7 @@ metadata:
name: test-pd
spec:
containers:
- image: k8s.gcr.io/test-webserver
- image: registry.k8s.io/test-webserver
name: test-container
volumeMounts:
- mountPath: /test-pd
@@ -536,7 +536,7 @@ spec:
spec:
containers:
- name: test-webserver
image: k8s.gcr.io/test-webserver:latest
image: registry.k8s.io/test-webserver:latest
volumeMounts:
- mountPath: /var/local/aaa
name: mydir
@@ -666,7 +666,7 @@ metadata:
name: test-portworx-volume-pod
spec:
containers:
- image: k8s.gcr.io/test-webserver
- image: registry.k8s.io/test-webserver
name: test-container
volumeMounts:
- mountPath: /mnt
@@ -848,7 +848,7 @@ metadata:
name: test-vmdk
spec:
containers:
- image: k8s.gcr.io/test-webserver
- image: registry.k8s.io/test-webserver
name: test-container
volumeMounts:
- mountPath: /test-vmdk


@@ -72,7 +72,7 @@ spec:
terminationGracePeriodSeconds: 10
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
image: registry.k8s.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web


@@ -359,8 +359,8 @@ kubectl api-resources --api-group=extensions # All resources in the "extensions" API group
# Display the names of all images running in the cluster
kubectl get pods -A -o=custom-columns='DATA:spec.containers[*].image'
# Display all image names except "k8s.gcr.io/coredns:1.6.2"
kubectl get pods -A -o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr.io/coredns:1.6.2")].image'
# Display all image names except "registry.k8s.io/coredns:1.6.2"
kubectl get pods -A -o=custom-columns='DATA:spec.containers[?(@.image!="registry.k8s.io/coredns:1.6.2")].image'
# Display all fields under metadata, regardless of their names
kubectl get pods -A -o=custom-columns='DATA:metadata.*'
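Given this page's registry migration, a related check may be useful here; a minimal sketch (the `jsonpath` expression and `grep` pattern are illustrative additions, not part of the original cheat sheet):

```shell
# List unique container images cluster-wide that are still pulled from the legacy registry
kubectl get pods -A -o jsonpath="{.items[*].spec.containers[*].image}" \
  | tr ' ' '\n' | sort -u | grep '^k8s.gcr.io/'
```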


@@ -54,7 +54,7 @@ Kubernetes features that Minikube supports:
Create a Kubernetes Deployment using the existing image `echoserver` (a simple HTTP server), and expose it on port 8080 using `--port`.
```shell
kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.10
kubectl create deployment hello-minikube --image=registry.k8s.io/echoserver:1.10
```
The output is similar to this:


@@ -30,7 +30,7 @@ when using kubeadm to set up a Kubernetes cluster.
document assumes these default ports. However, they are configurable through
the kubeadm config file.
* Each host must [have docker, kubelet, and kubeadm installed](/ja/docs/setup/production-environment/tools/kubeadm/install-kubeadm/).
* Each host should have access to the Kubernetes container image registry (`k8s.gcr.io`) or list/pull the required etcd image using `kubeadm config images list/pull`. This guide will set up etcd instances as [static pods](/docs/tasks/configure-pod-container/static-pod/) managed by a kubelet.
* Each host should have access to the Kubernetes container image registry (`registry.k8s.io`) or list/pull the required etcd image using `kubeadm config images list/pull`. This guide will set up etcd instances as [static pods](/docs/tasks/configure-pod-container/static-pod/) managed by a kubelet.
* Some infrastructure to copy files between hosts. For example `ssh` and `scp`
can satisfy this requirement.
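A minimal sketch of the `kubeadm config images list/pull` step mentioned in the prerequisites above (`--image-repository` is a standard kubeadm flag; it is redundant on releases that already default to `registry.k8s.io`):

```shell
# Show the images kubeadm requires for this release
kubeadm config images list --image-repository registry.k8s.io

# Pre-pull them on each etcd host
kubeadm config images pull --image-repository registry.k8s.io
```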
@@ -251,7 +251,7 @@ this example.
```sh
docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \
-v /etc/kubernetes:/etc/kubernetes registry.k8s.io/etcd:${ETCD_TAG} etcdctl \
--cert /etc/kubernetes/pki/etcd/peer.crt \
--key /etc/kubernetes/pki/etcd/peer.key \
--cacert /etc/kubernetes/pki/etcd/ca.crt \


@@ -36,7 +36,7 @@ disable the checks by the Readiness Probe, and these appli…
Many applications running for long periods of time eventually transition to broken states from which they cannot recover except by being restarted.
Kubernetes provides liveness probes to detect and remedy such situations.
In this exercise, you create a Pod that runs a container based on the `k8s.gcr.io/busybox` image.
In this exercise, you create a Pod that runs a container based on the `registry.k8s.io/busybox` image.
Here is the configuration file for the Pod:
{{< codenew file="pods/probe/exec-liveness.yaml" >}}
@@ -76,8 +76,8 @@ kubectl describe pod liveness-exec
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
24s 24s 1 {default-scheduler } Normal Scheduled Successfully assigned liveness-exec to worker0
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulling pulling image "k8s.gcr.io/busybox"
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulled Successfully pulled image "k8s.gcr.io/busybox"
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulling pulling image "registry.k8s.io/busybox"
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Pulled Successfully pulled image "registry.k8s.io/busybox"
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Created Created container with docker id 86849c15382e; Security:[seccomp=unconfined]
23s 23s 1 {kubelet worker0} spec.containers{liveness} Normal Started Started container with docker id 86849c15382e
```
@@ -94,8 +94,8 @@ kubectl describe pod liveness-exec
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
37s 37s 1 {default-scheduler } Normal Scheduled Successfully assigned liveness-exec to worker0
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Pulling pulling image "k8s.gcr.io/busybox"
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Pulled Successfully pulled image "k8s.gcr.io/busybox"
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Pulling pulling image "registry.k8s.io/busybox"
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Pulled Successfully pulled image "registry.k8s.io/busybox"
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Created Created container with docker id 86849c15382e; Security:[seccomp=unconfined]
36s 36s 1 {kubelet worker0} spec.containers{liveness} Normal Started Started container with docker id 86849c15382e
2s 2s 1 {kubelet worker0} spec.containers{liveness} Warning Unhealthy Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory
@@ -117,7 +117,7 @@ liveness-exec 1/1 Running 1 1m
## Define a liveness probe with an HTTP request {#define-a-liveness-http-request}
Another kind of liveness probe uses an HTTP GET request.
The following configuration file creates a Pod that runs a container based on the `k8s.gcr.io/liveness` image.
The following configuration file creates a Pod that runs a container based on the `registry.k8s.io/liveness` image.
{{< codenew file="pods/probe/http-liveness.yaml" >}}


@@ -37,7 +37,7 @@ kubectl exec <POD-NAME> -c <CONTAINER-NAME> -- <COMMAND>
```shell
kubectl create deployment hostnames --image=k8s.gcr.io/serve_hostname
kubectl create deployment hostnames --image=registry.k8s.io/serve_hostname
```
```none
deployment.apps/hostnames created
@@ -76,7 +76,7 @@ spec:
spec:
containers:
- name: hostnames
image: k8s.gcr.io/serve_hostname
image: registry.k8s.io/serve_hostname
```
"app"ラベルは`kubectl create deployment`によって、Deploymentの名前に自動的にセットされます。


@@ -46,7 +46,7 @@ spec:
containers:
- name: cuda-vector-add
# https://github.com/kubernetes/kubernetes/blob/v1.7.11/test/images/nvidia-cuda/Dockerfile
image: "k8s.gcr.io/cuda-vector-add:v0.1"
image: "registry.k8s.io/cuda-vector-add:v0.1"
resources:
limits:
nvidia.com/gpu: 1 # requesting 1 GPU
@@ -173,7 +173,7 @@ spec:
containers:
- name: cuda-vector-add
# https://github.com/kubernetes/kubernetes/blob/v1.7.11/test/images/nvidia-cuda/Dockerfile
image: "k8s.gcr.io/cuda-vector-add:v0.1"
image: "registry.k8s.io/cuda-vector-add:v0.1"
resources:
limits:
nvidia.com/gpu: 1


@@ -70,7 +70,7 @@ A Kubernetes [*Pod*](/ja/docs/concepts/workloads/pods/) is a group of one or more containers, grouped together for the purposes of container man…
1. Use the `kubectl create` command to create a Deployment that manages a Pod. The Pod runs a container based on the provided Docker image.
```shell
kubectl create deployment hello-node --image=k8s.gcr.io/echoserver:1.4
kubectl create deployment hello-node --image=registry.k8s.io/echoserver:1.4
```
2. View the Deployment:
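A sketch of that verification step (the Deployment is the `hello-node` one created above):

```shell
kubectl get deployments
```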


@@ -41,7 +41,7 @@ the target localization.
The following example uses a small nginx web server that echoes back the source IP of requests it receives through an HTTP header. You can create the web server with the following command:
```shell
kubectl create deployment source-ip-app --image=k8s.gcr.io/echoserver:1.4
kubectl create deployment source-ip-app --image=registry.k8s.io/echoserver:1.4
```
The output is similar to this:


@@ -500,9 +500,9 @@ Let's get the Pods and check their container images:
for p in 0 1 2; do kubectl get pod "web-$p" --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done
```
```
k8s.gcr.io/nginx-slim:0.8
k8s.gcr.io/nginx-slim:0.8
k8s.gcr.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.8
```
@@ -528,7 +528,7 @@ statefulset.apps/web patched
Patch the StatefulSet again to change the container image:
```shell
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"k8s.gcr.io/nginx-slim:0.7"}]'
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"registry.k8s.io/nginx-slim:0.7"}]'
```
```
statefulset.apps/web patched
@@ -562,7 +562,7 @@ Get the Pod's container image:
kubectl get pod web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
```
```
k8s.gcr.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.8
```
You can see that, even though the update strategy is `RollingUpdate`, the StatefulSet restored the Pod with its original container. This is because the Pod's ordinal index is smaller than the `partition` specified in the `updateStrategy`.
@@ -599,7 +599,7 @@ Get the Pod's container:
kubectl get pod web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
```
```
k8s.gcr.io/nginx-slim:0.7
registry.k8s.io/nginx-slim:0.7
```
@@ -640,7 +640,7 @@ web-1 1/1 Running 0 18s
kubectl get pod web-1 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
```
```
k8s.gcr.io/nginx-slim:0.8
registry.k8s.io/nginx-slim:0.8
```
Because its ordinal index is smaller than the partition, `web-1` was restored to the container image of its original configuration. When a partition is specified, all Pods with an ordinal index greater than or equal to it are updated when the StatefulSet's `.spec.template` is updated; Pods with a smaller ordinal index are restored to their original configuration if they are deleted or terminated.
@@ -688,9 +688,9 @@ Get the container image details for the Pods in the StatefulSet:
for p in 0 1 2; do kubectl get pod "web-$p" --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done
```
```
k8s.gcr.io/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.7
registry.k8s.io/nginx-slim:0.7
registry.k8s.io/nginx-slim:0.7
registry.k8s.io/nginx-slim:0.7
```
By moving the `partition` to `0`, you can allow the StatefulSet to continue the update process.
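A sketch of that step, using the same `kubectl patch` pattern as earlier on this page with the partition lowered to `0`:

```shell
kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":0}}}}'
```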


@@ -1,4 +1,4 @@
# This is an example of how to setup cloud-controller-manager as a Daemonset in your cluster.
# This is an example of how to set up cloud-controller-manager as a Daemonset in your cluster.
# It assumes that your masters can run pods and have the role node-role.kubernetes.io/master
# Note that this Daemonset will not work straight out of the box for your cloud; this is
# meant to be a guideline.
@@ -10,8 +10,8 @@ metadata:
name: cloud-controller-manager
namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:cloud-controller-manager
roleRef:
@@ -42,12 +42,12 @@ spec:
serviceAccountName: cloud-controller-manager
containers:
- name: cloud-controller-manager
# for in-tree providers we use k8s.gcr.io/cloud-controller-manager
# for in-tree providers we use registry.k8s.io/cloud-controller-manager
# this can be replaced with any other image for out-of-tree providers
image: k8s.gcr.io/cloud-controller-manager:v1.8.0
image: registry.k8s.io/cloud-controller-manager:v1.8.0
command:
- /usr/local/bin/cloud-controller-manager
- --cloud-provider=<YOUR_CLOUD_PROVIDER> # Add your own cloud provider here!
- --cloud-provider=[YOUR_CLOUD_PROVIDER] # Add your own cloud provider here!
- --leader-elect=true
- --use-service-account-credentials
# these flags will vary for every cloud provider
@@ -59,9 +59,13 @@ spec:
- key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
effect: NoSchedule
# this is to have the daemonset runnable on master nodes
# the taint may vary depending on your cluster setup
# these tolerations are to have the daemonset runnable on control plane nodes
# remove them if your control plane nodes should not run pods
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
# this is to restrict CCM to only run on master nodes
# the node selector may vary depending on your cluster setup


@@ -1,22 +1,71 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: kube-dns-autoscaler
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["apps"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
# Remove the configmaps rule once the issue below is fixed:
# kubernetes-incubator/cluster-proportional-autoscaler#16
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
subjects:
- kind: ServiceAccount
name: kube-dns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:kube-dns-autoscaler
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dns-autoscaler
name: kube-dns-autoscaler
namespace: kube-system
labels:
k8s-app: dns-autoscaler
k8s-app: kube-dns-autoscaler
kubernetes.io/cluster-service: "true"
spec:
selector:
matchLabels:
k8s-app: dns-autoscaler
k8s-app: kube-dns-autoscaler
template:
metadata:
labels:
k8s-app: dns-autoscaler
k8s-app: kube-dns-autoscaler
spec:
priorityClassName: system-cluster-critical
securityContext:
seccompProfile:
type: RuntimeDefault
supplementalGroups: [ 65534 ]
fsGroup: 65534
nodeSelector:
kubernetes.io/os: linux
containers:
- name: autoscaler
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.1
image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4
resources:
requests:
cpu: "20m"
@@ -24,10 +24,15 @@ spec:
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=dns-autoscaler
- --configmap=kube-dns-autoscaler
# Should keep target in sync with cluster/addons/dns/kube-dns.yaml.base
- --target=<SCALE_TARGET>
# When the cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}}
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true,"includeUnschedulableNodes":true}}
- --logtostderr=true
- --v=2
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
serviceAccountName: kube-dns-autoscaler
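A usage sketch for this manifest, assuming it is saved as `dns-horizontal-autoscaler.yaml` and that your DNS workload is a Deployment named `coredns` (substitute your actual scale target for the `<SCALE_TARGET>` placeholder):

```shell
# Fill in the scale target, then apply the manifest
sed -i 's|<SCALE_TARGET>|Deployment/coredns|' dns-horizontal-autoscaler.yaml
kubectl apply -f dns-horizontal-autoscaler.yaml
```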


@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: count
image: busybox
image: busybox:1.28
args:
- /bin/sh
- -c
@@ -22,7 +22,7 @@ spec:
- name: varlog
mountPath: /var/log
- name: count-agent
image: k8s.gcr.io/fluentd-gcp:1.30
image: registry.k8s.io/fluentd-gcp:1.30
env:
- name: FLUENTD_ARGS
value: -c /etc/fluentd-config/fluentd.conf


@@ -7,4 +7,4 @@ metadata:
spec:
containers:
- name: pod-with-no-annotation-container
image: k8s.gcr.io/pause:2.0
image: registry.k8s.io/pause:2.0


@@ -8,4 +8,4 @@ spec:
schedulerName: default-scheduler
containers:
- name: pod-with-default-annotation-container
image: k8s.gcr.io/pause:2.0
image: registry.k8s.io/pause:2.0


@@ -8,4 +8,4 @@ spec:
schedulerName: my-scheduler
containers:
- name: pod-with-second-annotation-container
image: k8s.gcr.io/pause:2.0
image: registry.k8s.io/pause:2.0


@@ -20,7 +20,7 @@ spec:
spec:
containers:
- name: master
image: k8s.gcr.io/redis:e2e # or just image: redis
image: registry.k8s.io/redis:e2e # or just image: redis
resources:
requests:
cpu: 100m


@@ -6,7 +6,6 @@ spec:
selector:
matchLabels:
run: php-apache
replicas: 1
template:
metadata:
labels:
@@ -14,7 +13,7 @@ spec:
spec:
containers:
- name: php-apache
image: k8s.gcr.io/hpa-example
image: registry.k8s.io/hpa-example
ports:
- containerPort: 80
resources:


@@ -30,7 +30,7 @@ spec:
spec:
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
image: registry.k8s.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web


@@ -29,7 +29,7 @@ spec:
spec:
containers:
- name: nginx
image: k8s.gcr.io/nginx-slim:0.8
image: registry.k8s.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web


@@ -27,7 +27,7 @@ spec:
selector:
app: zk
---
apiVersion: policy/v1beta1
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: zk-pdb
@@ -49,7 +49,7 @@ spec:
replicas: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
podManagementPolicy: OrderedReady
template:
metadata:
labels:
@@ -68,7 +68,7 @@ spec:
containers:
- name: kubernetes-zookeeper
imagePullPolicy: Always
image: "k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10"
image: "registry.k8s.io/kubernetes-zookeeper:1.0-3.4.10"
resources:
requests:
memory: "1Gi"


@@ -41,7 +41,7 @@ spec:
serviceAccountName: event-exporter-sa
containers:
- name: event-exporter
image: k8s.gcr.io/event-exporter:v0.2.3
image: registry.k8s.io/event-exporter:v0.2.3
command:
- '/event-exporter'
terminationGracePeriodSeconds: 30


@@ -30,7 +30,7 @@ spec:
dnsPolicy: Default
containers:
- name: fluentd-gcp
image: k8s.gcr.io/fluentd-gcp:2.0.2
image: registry.k8s.io/fluentd-gcp:2.0.2
# If fluentd consumes its own logs, the following situation may happen:
# fluentd fails to send a chunk to the server => writes it to the log =>
# tries to send this message to the server => fails to send a chunk and so on.


@@ -23,7 +23,7 @@ spec:
hostNetwork: true
containers:
- name: node-problem-detector
image: k8s.gcr.io/node-problem-detector:v0.1
image: registry.k8s.io/node-problem-detector:v0.1
securityContext:
privileged: true
resources:


@@ -23,7 +23,7 @@ spec:
hostNetwork: true
containers:
- name: node-problem-detector
image: k8s.gcr.io/node-problem-detector:v0.1
image: registry.k8s.io/node-problem-detector:v0.1
securityContext:
privileged: true
resources:


@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox:1.24
image: registry.k8s.io/busybox:1.24
command: [ "sh", "-c"]
args:
- while true; do


@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
command: [ "sh", "-c"]
args:
- while true; do


@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: client-container
image: k8s.gcr.io/busybox:1.24
image: registry.k8s.io/busybox:1.24
command: ["sh", "-c"]
args:
- while true; do
@@ -30,7 +30,6 @@ spec:
volumeMounts:
- name: podinfo
mountPath: /etc/podinfo
readOnly: false
volumes:
- name: podinfo
downwardAPI:


@@ -12,7 +12,7 @@ metadata:
spec:
containers:
- name: client-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
command: ["sh", "-c"]
args:
- while true; do
@@ -25,7 +25,6 @@ spec:
volumeMounts:
- name: podinfo
mountPath: /etc/podinfo
readOnly: false
volumes:
- name: podinfo
downwardAPI:


@@ -5,8 +5,8 @@ metadata:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ]
image: registry.k8s.io/busybox
command: [ "/bin/echo", "$(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ]
env:
- name: SPECIAL_LEVEL_KEY
valueFrom:


@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
command: [ "/bin/sh", "-c", "env" ]
envFrom:
- configMapRef:


@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
command: [ "/bin/sh","-c","cat /etc/config/keys" ]
volumeMounts:
- name: config-volume


@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
command: [ "/bin/sh", "-c", "ls /etc/config/" ]
volumeMounts:
- name: config-volume


@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
command: [ "/bin/sh", "-c", "env" ]
env:
- name: SPECIAL_LEVEL_KEY


@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: test-container
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
command: [ "/bin/sh", "-c", "env" ]
env:
# Define environment variables


@@ -8,11 +8,11 @@ spec:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/e2e-az-name
- key: topology.kubernetes.io/zone
operator: In
values:
- e2e-az1
- e2e-az2
- antarctica-east1
- antarctica-west1
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
@@ -23,4 +23,4 @@ spec:
- another-node-label-value
containers:
- name: with-node-affinity
image: k8s.gcr.io/pause:2.0
image: registry.k8s.io/pause:2.0


@@ -23,7 +23,7 @@ spec:
operator: In
values:
- S2
topologyKey: kubernetes.io/hostname
topologyKey: topology.kubernetes.io/zone
containers:
- name: with-pod-affinity
image: k8s.gcr.io/pause:2.0
image: registry.k8s.io/pause:2.0


@@ -7,7 +7,7 @@ metadata:
spec:
containers:
- name: liveness
image: k8s.gcr.io/busybox
image: registry.k8s.io/busybox
args:
- /bin/sh
- -c


@@ -7,7 +7,7 @@ metadata:
spec:
containers:
- name: liveness
image: k8s.gcr.io/liveness
image: registry.k8s.io/liveness
args:
- /server
livenessProbe:
@@ -15,7 +15,7 @@ spec:
path: /healthz
port: 8080
httpHeaders:
- name: X-Custom-Header
- name: Custom-Header
value: Awesome
initialDelaySeconds: 3
periodSeconds: 3


@@ -7,7 +7,7 @@ metadata:
spec:
containers:
- name: goproxy
image: k8s.gcr.io/goproxy:0.1
image: registry.k8s.io/goproxy:0.1
ports:
- containerPort: 8080
readinessProbe: