Consolidate YAML files [part-12] (#9364)

* Consolidate YAML files [part-12]

  Relocate YAML files referenced by the accessing application topic and the rest of cluster administration.

* Adjust json shortcodes.

parent aed6732b4e
commit ea6004bd4f

@@ -27,7 +27,7 @@ In this exercise, you create a Pod that runs two Containers. The two containers

share a Volume that they can use to communicate. Here is the configuration file
for the Pod:

{{< code file="two-container-pod.yaml" >}}
{{< codenew file="pods/two-container-pod.yaml" >}}

In the configuration file, you can see that the Pod has a Volume named
`shared-data`.

@@ -44,7 +44,7 @@ directory of the nginx server.

Create the Pod and the two Containers:

kubectl create -f https://k8s.io/docs/tasks/access-application-cluster/two-container-pod.yaml
kubectl create -f https://k8s.io/examples/pods/two-container-pod.yaml

View information about the Pod and the Containers:

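For context, a two-container Pod sharing an `emptyDir` volume is along these lines; this is a minimal sketch, and the container names, images, and mount paths here are illustrative rather than necessarily the exact contents of `pods/two-container-pod.yaml`:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: two-containers
spec:
  restartPolicy: Never
  volumes:
    # Shared scratch space that both containers mount.
    - name: shared-data
      emptyDir: {}
  containers:
    - name: nginx-container
      image: nginx
      volumeMounts:
        # nginx serves whatever the other container writes here.
        - name: shared-data
          mountPath: /usr/share/nginx/html
    - name: debian-container
      image: debian
      volumeMounts:
        - name: shared-data
          mountPath: /pod-data
      command: ["/bin/sh"]
      args: ["-c", "echo Hello from the debian container > /pod-data/index.html"]
```
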
@@ -43,12 +43,12 @@ frontend and backend are connected using a Kubernetes Service object.

The backend is a simple hello greeter microservice. Here is the configuration
file for the backend Deployment:

{{< code file="hello.yaml" >}}
{{< codenew file="service/access/hello.yaml" >}}

Create the backend Deployment:

```
kubectl create -f https://k8s.io/docs/tasks/access-application-cluster/hello.yaml
kubectl create -f https://k8s.io/examples/service/access/hello.yaml
```

View information about the backend Deployment:

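In outline, the backend Deployment labels its Pods so that the Service shown below can select them. A rough sketch (replica count, image, and port are illustrative, not necessarily the exact contents of `service/access/hello.yaml`):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello
spec:
  replicas: 3
  selector:
    matchLabels:
      app: hello
      tier: backend
  template:
    metadata:
      labels:
        # The hello Service selects Pods carrying these labels.
        app: hello
        tier: backend
    spec:
      containers:
        - name: hello
          image: gcr.io/google-samples/hello-go-gke:1.0   # illustrative image
          ports:
            - containerPort: 80
```
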
@@ -103,7 +103,7 @@ selector labels to find the Pods that it routes traffic to.

First, explore the Service configuration file:

{{< code file="hello-service.yaml" >}}
{{< codenew file="service/access/hello-service.yaml" >}}

In the configuration file, you can see that the Service routes traffic to Pods
that have the labels `app: hello` and `tier: backend`.

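A minimal sketch of such a Service, assuming the backend Pods listen on port 80 (the port number is illustrative; `service/access/hello-service.yaml` is the authoritative file):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: hello
spec:
  selector:
    # Routes traffic to the backend Pods carrying these labels.
    app: hello
    tier: backend
  ports:
    - protocol: TCP
      port: 80
```
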
@@ -111,7 +111,7 @@ that have the labels `app: hello` and `tier: backend`.

Create the `hello` Service:

```
kubectl create -f https://k8s.io/docs/tasks/access-application-cluster/hello-service.yaml
kubectl create -f https://k8s.io/examples/service/access/hello-service.yaml
```

At this point, you have a backend Deployment running, and you have a

@@ -127,18 +127,18 @@ of the `name` field in the preceding Service configuration file.

The Pods in the frontend Deployment run an nginx image that is configured
to find the hello backend Service. Here is the nginx configuration file:

{{< code file="frontend/frontend.conf" >}}
{{< codenew file="service/access/frontend.conf" >}}

Similar to the backend, the frontend has a Deployment and a Service. The
configuration for the Service has `type: LoadBalancer`, which means that
the Service uses the default load balancer of your cloud provider.

{{< code file="frontend.yaml" >}}
{{< codenew file="service/access/frontend.yaml" >}}

Create the frontend Deployment and Service:

```
kubectl create -f https://k8s.io/docs/tasks/access-application-cluster/frontend.yaml
kubectl create -f https://k8s.io/examples/service/access/frontend.yaml
```

The output verifies that both resources were created:

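The Service portion of `service/access/frontend.yaml` looks roughly like the sketch below; the selector labels and ports are illustrative, but the `type: LoadBalancer` line is what makes the frontend externally reachable:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: frontend
spec:
  selector:
    app: hello
    tier: frontend
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  # Asks the cloud provider for an external load balancer
  # and a publicly reachable IP address.
  type: LoadBalancer
```
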
@@ -149,7 +149,7 @@ service "frontend" created

```

**Note**: The nginx configuration is baked into the
[container image](/docs/tasks/access-application-cluster/frontend/Dockerfile).
[container image](/examples/service/access/Dockerfile).
A better way to do this would be to use a
[ConfigMap](/docs/tasks/configure-pod-container/configure-pod-configmap/), so
that you can change the configuration more easily.

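As the note suggests, the nginx configuration could instead be supplied through a ConfigMap and mounted into the Pod as a volume under `/etc/nginx/conf.d`. A minimal sketch, assuming a proxy to the `hello` Service (the ConfigMap name, key, and file contents are illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: frontend-nginx-conf
data:
  # Mount this ConfigMap as a volume at /etc/nginx/conf.d so the
  # configuration can be changed without rebuilding the image.
  frontend.conf: |
    upstream hello {
        server hello;
    }
    server {
        listen 80;
        location / {
            proxy_pass http://hello;
        }
    }
```
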
@@ -1,4 +0,0 @@
FROM alpine:3.1
MAINTAINER Carter Morgan <askcarter@google.com>
COPY hello /usr/bin/
CMD ["/usr/bin/hello"]

@@ -1,7 +0,0 @@
Build hello go binary first

go build -tags netgo -ldflags "-extldflags '-lm -lstdc++ -static'" .

Then build docker image

docker build -t hello .

@@ -1,74 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"

	"github.com/braintree/manners"
	"github.com/GoogleCloudPlatform/kubernetes-workshops/bundles/kubernetes-101/workshop/app/handlers"
	"github.com/GoogleCloudPlatform/kubernetes-workshops/bundles/kubernetes-101/workshop/app/health"
)

const version = "1.0.0"

func main() {
	var (
		httpAddr   = flag.String("http", "0.0.0.0:80", "HTTP service address.")
		healthAddr = flag.String("health", "0.0.0.0:81", "Health service address.")
	)
	flag.Parse()

	log.Println("Starting server...")
	log.Printf("Health service listening on %s", *healthAddr)
	log.Printf("HTTP service listening on %s", *httpAddr)

	errChan := make(chan error, 10)

	hmux := http.NewServeMux()
	hmux.HandleFunc("/healthz", health.HealthzHandler)
	hmux.HandleFunc("/readiness", health.ReadinessHandler)
	hmux.HandleFunc("/healthz/status", health.HealthzStatusHandler)
	hmux.HandleFunc("/readiness/status", health.ReadinessStatusHandler)
	healthServer := manners.NewServer()
	healthServer.Addr = *healthAddr
	healthServer.Handler = handlers.LoggingHandler(hmux)

	go func() {
		errChan <- healthServer.ListenAndServe()
	}()

	mux := http.NewServeMux()
	mux.HandleFunc("/", handlers.HelloHandler)
	mux.Handle("/secure", handlers.JWTAuthHandler(handlers.HelloHandler))
	mux.Handle("/version", handlers.VersionHandler(version))

	httpServer := manners.NewServer()
	httpServer.Addr = *httpAddr
	httpServer.Handler = handlers.LoggingHandler(mux)

	go func() {
		errChan <- httpServer.ListenAndServe()
	}()

	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)

	for {
		select {
		case err := <-errChan:
			if err != nil {
				log.Fatal(err)
			}
		case s := <-signalChan:
			log.Println(fmt.Sprintf("Captured %v. Exiting...", s))
			health.SetReadinessStatus(http.StatusServiceUnavailable)
			httpServer.BlockingClose()
			os.Exit(0)
		}
	}
}

@@ -1,33 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    name: redis
    redis-sentinel: "true"
    role: master
  name: redis-master
spec:
  containers:
    - name: master
      image: k8s.gcr.io/redis:v1
      env:
        - name: MASTER
          value: "true"
      ports:
        - containerPort: 6379
      resources:
        limits:
          cpu: "0.1"
      volumeMounts:
        - mountPath: /redis-master-data
          name: data
    - name: sentinel
      image: kubernetes/redis:v1
      env:
        - name: SENTINEL
          value: "true"
      ports:
        - containerPort: 26379
  volumes:
    - name: data
      emptyDir: {}

@@ -73,7 +73,7 @@ for this example. A [Deployment](/docs/concepts/workloads/controllers/deployment

thereby making the scheduler resilient to failures. Here is the deployment
config. Save it as `my-scheduler.yaml`:

{{< code file="my-scheduler.yaml" >}}
{{< codenew file="admin/sched/my-scheduler.yaml" >}}

An important thing to note here is that the name of the scheduler specified as an
argument to the scheduler command in the container spec should be unique. This is the name that is matched against the value of the optional `spec.schedulerName` on pods, to determine whether this scheduler is responsible for scheduling a particular pod.

@@ -149,7 +149,7 @@ scheduler in that pod spec. Let's look at three examples.

- Pod spec without any scheduler name

  {{< code file="pod1.yaml" >}}
  {{< codenew file="admin/sched/pod1.yaml" >}}

  When no scheduler name is supplied, the pod is automatically scheduled using the
  default-scheduler.

@@ -162,7 +162,7 @@ kubectl create -f pod1.yaml

- Pod spec with `default-scheduler`

  {{< code file="pod2.yaml" >}}
  {{< codenew file="admin/sched/pod2.yaml" >}}

  A scheduler is specified by supplying the scheduler name as a value to `spec.schedulerName`. In this case, we supply the name of the
  default scheduler which is `default-scheduler`.

@@ -175,7 +175,7 @@ kubectl create -f pod2.yaml

- Pod spec with `my-scheduler`

  {{< code file="pod3.yaml" >}}
  {{< codenew file="admin/sched/pod3.yaml" >}}

  In this case, we specify that this pod should be scheduled using the scheduler that we
  deployed - `my-scheduler`. Note that the value of `spec.schedulerName` should match the name supplied to the scheduler

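For illustration, a Pod spec along the lines of `admin/sched/pod3.yaml`, selecting the custom scheduler via `spec.schedulerName` (the pod name and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: scheduled-by-my-scheduler   # illustrative name
spec:
  # Matched against the scheduler name the custom scheduler was started with;
  # only that scheduler will bind this Pod to a node.
  schedulerName: my-scheduler
  containers:
    - name: pause
      image: k8s.gcr.io/pause:3.1
```
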
@@ -215,4 +215,4 @@ verify that the pods were scheduled by the desired schedulers.

kubectl get events
```

{{% /capture %}}
{{% /capture %}}

@@ -22,12 +22,12 @@ This page provides hints on diagnosing DNS problems.

Create a file named busybox.yaml with the following contents:

{{< code file="busybox.yaml" >}}
{{< codenew file="admin/dns/busybox.yaml" >}}

Then create a pod using this file and verify its status:

```shell
$ kubectl create -f busybox.yaml
$ kubectl create -f https://k8s.io/examples/admin/dns/busybox.yaml
pod "busybox" created

$ kubectl get pods busybox

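For reference, the DNS-debugging Pod used here is just a long-running busybox container that you can `kubectl exec` DNS lookups from. A minimal sketch, assuming an illustrative image tag and sleep interval (see `admin/dns/busybox.yaml` for the actual file):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
    - name: busybox
      image: busybox:1.28
      command:
        # Keep the container alive so it can be used interactively, e.g.
        #   kubectl exec -ti busybox -- nslookup kubernetes.default
        - sleep
        - "3600"
  restartPolicy: Always
```
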
@@ -94,7 +94,7 @@ container based on the `cluster-proportional-autoscaler-amd64` image.

Create a file named `dns-horizontal-autoscaler.yaml` with this content:

{{< code file="dns-horizontal-autoscaler.yaml" >}}
{{< codenew file="admin/dns/dns-horizontal-autoscaler.yaml" >}}

In the file, replace `<SCALE_TARGET>` with your scale target.

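Roughly speaking, the autoscaler Deployment passes the scale target to the `cluster-proportional-autoscaler` binary as a command-line flag, so the `<SCALE_TARGET>` placeholder ends up in the container args. A trimmed sketch (image tag, flag values, and labels are illustrative; `admin/dns/dns-horizontal-autoscaler.yaml` is the full manifest):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dns-autoscaler
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: dns-autoscaler
  template:
    metadata:
      labels:
        k8s-app: dns-autoscaler
    spec:
      containers:
        - name: autoscaler
          image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=dns-autoscaler
            # e.g. Deployment/coredns or Deployment/kube-dns
            - --target=<SCALE_TARGET>
            - --logtostderr=true
            - --v=2
```
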
@@ -67,24 +67,24 @@ One pattern this organization could follow is to partition the Kubernetes cluste

Let's create two new namespaces to hold our work.

Use the file [`namespace-dev.json`](/docs/tasks/administer-cluster/namespace-dev.json) which describes a development namespace:
Use the file [`namespace-dev.json`](/examples/admin/namespace-dev.json) which describes a development namespace:

{{< code language="json" file="namespace-dev.json" >}}
{{< codenew language="json" file="admin/namespace-dev.json" >}}

Create the development namespace using kubectl.

```shell
$ kubectl create -f https://k8s.io/docs/tasks/administer-cluster/namespace-dev.json
$ kubectl create -f https://k8s.io/examples/admin/namespace-dev.json
```

Save the following contents into file [`namespace-prod.json`](/docs/tasks/administer-cluster/namespace-prod.json) which describes a production namespace:
Save the following contents into file [`namespace-prod.json`](/examples/admin/namespace-prod.json) which describes a production namespace:

{{< code language="json" file="namespace-prod.json" >}}
{{< codenew language="json" file="admin/namespace-prod.json" >}}

And then let's create the production namespace using kubectl.

```shell
$ kubectl create -f https://k8s.io/docs/tasks/administer-cluster/namespace-prod.json
$ kubectl create -f https://k8s.io/examples/admin/namespace-prod.json
```

To be sure things are right, let's list all of the namespaces in our cluster.

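As a sketch, the YAML equivalent of what `admin/namespace-dev.json` describes is a plain Namespace object like the one below (the label is illustrative and may differ from the actual file):

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: development
  labels:
    name: development
```
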
@@ -140,20 +140,20 @@ One pattern this organization could follow is to partition the Kubernetes cluste

Let's create two new namespaces to hold our work.

Use the file [`namespace-dev.json`](/docs/tasks/administer-cluster/namespace-dev.json) which describes a development namespace:
Use the file [`namespace-dev.json`](/examples/admin/namespace-dev.json) which describes a development namespace:

{{< code language="json" file="namespace-dev.json" >}}
{{< codenew language="json" file="admin/namespace-dev.json" >}}

Create the development namespace using kubectl.

```shell
$ kubectl create -f docs/tasks/administer-cluster/namespace-dev.json
$ kubectl create -f https://k8s.io/examples/admin/namespace-dev.json
```

And then let's create the production namespace using kubectl.

```shell
$ kubectl create -f docs/tasks/administer-cluster/namespace-prod.json
$ kubectl create -f https://k8s.io/examples/admin/namespace-prod.json
```

To be sure things are right, list all of the namespaces in our cluster.

@@ -41,7 +41,7 @@ Successfully running cloud-controller-manager requires some changes to your clus

since the cloud controller manager takes over labeling persistent volumes.

* For the `cloud-controller-manager` to label persistent volumes, initializers will need to be enabled and an InitializerConfiguration needs to be added to the system. Follow [these instructions](/docs/admin/extensible-admission-controllers.md#enable-initializers-alpha-feature) to enable initializers. Use the following YAML to create the InitializerConfiguration:

  {{< code file="persistent-volume-label-initializer-config.yaml" >}}
  {{< codenew file="admin/cloud/pvl-initializer-config.yaml" >}}

Keep in mind that setting up your cluster to use cloud controller manager will change your cluster behaviour in a few ways:

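A sketch of what such an InitializerConfiguration looks like, registering an initializer for PersistentVolume objects (the initializer name is illustrative; `admin/cloud/pvl-initializer-config.yaml` is the authoritative version):

```yaml
kind: InitializerConfiguration
apiVersion: admissionregistration.k8s.io/v1alpha1
metadata:
  name: pvlabel.kubernetes.io
initializers:
  # New PersistentVolumes are held until this initializer
  # (run by cloud-controller-manager) labels them.
  - name: pvlabel.kubernetes.io
    rules:
      - apiGroups:
          - ""
        apiVersions:
          - "*"
        resources:
          - persistentvolumes
```
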
@@ -71,7 +71,7 @@ For cloud controller managers not in Kubernetes core, you can find the respectiv

For providers already in Kubernetes core, you can run the in-tree cloud controller manager as a DaemonSet in your cluster; use the following as a guideline:

{{< code file="cloud-controller-manager-daemonset-example.yaml" >}}
{{< codenew file="admin/cloud/ccm-example.yaml" >}}


## Limitations

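In outline, such a manifest runs the `cloud-controller-manager` binary on the control-plane nodes with the provider selected by flag. A trimmed sketch, assuming illustrative image, flags, and tolerations (`admin/cloud/ccm-example.yaml` is the full version, including the ServiceAccount and RBAC binding):

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cloud-controller-manager
  namespace: kube-system
  labels:
    k8s-app: cloud-controller-manager
spec:
  selector:
    matchLabels:
      k8s-app: cloud-controller-manager
  template:
    metadata:
      labels:
        k8s-app: cloud-controller-manager
    spec:
      serviceAccountName: cloud-controller-manager
      # Run only on master nodes and tolerate their taint.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      containers:
        - name: cloud-controller-manager
          image: k8s.gcr.io/cloud-controller-manager:v1.8.0   # illustrative tag
          command:
            - /usr/local/bin/cloud-controller-manager
            - --cloud-provider=<YOUR_CLOUD_PROVIDER>
            - --leader-elect=true
            - --use-service-account-credentials
```
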
@@ -309,11 +309,6 @@ func TestExampleObjectSchemas(t *testing.T) {

		"docs/concepts/overview/working-with-objects": {
			"nginx-deployment": {&extensions.Deployment{}},
		},
		"docs/concepts/policy": {
			"privileged-psp": {&policy.PodSecurityPolicy{}},
			"restricted-psp": {&policy.PodSecurityPolicy{}},
			"example-psp": {&policy.PodSecurityPolicy{}},
		},
		"docs/concepts/services-networking": {
			"curlpod": {&extensions.Deployment{}},
			"custom-dns": {&api.Pod{}},

@@ -323,58 +318,27 @@ func TestExampleObjectSchemas(t *testing.T) {

			"nginx-svc": {&api.Service{}},
			"run-my-nginx": {&extensions.Deployment{}},
		},
		"docs/concepts/workloads/controllers": {
			"cronjob": {&batch.CronJob{}},
			"daemonset": {&extensions.DaemonSet{}},
			"frontend": {&extensions.ReplicaSet{}},
			"hpa-rs": {&autoscaling.HorizontalPodAutoscaler{}},
			"job": {&batch.Job{}},
			"nginx-deployment": {&extensions.Deployment{}},
			"my-repset": {&extensions.ReplicaSet{}},
			"replication": {&api.ReplicationController{}},
		"docs/tutorials/clusters": {
			"hello-apparmor-pod": {&api.Pod{}},
		},
		"docs/tasks/access-application-cluster": {
			"frontend": {&api.Service{}, &extensions.Deployment{}},
			"hello-service": {&api.Service{}},
			"hello": {&extensions.Deployment{}},
			"redis-master": {&api.Pod{}},
			"two-container-pod": {&api.Pod{}},
		"docs/tutorials/configuration/configmap/redis": {
			"redis-pod": {&api.Pod{}},
		},
		"docs/tasks/administer-cluster": {
			"busybox": {&api.Pod{}},
			"cloud-controller-manager-daemonset-example": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &extensions.DaemonSet{}},
			"dns-horizontal-autoscaler": {&extensions.Deployment{}},
			"my-scheduler": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &extensions.Deployment{}},
			"namespace-dev": {&api.Namespace{}},
			"namespace-prod": {&api.Namespace{}},
			"persistent-volume-label-initializer-config": {&admissionregistration.InitializerConfiguration{}},
			"pod1": {&api.Pod{}},
			"pod2": {&api.Pod{}},
			"pod3": {&api.Pod{}},
		"docs/concepts/overview/object-management-kubectl": {
			"simple_deployment": {&extensions.Deployment{}},
			"update_deployment": {&extensions.Deployment{}},
		},
		// TODO: decide whether federation examples should be added
		"docs/tasks/inject-data-application": {
			"commands": {&api.Pod{}},
			"dapi-envars-container": {&api.Pod{}},
			"dapi-envars-pod": {&api.Pod{}},
			"dapi-volume": {&api.Pod{}},
			"dapi-volume-resources": {&api.Pod{}},
			"envars": {&api.Pod{}},
			"podpreset-allow-db": {&settings.PodPreset{}},
			"podpreset-allow-db-merged": {&api.Pod{}},
			"podpreset-configmap": {&api.ConfigMap{}},
			"podpreset-conflict-pod": {&api.Pod{}},
			"podpreset-conflict-preset": {&settings.PodPreset{}},
			"podpreset-merged": {&api.Pod{}},
			"podpreset-multi-merged": {&api.Pod{}},
			"podpreset-pod": {&api.Pod{}},
			"podpreset-preset": {&settings.PodPreset{}},
			"podpreset-proxy": {&settings.PodPreset{}},
			"podpreset-replicaset-merged": {&api.Pod{}},
			"podpreset-replicaset": {&extensions.ReplicaSet{}},
			"secret": {&api.Secret{}},
			"secret-envars-pod": {&api.Pod{}},
			"secret-pod": {&api.Pod{}},
		"examples/admin": {
			"namespace-dev": {&api.Namespace{}},
			"namespace-prod": {&api.Namespace{}},
		},
		"examples/admin/cloud": {
			"ccm-example": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &extensions.DaemonSet{}},
			"pvl-initializer-config": {&admissionregistration.InitializerConfiguration{}},
		},
		"examples/admin/dns": {
			"busybox": {&api.Pod{}},
			"dns-horizontal-autoscaler": {&extensions.Deployment{}},
		},
		"examples/admin/resource": {
			"cpu-constraints": {&api.LimitRange{}},

@ -404,17 +368,11 @@ func TestExampleObjectSchemas(t *testing.T) {
|
|||
"quota-pod": {&api.ResourceQuota{}},
|
||||
"quota-pod-deployment": {&extensions.Deployment{}},
|
||||
},
|
||||
"examples/application/job": {
|
||||
"job-tmpl": {&batch.Job{}},
|
||||
"cronjob": {&batch.CronJob{}},
|
||||
},
|
||||
"examples/application/job/rabbitmq": {
|
||||
"job": {&batch.Job{}},
|
||||
},
|
||||
"examples/application/job/redis": {
|
||||
"job": {&batch.Job{}},
|
||||
"redis-pod": {&api.Pod{}},
|
||||
"redis-service": {&api.Service{}},
|
||||
"examples/admin/sched": {
|
||||
"my-scheduler": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &extensions.Deployment{}},
|
||||
"pod1": {&api.Pod{}},
|
||||
"pod2": {&api.Pod{}},
|
||||
"pod3": {&api.Pod{}},
|
||||
},
|
||||
"examples/application": {
|
||||
"deployment": {&extensions.Deployment{}},
|
||||
|
@ -424,6 +382,10 @@ func TestExampleObjectSchemas(t *testing.T) {
|
|||
"nginx-with-request": {&extensions.Deployment{}},
|
||||
"shell-demo": {&api.Pod{}},
|
||||
},
|
||||
"examples/application/cassandra": {
|
||||
"cassandra-service": {&api.Service{}},
|
||||
"cassandra-statefulset": {&apps.StatefulSet{}, &storage.StorageClass{}},
|
||||
},
|
||||
"examples/application/guestbook": {
|
||||
"frontend-deployment": {&extensions.Deployment{}},
|
||||
"frontend-service": {&api.Service{}},
|
||||
|
@ -432,9 +394,27 @@ func TestExampleObjectSchemas(t *testing.T) {
|
|||
"redis-slave-deployment": {&extensions.Deployment{}},
|
||||
"redis-slave-service": {&api.Service{}},
|
||||
},
|
||||
"examples/application/cassandra": {
|
||||
"cassandra-service": {&api.Service{}},
|
||||
"cassandra-statefulset": {&apps.StatefulSet{}, &storage.StorageClass{}},
|
||||
"examples/application/hpa": {
|
||||
"php-apache": {&autoscaling.HorizontalPodAutoscaler{}},
|
||||
},
|
||||
"examples/application/job": {
|
||||
"cronjob": {&batch.CronJob{}},
|
||||
"job-tmpl": {&batch.Job{}},
|
||||
},
|
||||
"examples/application/job/rabbitmq": {
|
||||
"job": {&batch.Job{}},
|
||||
},
|
||||
"examples/application/job/redis": {
|
||||
"job": {&batch.Job{}},
|
||||
"redis-pod": {&api.Pod{}},
|
||||
"redis-service": {&api.Service{}},
|
||||
},
|
||||
"examples/application/mysql": {
|
||||
"mysql-configmap": {&api.ConfigMap{}},
|
||||
"mysql-deployment": {&api.Service{}, &extensions.Deployment{}},
|
||||
"mysql-pv": {&api.PersistentVolume{}, &api.PersistentVolumeClaim{}},
|
||||
"mysql-services": {&api.Service{}, &api.Service{}},
|
||||
"mysql-statefulset": {&apps.StatefulSet{}},
|
||||
},
|
||||
"examples/application/web": {
|
||||
"web": {&api.Service{}, &apps.StatefulSet{}},
|
||||
|
@ -456,6 +436,29 @@ func TestExampleObjectSchemas(t *testing.T) {
|
|||
"replication": {&api.ReplicationController{}},
|
||||
"nginx-deployment": {&extensions.Deployment{}},
|
||||
},
|
||||
"examples/debug": {
|
||||
"counter-pod": {&api.Pod{}},
|
||||
"event-exporter": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &extensions.Deployment{}},
|
||||
"fluentd-gcp-configmap": {&api.ConfigMap{}},
|
||||
"fluentd-gcp-ds": {&extensions.DaemonSet{}},
|
||||
"node-problem-detector": {&extensions.DaemonSet{}},
|
||||
"node-problem-detector-configmap": {&extensions.DaemonSet{}},
|
||||
"termination": {&api.Pod{}},
|
||||
},
|
||||
"examples/podpreset": {
|
||||
"allow-db": {&settings.PodPreset{}},
|
||||
"allow-db-merged": {&api.Pod{}},
|
||||
"configmap": {&api.ConfigMap{}},
|
||||
"conflict-pod": {&api.Pod{}},
|
||||
"conflict-preset": {&settings.PodPreset{}},
|
||||
"merged": {&api.Pod{}},
|
||||
"multi-merged": {&api.Pod{}},
|
||||
"pod": {&api.Pod{}},
|
||||
"preset": {&settings.PodPreset{}},
|
||||
"proxy": {&settings.PodPreset{}},
|
||||
"replicaset-merged": {&api.Pod{}},
|
||||
"replicaset": {&extensions.ReplicaSet{}},
|
||||
},
|
||||
"examples/pods": {
|
||||
"commands": {&api.Pod{}},
|
||||
"init-containers": {&api.Pod{}},
|
||||
|
@ -465,6 +468,17 @@ func TestExampleObjectSchemas(t *testing.T) {
|
|||
"pod-with-pod-affinity": {&api.Pod{}},
|
||||
"private-reg-pod": {&api.Pod{}},
|
||||
"share-process-namespace": {&api.Pod{}},
|
||||
"two-container-pod": {&api.Pod{}},
|
||||
},
|
||||
"examples/pods/inject": {
|
||||
"dapi-envars-container": {&api.Pod{}},
|
||||
"dapi-envars-pod": {&api.Pod{}},
|
||||
"dapi-volume": {&api.Pod{}},
|
||||
"dapi-volume-resources": {&api.Pod{}},
|
||||
"envars": {&api.Pod{}},
|
||||
"secret": {&api.Secret{}},
|
||||
"secret-envars-pod": {&api.Pod{}},
|
||||
"secret-pod": {&api.Pod{}},
|
||||
},
|
||||
"examples/pods/probe": {
|
||||
"exec-liveness": {&api.Pod{}},
|
||||
|
@ -480,11 +494,11 @@ func TestExampleObjectSchemas(t *testing.T) {
|
|||
"examples/pods/resource": {
|
||||
"cpu-request-limit": {&api.Pod{}},
|
||||
"cpu-request-limit-2": {&api.Pod{}},
|
||||
"extended-resource-pod": {&api.Pod{}},
|
||||
"extended-resource-pod-2": {&api.Pod{}},
|
||||
"memory-request-limit": {&api.Pod{}},
|
||||
"memory-request-limit-2": {&api.Pod{}},
|
||||
"memory-request-limit-3": {&api.Pod{}},
|
||||
"extended-resource-pod": {&api.Pod{}},
|
||||
"extended-resource-pod-2": {&api.Pod{}},
|
||||
},
|
||||
"examples/pods/security": {
|
||||
"security-context": {&api.Pod{}},
|
||||
|
@ -504,39 +518,10 @@ func TestExampleObjectSchemas(t *testing.T) {
|
|||
"restricted-psp": {&policy.PodSecurityPolicy{}},
|
||||
"example-psp": {&policy.PodSecurityPolicy{}},
|
||||
},
|
||||
"docs/tasks/run-application": {
|
||||
"deployment-patch-demo": {&extensions.Deployment{}},
|
||||
"hpa-php-apache": {&autoscaling.HorizontalPodAutoscaler{}},
|
||||
},
|
||||
"examples/debug": {
|
||||
"counter-pod": {&api.Pod{}},
|
||||
"event-exporter": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &extensions.Deployment{}},
|
||||
"fluentd-gcp-configmap": {&api.ConfigMap{}},
|
||||
"fluentd-gcp-ds": {&extensions.DaemonSet{}},
|
||||
"node-problem-detector": {&extensions.DaemonSet{}},
|
||||
"node-problem-detector-configmap": {&extensions.DaemonSet{}},
|
||||
"termination": {&api.Pod{}},
|
||||
},
|
||||
"examples/application/mysql": {
|
||||
"mysql-configmap": {&api.ConfigMap{}},
|
||||
"mysql-deployment": {&api.Service{}, &extensions.Deployment{}},
|
||||
"mysql-pv": {&api.PersistentVolume{}, &api.PersistentVolumeClaim{}},
|
||||
"mysql-services": {&api.Service{}, &api.Service{}},
|
||||
"mysql-statefulset": {&apps.StatefulSet{}},
|
||||
},
|
||||
"examples/application/hpa": {
|
||||
"php-apache": {&autoscaling.HorizontalPodAutoscaler{}},
|
||||
},
|
||||
"docs/tutorials/clusters": {
|
||||
"hello-apparmor-pod": {&api.Pod{}},
|
||||
"my-scheduler": {&extensions.Deployment{}},
|
||||
},
|
||||
"docs/tutorials/configuration/configmap/redis": {
|
||||
"redis-pod": {&api.Pod{}},
|
||||
},
|
||||
"docs/concepts/overview/object-management-kubectl": {
|
||||
"simple_deployment": {&extensions.Deployment{}},
|
||||
"update_deployment": {&extensions.Deployment{}},
|
||||
"examples/service/access": {
|
||||
"frontend": {&api.Service{}, &extensions.Deployment{}},
|
||||
"hello-service": {&api.Service{}},
|
||||
"hello": {&extensions.Deployment{}},
|
||||
},
|
||||
}
|
||||
|
||||
|
|