Application of code include widget, hack curl calls for three file downloads, sass code/pre fixes, comment-out of version drop-down

pull/43/head
johndmulhausen 2016-03-01 19:43:10 -08:00
parent df6a183084
commit 50704476d6
17 changed files with 102 additions and 378 deletions

View File

@ -1,9 +1,11 @@
{% capture samplecode %}{% include_relative {{include.file}} %}{% endcapture %}
{% if include.k8slink %}{% capture ghlink %}https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}{{include.k8slink}}{% endcapture %}{% endif %}
{% if include.ghlink %}{% capture ghlink %}https://github.com/kubernetes/kubernetes.github.io/blob/master{{include.ghlink}}{% endcapture %}{% endif %}
{% capture mysample %}
```{{include.language}}
{{ samplecode | raw | strip }}
```
{% endcapture %}
<p>&nbsp;</p>
<table class="includecode"><thead><tr><th><a href="{{include.ghlink}}"><code>{{include.file}}</code></a></th></tr></thead>
<table class="includecode"><thead><tr><th>{% if ghlink %}<a href="{{ghlink}}">{% endif %}<code>{{include.file}}</code></a></th></tr></thead>
<tr><td>{{ mysample | markdownify }}</td></tr></table>

View File

@ -77,12 +77,12 @@
<li><a href="/docs/reference">REFERENCE</a></li>
<li><a href="/docs/samples">SAMPLES</a></li>
<li><a href="/docs/troubleshooting/">SUPPORT</a></li>
</ul>
</ul><!--
<div class="dropdown">
<div class="readout"></div>
<a href="/v1.1/">Version 1.1</a>
<a href="/v1.2/">Version 1.2</a>
</div>
</div>-->
<input type="text" id="search" placeholder="Search" onkeydown="if (event.keyCode==13) window.location.replace('/docs/search/?q=' + this.value)">
</div>
</section>

View File

@ -661,7 +661,6 @@ section
font-size: 16px
font-weight: 300
line-height: 1.25em
overflow: auto
p + p
margin-top: 10px
@ -670,7 +669,7 @@ section
background-color: $light-grey
color: $dark-grey
font-family: $mono-font
overflow-x: scroll
overflow-x: auto
font-size: 14px
font-weight: bold
padding: 2px 4px
@ -697,7 +696,7 @@ section
display: block
margin: 20px 0
padding: 15px
overflow-x: scroll
overflow-x: auto
h1 code, h2 code, h3 code, h4 code, h5 code, h6 code
font-family: inherit

View File

@ -0,0 +1,12 @@
# Synthetic log-generator pod used by the cluster-level logging docs:
# writes one timestamped counter line to stdout every second, forever.
apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: ubuntu:14.04
    # Infinite bash loop: emit "<i>: <date>" once per second.
    args: [bash, -c,
            'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done']

View File

@ -0,0 +1,31 @@
# Fluentd log-collection pod that ingests Docker container logs into
# Google Cloud Logging; runs in the kube-system namespace (one per node,
# per the surrounding doc text).
apiVersion: v1
kind: Pod
metadata:
  name: fluentd-cloud-logging
  namespace: kube-system
spec:
  containers:
  - name: fluentd-cloud-logging
    image: gcr.io/google_containers/fluentd-gcp:1.15
    resources:
      limits:
        cpu: 100m
        memory: 200Mi
    env:
    # Extra command-line arguments passed to the fluentd process
    # (-q presumably lowers log verbosity — confirm against fluentd docs).
    - name: FLUENTD_ARGS
      value: -q
    # Mount the host's log directories so fluentd can read container logs.
    volumeMounts:
    - name: varlog
      mountPath: /var/log
    - name: varlibdockercontainers
      mountPath: /var/lib/docker/containers
      readOnly: true
  # Give fluentd time to flush buffered log entries on shutdown.
  terminationGracePeriodSeconds: 30
  volumes:
  - name: varlog
    hostPath:
      path: /var/log
  - name: varlibdockercontainers
    hostPath:
      path: /var/lib/docker/containers

View File

@ -26,23 +26,7 @@ This diagram shows four nodes created on a Google Compute Engine cluster with th
To help explain how cluster level logging works let's start off with a synthetic log generator pod specification [counter-pod.yaml](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/blog-logging/counter-pod.yaml):
<!-- BEGIN MUNGE: EXAMPLE ../../examples/blog-logging/counter-pod.yaml -->
```yaml
apiVersion: v1
kind: Pod
metadata:
name: counter
spec:
containers:
- name: count
image: ubuntu:14.04
args: [bash, -c,
'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done']
```
[Download example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/blog-logging/counter-pod.yaml)
<!-- END MUNGE: EXAMPLE ../../examples/blog-logging/counter-pod.yaml -->
{% include code.html language="yaml" file="counter-pod.yaml" k8slink="/examples/blog-logging/counter-pod.yaml" %}
This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let's create the pod in the default
namespace.
@ -126,43 +110,7 @@ When a Kubernetes cluster is created with logging to Google Cloud Logging enable
This log collection pod has a specification which looks something like this:
<!-- BEGIN MUNGE: EXAMPLE ../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml -->
```yaml
apiVersion: v1
kind: Pod
metadata:
name: fluentd-cloud-logging
namespace: kube-system
spec:
containers:
- name: fluentd-cloud-logging
image: gcr.io/google_containers/fluentd-gcp:1.14
resources:
limits:
cpu: 100m
memory: 200Mi
env:
- name: FLUENTD_ARGS
value: -q
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
```
[Download example](https://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml)
<!-- END MUNGE: EXAMPLE ../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml -->
{% include code.html language="yaml" file="fluentd-gcp.yaml" k8slink="/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml" %}
This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it.

View File

@ -0,0 +1,12 @@
# Synthetic log-generator pod (same spec as the getting-started copy):
# writes one timestamped counter line to stdout every second, forever.
apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: ubuntu:14.04
    # Infinite bash loop: emit "<i>: <date>" once per second.
    args: [bash, -c,
            'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done']

View File

@ -36,29 +36,7 @@ enabled.
Here is an example Deployment. It creates a replication controller to
bring up 3 nginx pods.
<!-- BEGIN MUNGE: EXAMPLE nginx-deployment.yaml -->
```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 3
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
ports:
- containerPort: 80
```
[Download example](/docs/user-guide/nginx-deployment.yaml)
<!-- END MUNGE: EXAMPLE nginx-deployment.yaml -->
{% include code.html language="yaml" file="nginx-deployment.yaml" ghlink="/docs/user-guide/nginx-deployment.yaml" %}
Run the example by downloading the example file and then running this command:
@ -114,30 +92,7 @@ Let's say, now we want to update the nginx pods to start using nginx:1.9.1 image
instead of nginx:1.7.9.
For this, we update our deployment to be as follows:
<!-- BEGIN MUNGE: EXAMPLE new-nginx-deployment.yaml -->
```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 3
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.9.1
ports:
- containerPort: 80
```
[Download example](/docs/user-guide/new-nginx-deployment.yaml)
<!-- END MUNGE: EXAMPLE new-nginx-deployment.yaml -->
{% include code.html language="yaml" file="new-nginx-deployment.yaml" ghlink="/docs/user-guide/new-nginx-deployment.yaml" %}
```shell
$ kubectl apply -f docs/user-guide/new-nginx-deployment.yaml
@ -215,7 +170,6 @@ OldReplicationControllers: deploymentrc-1562004724 (3/3 replicas created)
NewReplicationController: <none>
Events:
FirstSeen LastSeen Count From SubobjectPath Reason Message
─────────	────────	─────	────	─────────────	──────	───────
10m 10m 1 {deployment-controller } ScalingRC Scaled up rc deploymentrc-1975012602 to 3
2m 2m 1 {deployment-controller } ScalingRC Scaled up rc deploymentrc-1562004724 to 1
2m 2m 1 {deployment-controller } ScalingRC Scaled down rc deploymentrc-1975012602 to 1

View File

@ -13,6 +13,7 @@ is often possible to "wrap" such applications, this is tedious and error prone,
and violates the goal of low coupling. Instead, the user should be able to use
the Pod's name, for example, and inject it into this well-known variable.
## Capabilities
The following information is available to a `Pod` through the downward API:
@ -23,11 +24,13 @@ The following information is available to a `Pod` through the downward API:
More information will be exposed through this same API over time.
## Exposing pod information into a container
Containers consume information from the downward API using environment
variables or using a volume plugin.
### Environment variables
Most environment variables in the Kubernetes API use the `value` field to carry
@ -43,41 +46,13 @@ The `fieldRef` is evaluated and the resulting value is used as the value for
the environment variable. This allows users to publish their pod's name in any
environment variable they want.
## Example
This is an example of a pod that consumes its name and namespace via the
downward API:
<!-- BEGIN MUNGE: EXAMPLE downward-api/dapi-pod.yaml -->
```yaml
apiVersion: v1
kind: Pod
metadata:
name: dapi-test-pod
spec:
containers:
- name: test-container
image: gcr.io/google_containers/busybox
command: [ "/bin/sh", "-c", "env" ]
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
restartPolicy: Never
```
[Download example](/docs/user-guide/downward-api/dapi-pod.yaml)
<!-- END MUNGE: EXAMPLE downward-api/dapi-pod.yaml -->
{% include code.html language="yaml" file="downward-api/dapi-pod.yaml" ghlink="/docs/user-guide/downward-api/dapi-pod.yaml" %}
### Downward API volume
@ -106,47 +81,13 @@ The downward API volume refreshes its data in step with the kubelet refresh loop
In future, it will be possible to specify a specific annotation or label.
## Example
This is an example of a pod that consumes its labels and annotations via the downward API volume, labels and annotations are dumped in `/etc/podlabels` and in `/etc/annotations`, respectively:
<!-- BEGIN MUNGE: EXAMPLE downward-api/volume/dapi-volume.yaml -->
{% include code.html language="yaml" file="downward-api/volume/dapi-volume.yaml" ghlink="/docs/user-guide/downward-api/volume/dapi-volume.yaml" %}
```yaml
apiVersion: v1
kind: Pod
metadata:
name: kubernetes-downwardapi-volume-example
labels:
zone: us-est-coast
cluster: test-cluster1
rack: rack-22
annotations:
build: two
builder: john-doe
spec:
containers:
- name: client-container
image: gcr.io/google_containers/busybox
command: ["sh", "-c", "while true; do if [[ -e /etc/labels ]]; then cat /etc/labels; fi; if [[ -e /etc/annotations ]]; then cat /etc/annotations; fi; sleep 5; done"]
volumeMounts:
- name: podinfo
mountPath: /etc
readOnly: false
volumes:
- name: podinfo
downwardAPI:
items:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
- path: "annotations"
fieldRef:
fieldPath: metadata.annotations
```
[Download example](/docs/user-guide/downward-api/volume/dapi-volume.yaml)
<!-- END MUNGE: EXAMPLE downward-api/volume/dapi-volume.yaml -->
Some more thorough examples:

View File

@ -87,21 +87,7 @@ In order for the Ingress resource to work, the cluster must have an Ingress cont
There are existing Kubernetes concepts that allow you to expose a single service (see [alternatives](#alternatives)), however you can do so through an Ingress as well, by specifying a *default backend* with no rules.
<!-- BEGIN MUNGE: EXAMPLE ingress.yaml -->
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: test-ingress
spec:
backend:
serviceName: testsvc
servicePort: 80
```
[Download example](/docs/user-guide/ingress.yaml)
<!-- END MUNGE: EXAMPLE ingress.yaml -->
{% include code.html language="yaml" file="ingress.yaml" ghlink="/docs/user-guide/ingress.yaml" %}
If you create it using `kubectl -f` you should see:

View File

@ -18,32 +18,8 @@ A Job can also be used to run multiple pods in parallel.
Here is an example Job config. It computes π to 2000 places and prints it out.
It takes around 10s to complete.
<!-- BEGIN MUNGE: EXAMPLE job.yaml -->
```yaml
apiVersion: extensions/v1beta1
kind: Job
metadata:
name: pi
spec:
selector:
matchLabels:
app: pi
template:
metadata:
name: pi
labels:
app: pi
spec:
containers:
- name: pi
image: perl
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
restartPolicy: Never
```
[Download example](/docs/user-guide/job.yaml)
<!-- END MUNGE: EXAMPLE job.yaml -->
{% include code.html language="yaml" file="job.yaml" ghlink="/docs/user-guide/job.yaml" %}
Run the example job by downloading the example file and then running this command:
@ -66,7 +42,6 @@ Labels: <none>
Pods Statuses: 1 Running / 0 Succeeded / 0 Failed
Events:
FirstSeen LastSeen Count From SubobjectPath Reason Message
─────────	────────	─────	────	─────────────	──────	───────
1m 1m 1 {job } SuccessfulCreate Created pod: pi-z548a
```

View File

@ -12,24 +12,8 @@ Kubernetes components, such as kubelet and apiserver, use the [glog](https://god
The logs of a running container may be fetched using the command `kubectl logs`. For example, given
this pod specification [counter-pod.yaml](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/blog-logging/counter-pod.yaml), which has a container which writes out some text to standard
output every second. (You can find different pod specifications [here](/docs/user-guide/logging-demo/).)
<!-- BEGIN MUNGE: EXAMPLE ../../examples/blog-logging/counter-pod.yaml -->
```yaml
apiVersion: v1
kind: Pod
metadata:
name: counter
spec:
containers:
- name: count
image: ubuntu:14.04
args: [bash, -c,
'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done']
```
[Download example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/blog-logging/counter-pod.yaml)
<!-- END MUNGE: EXAMPLE ../../examples/blog-logging/counter-pod.yaml -->
{% include code.html language="yaml" file="counter-pod.yaml" k8slink="/examples/blog-logging/counter-pod.yaml" %}
we can run the pod:

View File

@ -15,25 +15,7 @@ $ kubectl create -f ./pod.yaml
Where pod.yaml contains something like:
<!-- BEGIN MUNGE: EXAMPLE pod.yaml -->
```yaml
apiVersion: v1
kind: Pod
metadata:
name: nginx
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80
```
[Download example](/docs/user-guide/pod.yaml)
<!-- END MUNGE: EXAMPLE pod.yaml -->
{% include code.html language="yaml" file="pod.yaml" ghlink="/docs/user-guide/pod.yaml" %}
You can see your cluster's pods:
@ -60,32 +42,7 @@ $ kubectl create -f ./replication.yaml
Where `replication.yaml` contains:
<!-- BEGIN MUNGE: EXAMPLE replication.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx
spec:
replicas: 3
selector:
app: nginx
template:
metadata:
name: nginx
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80
```
[Download example](/docs/user-guide/replication.yaml)
<!-- END MUNGE: EXAMPLE replication.yaml -->
{% include code.html language="yaml" file="replication.yaml" ghlink="/docs/user-guide/replication.yaml" %}
To delete the replication controller (and the pods it created):

View File

@ -102,7 +102,7 @@ volumeMounts:
Example Redis pod definition with a persistent storage volume ([pod-redis.yaml](/docs/user-guide/walkthrough/pod-redis.yaml)):
{% include code.html language="yaml" file="pod-redis.yaml" ghlink="https://github.com/kubernetes/kubernetes.github.io/blob/master/docs/user-guide/walkthrough/pod-redis.yaml" %}
{% include code.html language="yaml" file="pod-redis.yaml" ghlink="/docs/user-guide/walkthrough/pod-redis.yaml" %}
Notes:

View File

@ -23,26 +23,8 @@ labels:
```
For example, here is the nginx pod definition with labels ([pod-nginx-with-label.yaml](/docs/user-guide/walkthrough/pod-nginx-with-label.yaml)):
<!-- BEGIN MUNGE: EXAMPLE pod-nginx-with-label.yaml -->
```yaml
apiVersion: v1
kind: Pod
metadata:
name: nginx
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80
```
[Download example](/docs/user-guide/walkthrough/pod-nginx-with-label.yaml)
<!-- END MUNGE: EXAMPLE pod-nginx-with-label.yaml -->
{% include code.html language="yaml" file="pod-nginx-with-label.yaml" ghlink="/docs/user-guide/walkthrough/pod-nginx-with-label.yaml" %}
Create the labeled pod ([pod-nginx-with-label.yaml](/docs/user-guide/walkthrough/pod-nginx-with-label.yaml)):
@ -67,38 +49,9 @@ OK, now you know how to make awesome, multi-container, labeled pods and you want
Replication controllers are the objects to answer these questions. A replication controller combines a template for pod creation (a "cookie-cutter" if you will) and a number of desired replicas, into a single Kubernetes object. The replication controller also contains a label selector that identifies the set of objects managed by the replication controller. The replication controller constantly measures the size of this set relative to the desired size, and takes action by creating or deleting pods.
For example, here is a replication controller that instantiates two nginx pods ([replication-controller.yaml](/docs/user-guide/walkthrough/replication-controller.yaml)):
<!-- BEGIN MUNGE: EXAMPLE replication-controller.yaml -->
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx-controller
spec:
replicas: 2
# selector identifies the set of Pods that this
# replication controller is responsible for managing
selector:
app: nginx
# podTemplate defines the 'cookie cutter' used for creating
# new pods when necessary
template:
metadata:
labels:
# Important: these labels need to match the selector above
# The api server enforces this constraint.
app: nginx
spec:
containers:
- name: nginx
image: nginx
ports:
- containerPort: 80
```
[Download example](/docs/user-guide/walkthrough/replication-controller.yaml)
<!-- END MUNGE: EXAMPLE replication-controller.yaml -->
{% include code.html language="yaml" file="replication-controller.yaml" ghlink="/docs/user-guide/walkthrough/replication-controller.yaml" %}
#### Replication Controller Management
@ -128,30 +81,9 @@ For more information, see [Replication Controllers](/docs/user-guide/replication
Once you have a replicated set of pods, you need an abstraction that enables connectivity between the layers of your application. For example, if you have a replication controller managing your backend jobs, you don't want to have to reconfigure your front-ends whenever you re-scale your backends. Likewise, if the pods in your backends are scheduled (or rescheduled) onto different machines, you can't be required to re-configure your front-ends. In Kubernetes, the service abstraction achieves these goals. A service provides a way to refer to a set of pods (selected by labels) with a single static IP address. It may also provide load balancing, if supported by the provider.
For example, here is a service that balances across the pods created in the previous nginx replication controller example ([service.yaml](/docs/user-guide/walkthrough/service.yaml)):
<!-- BEGIN MUNGE: EXAMPLE service.yaml -->
```yaml
apiVersion: v1
kind: Service
metadata:
name: nginx-service
spec:
ports:
- port: 8000 # the port that this service should serve on
# the container on each pod to connect to, can be a name
# (e.g. 'www') or a number (e.g. 80)
targetPort: 80
protocol: TCP
# just like the selector in the replication controller,
# but this time it identifies the set of pods to load balance
# traffic to.
selector:
app: nginx
```
[Download example](/docs/user-guide/walkthrough/service.yaml)
<!-- END MUNGE: EXAMPLE service.yaml -->
{% include code.html language="yaml" file="service.yaml" ghlink="/docs/user-guide/walkthrough/service.yaml" %}
#### Service Management
@ -240,34 +172,9 @@ In all cases, if the Kubelet discovers a failure the container is restarted.
The container health checks are configured in the `livenessProbe` section of your container config. There you can also specify an `initialDelaySeconds` that is a grace period from when the container is started to when health checks are performed, to enable your container to perform any necessary initialization.
Here is an example config for a pod with an HTTP health check ([pod-with-http-healthcheck.yaml](/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml)):
<!-- BEGIN MUNGE: EXAMPLE pod-with-http-healthcheck.yaml -->
```yaml
apiVersion: v1
kind: Pod
metadata:
name: pod-with-healthcheck
spec:
containers:
- name: nginx
image: nginx
# defines the health checking
livenessProbe:
# an http probe
httpGet:
path: /_status/healthz
port: 80
# length of time to wait for a pod to initialize
# after pod startup, before applying health checking
initialDelaySeconds: 30
timeoutSeconds: 1
ports:
- containerPort: 80
```
[Download example](/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml)
<!-- END MUNGE: EXAMPLE pod-with-http-healthcheck.yaml -->
{% include code.html language="yaml" file="pod-with-http-healthcheck.yaml" ghlink="/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml" %}
For more information about health checking, see [Container Probes](/docs/user-guide/pod-states/#container-probes).

7
imports.txt Normal file
View File

@ -0,0 +1,7 @@
https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml,docs/getting-started-guides/fluentd-gcp.yaml
https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/examples/blog-logging/counter-pod.yaml,docs/getting-started-guides/counter-pod.yaml
https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/examples/blog-logging/counter-pod.yaml,docs/user-guide/counter-pod.yaml
https://raw.githubusercontent.com/kubernetes/kubernetes/gh-pages/_includes/v1.1/extensions-v1beta1-definitions.html,_includes/v1.1/extensions-v1beta1-definitions.html
https://raw.githubusercontent.com/kubernetes/kubernetes/gh-pages/_includes/v1.1/extensions-v1beta1-operations.html,_includes/v1.1/extensions-v1beta1-operations.html
https://raw.githubusercontent.com/kubernetes/kubernetes/gh-pages/_includes/v1.1/v1-definitions.html,_includes/v1.1/v1-definitions.html
https://raw.githubusercontent.com/kubernetes/kubernetes/gh-pages/_includes/v1.1/v1-operations.html,_includes/v1.1/v1-operations.html

View File

@ -3,6 +3,7 @@ cd k8s
git checkout gh-pages
cd ..
# batch fetches
while read line || [[ -n ${line} ]]; do
IFS=': ' read -a myarray <<< "${line}"
# echo "arraypos0: ${myarray[0]}"
@ -26,6 +27,14 @@ cd ..
cd ..
rm -rf k8s
# Single-file fetches
rm -rf docs/getting-started-guides/fluentd-gcp.yaml
rm -rf docs/getting-started-guides/counter-pod.yaml
rm -rf docs/user-guide/counter-pod.yaml
curl https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml > docs/getting-started-guides/fluentd-gcp.yaml
curl https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/examples/blog-logging/counter-pod.yaml > docs/getting-started-guides/counter-pod.yaml
curl https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/examples/blog-logging/counter-pod.yaml > docs/user-guide/counter-pod.yaml
git add .
git commit -m "Ran update-imported-docs.sh"
echo "Docs imported! Run 'git push' to upload them"