Merge branch 'master' into release-1.4
commit 301a3e4da9
404.md
@@ -65,7 +65,4 @@ $( document ).ready(function() {
});
</script>

Sorry, this page was not found. :(

You can let us know by filling out the "I wish this page" text field at
the bottom of this page. Maybe try: "I wish this page _existed_."
Sorry, this page was not found. :(

@@ -18,8 +18,6 @@
<a href="https://calendar.google.com/calendar/embed?src=nt2tcnbtbied3l6gi2h29slvc0%40group.calendar.google.com" class="calendar"><span>Events Calendar</span></a>
</div>
<div>
<span>I wish this page</span>
<input type="text" id="wishField" name="wishField" placeholder="enter your wish">
</div>
</div>
<div id="miceType" class="center">© {{ 'now' | date: "%Y" }} Kubernetes</div>

@@ -43,7 +43,11 @@
"permalink" : "http://kubernetes.github.io{{page.url}}"
};
(function(d,c,j){if(!document.getElementById(j)){var pd=d.createElement(c),s;pd.id=j;pd.src=('https:'==document.location.protocol)?'https://polldaddy.com/js/rating/rating.js':'http://i0.poll.fm/js/rating/rating.js';s=document.getElementsByTagName(c)[0];s.parentNode.insertBefore(pd,s);}}(document,'script','pd-rating-js'));
</script>{% endif %}
</script>
<a href="" onclick="window.open('https://github.com/kubernetes/kubernetes.github.io/issues/new?title=Issue%20with%20' +
window.location.pathname + '&body=Issue%20with%20' +
window.location.pathname)" class="button issue">Create Issue</a>
{% endif %}
</div>
</section>

@@ -874,6 +874,12 @@ dd
img
  max-width: 100%

a.button
  border-radius: 2px

a.issue
  margin-left: 20px

.fixed footer
  position: fixed
  bottom: 0

@@ -160,7 +160,7 @@ Lars Kellogg-Stedman.

### Weave Net from Weaveworks

[Weave Net](https://www.weave.works/documentation/net-1-6-0-introducing-weave/) is a
[Weave Net](https://www.weave.works/products/weave-net/) is a
resilient and simple to use network for Kubernetes and its hosted applications.
Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/)
or stand-alone. In either version, it doesn’t require any configuration or extra code

@@ -36,8 +36,7 @@ it to [support other log format](/docs/admin/node-problem/#support-other-log-for

## Enable/Disable in GCE cluster

Node problem detector is [running as a cluster
addon](/docs/admin/cluster-large/#addon-resources) enabled by default in the
Node problem detector is [running as a cluster addon](cluster-large.md/#Addon-Resources) enabled by default in the
gce cluster.

You can enable/disable it by setting the environment variable

@@ -43,7 +43,7 @@ export AZURE_SUBSCRIPTION_ID="<subscription-guid>"
export AZURE_TENANT_ID="<tenant-guid>" # only needed for Kubernetes < v1.3.0.
```

These values can be overriden by setting them in `cluster/azure/config-default.sh` or as environment variables. They are shown here with their default values:
These values can be overridden by setting them in `cluster/azure/config-default.sh` or as environment variables. They are shown here with their default values:

```shell
export AZURE_DEPLOY_ID="" # autogenerated if blank
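# A hedged illustration only (the values below are assumptions, not defaults):
# any of these variables can be exported in the shell before running the scripts.
export AZURE_DEPLOY_ID="my-kube-deploy"
export AZURE_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000"
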
@@ -251,9 +251,9 @@ kubectl cluster-info

### Accessing the cluster programmatically

It's possible to use the locally-stored client certificates to access the api server. For example, you may want to use any of the [Kubernetes API client libraries](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/client-libraries.md) to program against your Kubernetes cluster in the programming language of your choice.
It's possible to use the locally stored client certificates to access the api server. For example, you may want to use any of the [Kubernetes API client libraries](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/client-libraries.md) to program against your Kubernetes cluster in the programming language of your choice.

To demostrate how to use these locally stored certificates, we provide the folowing example of using ```curl``` to communicate to the master api server via https:
To demonstrate how to use these locally stored certificates, we provide the following example of using ```curl``` to communicate to the master api server via https:

```shell
curl \
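
# The hunk above is truncated; purely as a hedged sketch (the file names and
# master address are assumptions), a certificate-based request might look like:
curl --cacert ca.pem --cert client.crt --key client.key https://<master-ip>/api/v1/nodes
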
@@ -267,7 +267,7 @@ distributed with OSX.

### Accessing the cluster with a browser

We install two UIs on Kubernetes. The orginal KubeUI and [the newer kube
We install two UIs on Kubernetes. The original KubeUI and [the newer kube
dashboard](/docs/user-guide/ui/). When you create a cluster, the script should output URLs for these
interfaces like this:

@@ -39,7 +39,7 @@ Download the stable CoreOS bootable ISO from the [CoreOS website](https://coreos

1. Once you've downloaded the ISO image, burn the ISO to a CD/DVD/USB key and boot from it (if using a virtual machine you can boot directly from the ISO). Once booted, you should be automatically logged in as the `core` user at the terminal. At this point CoreOS is running from the ISO and it hasn't been installed yet.

2. *On another machine*, download the the [master cloud-config template](https://raw.githubusercontent.com/projectcalico/calico-cni/k8s-1.1-docs/samples/kubernetes/cloud-config/master-config-template.yaml) and save it as `master-config.yaml`.
2. *On another machine*, download the [master cloud-config template](https://raw.githubusercontent.com/projectcalico/calico-cni/k8s-1.1-docs/samples/kubernetes/cloud-config/master-config-template.yaml) and save it as `master-config.yaml`.

3. Replace the following variables in the `master-config.yaml` file.

@@ -23,7 +23,7 @@ Deploy a CoreOS running Kubernetes environment. This particular guide is made to
* /tftpboot/pxelinux.0/(MAC) -> linked to Linux image config file
2. Update per install the link for pxelinux
3. Update the DHCP config to reflect the host needing deployment
4. Setup nodes to deploy CoreOS creating a etcd cluster.
4. Setup nodes to deploy CoreOS creating an etcd cluster.
5. Have no access to the public [etcd discovery tool](https://discovery.etcd.io/).
6. Installing the CoreOS slaves to become Kubernetes nodes.

@@ -98,7 +98,7 @@ Now you should have a working PXELINUX setup to image CoreOS nodes. You can veri

This section describes how to setup the CoreOS images to live alongside a pre-existing PXELINUX environment.

1. Find or create the TFTP root directory that everything will be based off of.
1. Find or create the TFTP root directory that everything will be based on.
    * For this document we will assume `/tftpboot/` is our root directory.
2. Once we know and have our tftp root directory we will create a new directory structure for our CoreOS images.
3. Download the CoreOS PXE files provided by the CoreOS team.

@@ -93,7 +93,7 @@ asks you to configure your view of the ingested logs. Select the option for
timeseries values and select `@timestamp`. On the following page select the
`Discover` tab and then you should be able to see the ingested logs.
You can set the refresh interval to 5 seconds to have the logs
regulary refreshed.
regularly refreshed.

Here is a typical view of ingested logs from the Kibana viewer:

@@ -64,7 +64,7 @@ RUN npm install
CMD ["node", "app.js"]
```

A `Dockerfile` is pretty self explanatory, and this one is dead simple.
A `Dockerfile` is pretty self-explanatory, and this one is dead simple.

First, it uses the official Node.js LTS image as the base image.

@@ -86,7 +86,7 @@ If you do not have your environment variables set, or do not want them consumed,
- **[config-default.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/config-default.sh)** Sets all parameters needed for heat template.
- **[config-image.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/config-image.sh)** Sets parameters needed to download and create new OpenStack image via glance.
- **[openrc-default.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/openrc-default.sh)** Sets environment variables for communicating to OpenStack. These are consumed by the cli tools (heat, glance, swift, nova).
- **[openrc-swift.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/openrc-swift.sh)** Some OpenStack setups require the use of seperate swift credentials. Put those credentials in this file.
- **[openrc-swift.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/openrc-swift.sh)** Some OpenStack setups require the use of separate swift credentials. Put those credentials in this file.

Please see the contents of these files for documentation regarding each variable's function.

@@ -59,7 +59,7 @@ $ export ETCD_VERSION=2.2.0
For users who want to bring up a cluster with k8s version v1.1.1, `controller manager` may fail to start
due to [a known issue](https://github.com/kubernetes/kubernetes/issues/17109). You could raise it
up manually by using following command on the remote master server. Note that
you should do this only after `api-server` is up. Moreover this issue is fixed in v1.1.2 and later.
you should do this only after `api-server` is up. Moreover, this issue is fixed in v1.1.2 and later.

```shell
$ sudo service kube-controller-manager start

@@ -122,11 +122,11 @@ runner (Docker or rkt).
When using Docker:

- The `spec.container[].resources.requests.cpu` is converted to its core value (potentially fractional),
and multipled by 1024, and used as the value of the [`--cpu-shares`](
and multiplied by 1024, and used as the value of the [`--cpu-shares`](
https://docs.docker.com/reference/run/#runtime-constraints-on-resources) flag to the `docker run`
command.
- The `spec.container[].resources.limits.cpu` is converted to its millicore value,
multipled by 100000, and then divided by 1000, and used as the value of the [`--cpu-quota`](
multiplied by 100000, and then divided by 1000, and used as the value of the [`--cpu-quota`](
https://docs.docker.com/reference/run/#runtime-constraints-on-resources) flag to the `docker run`
command. The [`--cpu-period`] flag is set to 100000 which represents the default 100ms period
for measuring quota usage. The kubelet enforces cpu limits if it was started with the

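As a worked example of the arithmetic this hunk describes (the request and limit values are assumptions, not defaults): `requests.cpu: 250m` with `limits.cpu: 500m` would map to `--cpu-shares=256` and `--cpu-quota=50000`, alongside the fixed `--cpu-period=100000`:

```shell
# Assumed values for illustration: requests.cpu=250m, limits.cpu=500m
request_millicores=250
limit_millicores=500
echo "--cpu-shares=$(( request_millicores * 1024 / 1000 ))"   # 0.25 cores * 1024 = 256
echo "--cpu-quota=$(( limit_millicores * 100000 / 1000 ))"    # 50000
echo "--cpu-period=100000"                                    # default 100ms period
```
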
@@ -10,7 +10,7 @@ assignees:

## Configuration in Kubernetes

In addition to the imperative-style commands, such as `kubectl run` and `kubectl expose`, described [elsewhere](/docs/user-guide/quick-start), Kubernetes supports declarative configuration. Often times, configuration files are preferable to imperative commands, since they can be checked into version control and changes to the files can be code reviewed, which is especially important for more complex configurations, producing a more robust, reliable and archival system.
In addition to the imperative-style commands, such as `kubectl run` and `kubectl expose`, described [elsewhere](/docs/user-guide/quick-start), Kubernetes supports declarative configuration. Oftentimes, configuration files are preferable to imperative commands, since they can be checked into version control and changes to the files can be code reviewed, which is especially important for more complex configurations, producing a more robust, reliable and archival system.

In the declarative style, all configuration is stored in YAML or JSON configuration files using Kubernetes's API resource schemas as the configuration schemas. `kubectl` can create, update, delete, and get API resources. The `apiVersion` (currently 'v1'?), resource `kind`, and resource `name` are used by `kubectl` to construct the appropriate API path to invoke for the specified operation.

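A small, hedged sketch of that declarative flow (the manifest name below is an assumption): the same configuration file can drive create, get, and delete.

```shell
# Create, inspect, and remove the resources described in one configuration file
kubectl create -f ./my-app.yaml
kubectl get -f ./my-app.yaml
kubectl delete -f ./my-app.yaml
```
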
@@ -78,7 +78,7 @@ nginx-deployment-2035384211-qqcnn 1/1 Running 0 18s app

The created Replica Set will ensure that there are three nginx Pods at all times.

**Note:** You must specify appropriate selector and pod template labels of a Deployment (in this case, `app = nginx`), i.e. don't overlap with other controllers (including Deployments, Replica Sets, Replication Controllers, etc.) Kubernetes won't stop you from doing that, and if you end up with multiple controllers that have overlapping selectors, those controllers will fight with each others and won't behave correctly.
**Note:** You must specify appropriate selector and pod template labels of a Deployment (in this case, `app = nginx`), i.e. don't overlap with other controllers (including Deployments, Replica Sets, Replication Controllers, etc.) Kubernetes won't stop you from doing that, and if you end up with multiple controllers that have overlapping selectors, those controllers will fight with each other's and won't behave correctly.

## The Status of a Deployment

@@ -503,7 +503,7 @@ number of Pods are less than the desired number.

Note that you should not create other pods whose labels match this selector, either directly, via another Deployment or via another controller such as Replica Sets or Replication Controllers. Otherwise, the Deployment will think that those pods were created by it. Kubernetes will not stop you from doing this.

If you have multiple controllers that have overlapping selectors, the controllers will fight with each others and won't behave correctly.
If you have multiple controllers that have overlapping selectors, the controllers will fight with each other's and won't behave correctly.

### Strategy

@@ -7,7 +7,7 @@ assignees:

This guide explains how to use Kubernetes Federated Services to deploy
a common Service across multiple Kubernetes clusters. This makes it
easy to achieve cross-cluster service discovery and availibility zone
easy to achieve cross-cluster service discovery and availability zone
fault tolerance for your Kubernetes applications.

@@ -42,7 +42,7 @@ Once created, the Federated Service automatically:

1. creates matching Kubernetes Services in every cluster underlying your Cluster Federation,
2. monitors the health of those service "shards" (and the clusters in which they reside), and
3. manages a set of DNS records in a public DNS provder (like Google Cloud DNS, or AWS Route 53), thus ensuring that clients
3. manages a set of DNS records in a public DNS provider (like Google Cloud DNS, or AWS Route 53), thus ensuring that clients
of your federated service can seamlessly locate an appropriate healthy service endpoint at all times, even in the event of cluster,
availability zone or regional outages.

@@ -200,7 +200,7 @@ nginx.mynamespace.myfederation.svc.asia-east1-b.example.com. CNAME 180 ngin
nginx.mynamespace.myfederation.svc.asia-east1-c.example.com. A 180 130.211.56.221
nginx.mynamespace.myfederation.svc.asia-east1.example.com. A 180 130.211.57.243, 130.211.56.221
nginx.mynamespace.myfederation.svc.europe-west1.example.com. CNAME 180 nginx.mynamespace.myfederation.svc.example.com.
nginx.mynamespace.myfederation.svc.europe-west1-d.example.com. CNAME 180 nginx.mynamespace.myfederation.svc.europe-west1.example.com.
nginx.mynamespace.myfederation.svc.europe-west1-d.example.com. CNAME 180 nginx.mynamespace.myfederation.svc.europe-west1.example.com.
... etc.
```

@@ -224,7 +224,7 @@ due to caching by intermediate DNS servers.

### Some notes about the above example

1. Notice that there is a normal ('A') record for each service shard that has at least one healthy backend endpoint. For example in us-central1-a, 104.197.247.191 is the external IP address of the service shard in that zone, and in asia-east1-a the address is 130.211.56.221.
1. Notice that there is a normal ('A') record for each service shard that has at least one healthy backend endpoint. For example, in us-central1-a, 104.197.247.191 is the external IP address of the service shard in that zone, and in asia-east1-a the address is 130.211.56.221.
2. Similarly, there are regional 'A' records which include all healthy shards in that region. For example, 'us-central1'. These regional records are useful for clients which do not have a particular zone preference, and as a building block for the automated locality and failover mechanism described below.
2. For zones where there are currently no healthy backend endpoints, a CNAME ('Canonical Name') record is used to alias (automatically redirect) those queries to the next closest healthy zone. In the example, the service shard in us-central1-f currently has no healthy backend endpoints (i.e. Pods), so a CNAME record has been created to automatically redirect queries to other shards in that region (us-central1 in this case).
3. Similarly, if no healthy shards exist in the enclosing region, the search progresses further afield. In the europe-west1-d availability zone, there are no healthy backends, so queries are redirected to the broader europe-west1 region (which also has no healthy backends), and onward to the global set of healthy addresses (' nginx.mynamespace.myfederation.svc.example.com.')

@@ -295,7 +295,7 @@ availability zones and regions other than the ones local to a Pod by
specifying the appropriate DNS names explicitly, and not relying on
automatic DNS expansion. For example,
"nginx.mynamespace.myfederation.svc.europe-west1.example.com" will
resolve to all of the currently healthy service shards in Europe, even
resolve to all of the currently healthy service shards in europe, even
if the Pod issuing the lookup is located in the U.S., and irrespective
of whether or not there are healthy shards of the service in the U.S.
This is useful for remote monitoring and other similar applications.

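As a hedged illustration of such an explicit lookup (the client tooling is an assumption; the DNS name is the one quoted above):

```shell
# Query the regional federated record directly, regardless of the Pod's own zone
nslookup nginx.mynamespace.myfederation.svc.europe-west1.example.com
```
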
@@ -366,7 +366,7 @@ Check that:
1. Your federation name, DNS provider, DNS domain name are configured correctly. Consult the [federation admin guide](/docs/admin/federation/) or [tutorial](https://github.com/kelseyhightower/kubernetes-cluster-federation) to learn
how to configure your Cluster Federation system's DNS provider (or have your cluster administrator do this for you).
2. Confirm that the Cluster Federation's service-controller is successfully connecting to and authenticating against your selected DNS provider (look for `service-controller` errors or successes in the output of `kubectl logs federation-controller-manager --namespace federation`)
3. Confirm that the Cluster Federation's service-controller is successfully creating DNS records in your DNS provider (or outputting errors in it's logs explaining in more detail what's failing).
3. Confirm that the Cluster Federation's service-controller is successfully creating DNS records in your DNS provider (or outputting errors in its logs explaining in more detail what's failing).

#### Matching DNS records are created in my DNS provider, but clients are unable to resolve against those names
Check that:

@@ -167,7 +167,7 @@ parallelism, for a variety or reasons:
A Container in a Pod may fail for a number of reasons, such as because the process in it exited with
a non-zero exit code, or the Container was killed for exceeding a memory limit, etc. If this
happens, and the `.spec.template.containers[].restartPolicy = "OnFailure"`, then the Pod stays
on the node, but the Container is re-run. Therefore, your program needs to handle the the case when it is
on the node, but the Container is re-run. Therefore, your program needs to handle the case when it is
restarted locally, or else specify `.spec.template.containers[].restartPolicy = "Never"`.
See [pods-states](/docs/user-guide/pod-states) for more information on `restartPolicy`.

@@ -54,7 +54,7 @@ job-banana.yaml
job-cherry.yaml
```

Here, we used `sed` to replace the string `$ITEM` with the the loop variable.
Here, we used `sed` to replace the string `$ITEM` with the loop variable.
You could use any type of template language (jinja2, erb) or write a program
to generate the Job objects.

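A minimal sketch of the expansion loop this hunk describes (the template file name and the item list are assumptions):

```shell
# Render one Job manifest per work item by substituting $ITEM in a template
for item in apple banana cherry; do
  sed "s/\$ITEM/$item/g" job-template.yaml > "job-$item.yaml"
done
```
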
@@ -122,8 +122,7 @@ root@temp-loe07:/#
```

In the last command, the `amqp-consume` tool takes one message (`-c 1`)
from the queue, and passes that message to the standard input of an
an arbitrary command. In this case, the program `cat` is just printing
from the queue, and passes that message to the standard input of an arbitrary command. In this case, the program `cat` is just printing
out what it gets on the standard input, and the echo is just to add a carriage
return so the example is readable.

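The command itself is not shown in this hunk; purely as a hedged sketch (the broker URL and queue name are assumptions), it would look something like:

```shell
# Take a single message from the queue and hand it to `cat` on stdin
amqp-consume --url=amqp://guest:guest@rabbitmq-service:5672 -q job1 -c 1 cat && echo
```
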
@@ -169,7 +168,7 @@ example program:

{% include code.html language="python" file="worker.py" ghlink="/docs/user-guide/job/work-queue-1/worker.py" %}

Now, build an an image. If you are working in the source
Now, build an image. If you are working in the source
tree, then change directory to `examples/job/work-queue-1`.
Otherwise, make a temporary directory, change to it,
download the [Dockerfile](Dockerfile?raw=true),

@@ -275,7 +274,7 @@ not all items will be processed.
If the number of completions is set to more than the number of items in the queue,
then the Job will not appear to be completed, even though all items in the queue
have been processed. It will start additional pods which will block waiting
for a mesage.
for a message.

There is an unlikely race with this pattern. If the container is killed in between the time
that the message is acknowledged by the amqp-consume command and the time that the container

@@ -31,7 +31,7 @@ Here is an overview of the steps in this example:

For this example, for simplicitly, we will start a single instance of Redis.
See the [Redis Example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/redis/README.md) for an example
of deploying Redis scaleably and redundantly.
of deploying Redis scalably and redundantly.

Start a temporary Pod running Redis and a service so we can find it.

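A rough sketch of that step (the walkthrough ships its own manifests; the commands and names here are assumptions):

```shell
# Start a single throwaway Redis instance and expose it behind a Service
kubectl run redis --image=redis
kubectl expose deployment redis --port=6379
```
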
@@ -16,7 +16,7 @@ So in order to easily switch between multiple clusters, for multiple users, a ku

This file contains a series of authentication mechanisms and cluster connection information associated with nicknames. It also introduces the concept of a tuple of authentication information (user) and cluster connection information called a context that is also associated with a nickname.

Multiple kubeconfig files are allowed, if specified explicitly. At runtime they are loaded and merged together along with override options specified from the command line (see [rules](#loading-and-merging) below).
Multiple kubeconfig files are allowed, if specified explicitly. At runtime they are loaded and merged along with override options specified from the command line (see [rules](#loading-and-merging) below).
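
One common way to point `kubectl` at several files explicitly, sketched here with assumed paths, is the `KUBECONFIG` environment variable; `kubectl config view` then shows the merged result:

```shell
export KUBECONFIG="$HOME/.kube/config:$HOME/.kube/staging-config"
kubectl config view
```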

## Related discussion

@@ -266,7 +266,7 @@ $ kubectl exec -ti <pod-name> /bin/bash
// Return a snapshot of the logs from pod <pod-name>.
$ kubectl logs <pod-name>

// Start streaming the logs from pod <pod-name>. This is similiar to the 'tail -f' Linux command.
// Start streaming the logs from pod <pod-name>. This is similar to the 'tail -f' Linux command.
$ kubectl logs -f <pod-name>
```

@@ -21,7 +21,6 @@ kubectl autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MA
### Examples

```

# Auto scale a deployment "foo", with the number of pods between 2 and 10, target CPU utilization specified so a default autoscaling policy will be used:
kubectl autoscale deployment foo --min=2 --max=10

@@ -104,7 +104,7 @@ LIST and WATCH operations may specify label selectors to filter the sets of obje
* _equality-based_ requirements: `?labelSelector=environment%3Dproduction,tier%3Dfrontend`
* _set-based_ requirements: `?labelSelector=environment+in+%28production%2Cqa%29%2Ctier+in+%28frontend%29`

Both label selector styles can be used to list or watch resources via a REST client. For example targeting `apiserver` with `kubectl` and using _equality-based_ one may write:
Both label selector styles can be used to list or watch resources via a REST client. For example, targeting `apiserver` with `kubectl` and using _equality-based_ one may write:

```shell
$ kubectl get pods -l environment=production,tier=frontend
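
# (sketch) the equivalent REST call through `kubectl proxy`, using the
# URL-encoded equality-based selector shown above
$ curl "http://localhost:8001/api/v1/pods?labelSelector=environment%3Dproduction,tier%3Dfrontend"
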
@@ -226,7 +226,7 @@ It's not necessary to "discover" the governing Service of a Pet Set, since it's

Usually pets also need to find their peers. In the previous nginx example, we just used `kubectl` to get the names of existing pods, and as humans, we could tell which ones belonged to a given Pet Set. Another way to find peers is by contacting the API server, just like `kubectl`, but that has several disadvantages (you end up implementing a Kubernetes specific init system that runs as pid 1 in your application container).

Pet Set gives you a way to disover your peers using DNS records. To illustrate this we can use the previous example (note: one usually doesn't `apt-get` in a container).
Pet Set gives you a way to discover your peers using DNS records. To illustrate this we can use the previous example (note: one usually doesn't `apt-get` in a container).

```shell
$ kubectl exec -it web-0 /bin/sh
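
# (sketch) from inside web-0, a peer can be resolved via its per-pod DNS record;
# the governing Service name "nginx" below is an assumption for illustration
$ nslookup web-1.nginx
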
@@ -19,7 +19,7 @@ This example shows you how to "carry over" runtime state across Pet restart by s

### Background

Applications that incrementally build state usually need strong guarantees that they will not restart for extended durations. This is tricky to achieve with containers, so instead, we will ensure that the results of previous computations are trasferred to future pets. Doing so is straight-forward using vanilla Persistent Volumes (which Pet Set already gives you), unless the volume mount point itself needs to be initialized for the Pet to start. This is exactly the case with "virtual machine" docker images, like those based on ubuntu or fedora. Such images embed the entier rootfs of the distro, including package managers like `apt-get` that assume a certain layout of the filesystem. Meaning:
Applications that incrementally build state usually need strong guarantees that they will not restart for extended durations. This is tricky to achieve with containers, so instead, we will ensure that the results of previous computations are trasferred to future pets. Doing so is straightforward using vanilla Persistent Volumes (which Pet Set already gives you), unless the volume mount point itself needs to be initialized for the Pet to start. This is exactly the case with "virtual machine" docker images, like those based on ubuntu or fedora. Such images embed the entier rootfs of the distro, including package managers like `apt-get` that assume a certain layout of the filesystem. Meaning:

* If you mount an empty volume under `/usr`, you won't be able to `apt-get`
* If you mount an empty volume under `/lib`, all your `apt-gets` will fail because there are no system libraries

@@ -166,7 +166,7 @@ vm-1.ub.default.svc.cluster.local

### Nginx master/slave cluster

Lets create a Pet Set that writes out its own config based on a list of peers at initalization time, as described above.
Lets create a Pet Set that writes out its own config based on a list of peers at initialization time, as described above.

Download and create [this](petset_peers.yaml) petset. It will setup 2 nginx webservers, but the second one will proxy all requests to the first:

@@ -192,7 +192,7 @@ web-0 1/1 Running 0 1m
web-1 1/1 Running 0 47s
```

web-1 will redirect all requests to it's "master":
web-1 will redirect all requests to its "master":

```shell
$ kubectl exec -it web-1 -- curl localhost

@@ -177,7 +177,7 @@ To consume a Secret in a volume in a Pod:
1. Create a secret or use an existing one. Multiple pods can reference the same secret.
1. Modify your Pod definition to add a volume under `spec.volumes[]`. Name the volume anything, and have a `spec.volumes[].secret.secretName` field equal to the name of the secret object.
1. Add a `spec.containers[].volumeMounts[]` to each container that needs the secret. Specify `spec.containers[].volumeMounts[].readOnly = true` and `spec.containers[].volumeMounts[].mountPath` to an unused directory name where you would like the secrets to appear.
1. Modify your image and/or command line so that the the program looks for files in that directory. Each key in the secret `data` map becomes the filename under `mountPath`.
1. Modify your image and/or command line so that the program looks for files in that directory. Each key in the secret `data` map becomes the filename under `mountPath`.

This is an example of a pod that mounts a secret in a volume:

@@ -293,7 +293,7 @@ To use a secret in an environment variable in a pod:

1. Create a secret or use an existing one. Multiple pods can reference the same secret.
1. Modify your Pod definition in each container that you wish to consume the value of a secret key to add an environment variable for each secret key you wish to consume. The environment variable that consumes the secret key should populate the secret's name and key in `env[x].valueFrom.secretKeyRef`.
1. Modify your image and/or command line so that the the program looks for values in the specified environment variables
1. Modify your image and/or command line so that the program looks for values in the specified environment variables

This is an example of a pod that mounts a secret in a volume:

@@ -48,7 +48,7 @@ for more details.

#### `selinuxOptions`

Volumes which support SELinux labeling are relabled to be accessable
Volumes which support SELinux labeling are relabled to be accessible
by the label specified unders `seLinuxOptions`. Usually you will only
need to set the `level` section. This sets the SELinux MCS label given
to all containers within the pod as well as the volume.

@@ -32,7 +32,7 @@ $ kubectl proxy --www=docs/user-guide/update-demo/local/ &
I0218 15:18:31.623279 67480 proxy.go:36] Starting to serve on localhost:8001
```

Now visit the the [demo website](http://localhost:8001/static). You won't see anything much quite yet.
Now visit the [demo website](http://localhost:8001/static). You won't see anything much quite yet.

### Step Two: Run the replication controller

@@ -125,7 +125,7 @@ Watch out when using this type of volume, because:
* when Kubernetes adds resource-aware scheduling, as is planned, it will not be
able to account for resources used by a `hostPath`
* the directories created on the underlying hosts are only writable by root, you either need
to run your process as root in a priveleged container or modify the file permissions on
to run your process as root in a privileged container or modify the file permissions on
the host to be able to write to a `hostPath` volume

#### Example pod

@@ -244,7 +244,7 @@ There are some restrictions when using an awsElasticBlockStore volume:

#### Creating an EBS volume

Before you can use a EBS volume with a pod, you need to create it.
Before you can use an EBS volume with a pod, you need to create it.

```shell
aws ec2 create-volume --availability-zone eu-west-1a --size 10 --volume-type gp2

@@ -379,7 +379,7 @@ mounts an empty directory and clones a git repository into it for your pod to
use. In the future, such volumes may be moved to an even more decoupled model,
rather than extending the Kubernetes API for every such use case.

Here is a example for gitRepo volume:
Here is an example for gitRepo volume:

```yaml
apiVersion: v1

js/script.js
@@ -92,14 +92,13 @@ function px(n){

var kub = (function () {
  var HEADER_HEIGHT;
  var html, header, mainNav, quickstartButton, hero, encyclopedia, footer, wishField, headlineWrapper;
  var html, header, mainNav, quickstartButton, hero, encyclopedia, footer, headlineWrapper;

  $(document).ready(function () {
    html = $('html');
    body = $('body');
    header = $('header');
    mainNav = $('#mainNav');
    wishField = $('#wishField');
    quickstartButton = $('#quickstartButton');
    hero = $('#hero');
    encyclopedia = $('#encyclopedia');

@@ -112,13 +111,11 @@ var kub = (function () {
    window.addEventListener('resize', resetTheView);
    window.addEventListener('scroll', resetTheView);
    window.addEventListener('keydown', handleKeystrokes);
    wishField[0].addEventListener('keydown', handleKeystrokes);

    document.onunload = function(){
      window.removeEventListener('resize', resetTheView);
      window.removeEventListener('scroll', resetTheView);
      window.removeEventListener('keydown', handleKeystrokes);
      wishField[0].removeEventListener('keydown', handleKeystrokes);
    };

    setInterval(setFooterType, 10);

@@ -189,24 +186,8 @@ var kub = (function () {
    }
  }

  function submitWish(textfield) {
    window.location.replace("https://github.com/kubernetes/kubernetes.github.io/issues/new?title=I%20wish%20" +
      window.location.pathname + "%20" + textfield.value + "&body=I%20wish%20" +
      window.location.pathname + "%20" + textfield.value);

    textfield.value = '';
    textfield.blur();
  }

  function handleKeystrokes(e) {
    switch (e.which) {
      case 13: {
        if (e.currentTarget === wishField[0]) {
          submitWish(wishField[0]);
        }
        break;
      }

      case 27: {
        if (html.hasClass('open-nav')) {
          toggleMenu();