Merge branch 'master' into fix-autoscaler-doc
commit fb8062ec66
@@ -14,6 +14,8 @@ toc:
    path: /docs/getting-started-guides/kops/
  - title: Hello World on Google Container Engine
    path: /docs/hellonode/
  - title: Installing kubectl
    path: /docs/getting-started-guides/kubectl/
  - title: Downloading or Building Kubernetes
    path: /docs/getting-started-guides/binary_release/
  - title: Online Training Course
@@ -15,6 +15,14 @@ toc:
  section:
  - title: Using Port Forwarding to Access Applications in a Cluster
    path: /docs/tasks/access-application-cluster/port-forward-access-application-cluster/

- title: Debugging Applications in a Cluster
  section:
  - title: Determining the Reason for Pod Failure
    path: /docs/tasks/debug-application-cluster/determine-reason-pod-failure/

- title: Accessing the Kubernetes API
  section:
  - title: Using an HTTP Proxy to Access the Kubernetes API
@@ -51,3 +51,7 @@ toc:
    path: /docs/tutorials/stateless-application/expose-external-ip-address-service/
  - title: Exposing an External IP Address to Access an Application in a Cluster
    path: /docs/tutorials/stateless-application/expose-external-ip-address/
- title: Stateful Applications
  section:
  - title: Running a Single-Instance Stateful Application
    path: /docs/tutorials/stateful-application/run-stateful-application/
@@ -20,6 +20,8 @@
<a href="https://calendar.google.com/calendar/embed?src=nt2tcnbtbied3l6gi2h29slvc0%40group.calendar.google.com" class="calendar"><span>Events Calendar</span></a>
</div>
<div>
<a href="//get.k8s.io" class="button">Download K8s</a>
<a href="https://github.com/kubernetes/kubernetes" class="button">Contribute to the K8s codebase</a>
</div>
</div>
<div id="miceType" class="center">© {{ 'now' | date: "%Y" }} Kubernetes</div>
@@ -80,6 +80,34 @@
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-36037335-10', 'auto');
ga('send', 'pageview');

// hide docs nav area if no nav is present, or if nav only contains a link to the current page
(function () {
  window.addEventListener('DOMContentLoaded', init)

  // play nice with our neighbors
  function init() {
    window.removeEventListener('DOMContentLoaded', init)
    hideNav()
  }

  function hideNav(toc){
    if (!toc) toc = document.querySelector('#docsToc')
    var container = toc.querySelector('.container')

    // container is built dynamically, so it may not be present on the first runloop
    if (container) {
      if (container.childElementCount === 0 || toc.querySelectorAll('a.item').length === 1) {
        toc.style.display = 'none'
        document.getElementById('docsContent').style.width = '100%'
      }
    } else {
      requestAnimationFrame(function () {
        hideNav(toc)
      })
    }
  }
})();
</script>
<!-- Commenting out AnswerDash for now; we need to work on our list of questions/answers/design first
<!-- Start of AnswerDash script <script>var AnswerDash;!function(e,t,n,s,a){if(!t.getElementById(s)){var i,r=t.createElement(n),c=t.getElementsByTagName(n)[0];e[a]||(i=e[a]=function(){i.__oninit.push(arguments)},i.__oninit=[]),r.type="text/javascript",r.async=!0,r.src="https://p1.answerdash.com/answerdash.min.js?siteid=756",r.setAttribute("id",s),c.parentNode.insertBefore(r,c)}}(window,document,"script","answerdash-script","AnswerDash");</script> <!-- End of AnswerDash script -->
@@ -389,6 +389,14 @@ footer
    display: block
    height: 0
    overflow: hidden

    &.button
      background-image: none
      width: auto
      height: auto

      &:hover
        color: $blue

  a.twitter
    background-position: 0 0
@@ -874,8 +882,19 @@ dd
  img
    max-width: 100%

  a
    //font-weight: 700
    text-decoration: underline

  a:visited
    color: blueviolet

  a.button
    border-radius: 2px
    text-decoration: none

    &:visited
      color: white

  a.issue
    margin-left: 20px
@@ -15,7 +15,7 @@ ul, li
ul
  margin: 0
  padding: 0

a
  text-decoration: none
@@ -7,18 +7,20 @@ Add-ons extend the functionality of Kubernetes.

This page lists some of the available add-ons and links to their respective installation instructions.

Add-ons in each section are sorted alphabetically - the ordering does not imply any preferential status.

## Networking and Network Policy

* [Weave Net](https://github.com/weaveworks/weave-kube) provides networking and network policy, will carry on working on both sides of a network partition, and does not require an external database.
* [Calico](http://docs.projectcalico.org/v1.5/getting-started/kubernetes/installation/hosted/) is a secure L3 networking and network policy provider.
* [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml) is an overlay network provider that can be used with Kubernetes.
* [Calico](http://docs.projectcalico.org/v1.6/getting-started/kubernetes/installation/hosted/) is a secure L3 networking and network policy provider.
* [Canal](https://github.com/tigera/canal/tree/master/k8s-install/kubeadm) unites Flannel and Calico, providing networking and network policy.
* [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml) is an overlay network provider that can be used with Kubernetes.
* [Romana](http://romana.io) is a Layer 3 networking solution for pod networks that also supports the [NetworkPolicy API](/docs/user-guide/networkpolicies/). Kubeadm add-on installation details available [here](https://github.com/romana/romana/tree/master/containerize).
* [Weave Net](https://github.com/weaveworks/weave-kube) provides networking and network policy, will carry on working on both sides of a network partition, and does not require an external database.

## Visualization & Control

* [Weave Scope](https://www.weave.works/documentation/scope-latest-installing/#k8s) is a tool for graphically visualizing your containers, pods, services etc. Use it in conjunction with a [Weave Cloud account](https://cloud.weave.works/) or host the UI yourself.
* [Dashboard](https://github.com/kubernetes/dashboard#kubernetes-dashboard) is a dashboard web interface for Kubernetes.
* [Weave Scope](https://www.weave.works/documentation/scope-latest-installing/#k8s) is a tool for graphically visualizing your containers, pods, services etc. Use it in conjunction with a [Weave Cloud account](https://cloud.weave.works/) or host the UI yourself.

## Legacy Add-ons
@@ -100,8 +100,19 @@ existence or non-existence of host ports.
There are a number of ways that this network model can be implemented. This
document is not an exhaustive study of the various methods, but hopefully serves
as an introduction to various technologies and serves as a jumping-off point.
If some techniques become vastly preferable to others, we might detail them more
here.

The following networking options are sorted alphabetically - the order does not
imply any preferential status.

### Contiv

[Contiv](https://github.com/contiv/netplugin) provides configurable networking (native l3 using BGP, overlay using vxlan, classic l2, or Cisco-SDN/ACI) for various use cases. [Contiv](http://contiv.io) is all open sourced.

### Flannel

[Flannel](https://github.com/coreos/flannel#flannel) is a very simple overlay
network that satisfies the Kubernetes requirements. Many
people have reported success with Flannel and Kubernetes.

### Google Compute Engine (GCE)
@@ -158,29 +169,12 @@ Follow the "With Linux Bridge devices" section of [this very nice
tutorial](http://blog.oddbit.com/2014/08/11/four-ways-to-connect-a-docker/) from
Lars Kellogg-Stedman.

### Weave Net from Weaveworks

[Weave Net](https://www.weave.works/products/weave-net/) is a
resilient and simple to use network for Kubernetes and its hosted applications.
Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/)
or stand-alone. In either version, it doesn't require any configuration or extra code
to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes.

### Flannel

[Flannel](https://github.com/coreos/flannel#flannel) is a very simple overlay
network that satisfies the Kubernetes requirements. It installs in minutes and
should get you up and running if the above techniques are not working. Many
people have reported success with Flannel and Kubernetes.

### OpenVSwitch

[OpenVSwitch](/docs/admin/ovs-networking) is a somewhat more mature but also
complicated way to build an overlay network. This is endorsed by several of the
"Big Shops" for networking.

### Project Calico

[Project Calico](https://github.com/projectcalico/calico-containers/blob/master/docs/cni/kubernetes/README.md) is an open source container networking provider and network policy engine.
@@ -193,9 +187,13 @@ Calico can also be run in policy enforcement mode in conjunction with other netw

[Romana](http://romana.io) is an open source network and security automation solution that lets you deploy Kubernetes without an overlay network. Romana supports Kubernetes [Network Policy](/docs/user-guide/networkpolicies/) to provide isolation across network namespaces.

### Contiv
### Weave Net from Weaveworks

[Contiv](https://github.com/contiv/netplugin) provides configurable networking (native l3 using BGP, overlay using vxlan, classic l2, or Cisco-SDN/ACI) for various use cases. [Contiv](http://contiv.io) is all open sourced.
[Weave Net](https://www.weave.works/products/weave-net/) is a
resilient and simple to use network for Kubernetes and its hosted applications.
Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/)
or stand-alone. In either version, it doesn't require any configuration or extra code
to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes.

## Other reading
@@ -0,0 +1,110 @@
---
---

<style>
li>.highlighter-rouge {position:relative; top:3px;}
</style>

## Overview

kubectl is the command line tool you use to interact with Kubernetes clusters.

You should use a version of kubectl that is at least as new as your server.
`kubectl version` will print the server and client versions. Using the same version of kubectl
as your server naturally works; using a newer kubectl than your server also works; but if you use
an older kubectl with a newer server you may see odd validation errors.
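
For example, a quick way to spot skew (the output shown by your cluster will vary by release):

```shell
# Prints one line for the client and one for the server; compare the minor versions.
kubectl version
```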

## Download a release

Download kubectl from the [official Kubernetes releases](https://console.cloud.google.com/storage/browser/kubernetes-release/release/):

On MacOS:

```shell
wget https://storage.googleapis.com/kubernetes-release/release/v1.4.4/bin/darwin/amd64/kubectl
chmod +x kubectl
mv kubectl /usr/local/bin/kubectl
```

On Linux:

```shell
wget https://storage.googleapis.com/kubernetes-release/release/v1.4.4/bin/linux/amd64/kubectl
chmod +x kubectl
mv kubectl /usr/local/bin/kubectl
```

You may need to `sudo` the `mv`; you can put it anywhere in your `PATH` - some people prefer to install to `~/bin`.
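
If you prefer the `~/bin` route, a minimal sketch (assumes a bash-style shell; adjust the profile file to taste):

```shell
# Install into a per-user bin directory instead of /usr/local/bin (no sudo needed).
mkdir -p ~/bin
mv kubectl ~/bin/kubectl
# Make sure ~/bin is on PATH; add this line to your shell profile to persist it.
export PATH="$HOME/bin:$PATH"
```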

## Alternatives

### Download as part of the Google Cloud SDK

kubectl can be installed as part of the Google Cloud SDK:

First install the [Google Cloud SDK](https://cloud.google.com/sdk/).

After Google Cloud SDK installs, run the following command to install `kubectl`:

```shell
gcloud components install kubectl
```

Do check that the version is sufficiently up-to-date using `kubectl version`.

### Install with brew

If you are on MacOS and using brew, you can install with:

```shell
brew install kubectl
```

The Homebrew project is independent of Kubernetes, so do check that the version is
sufficiently up-to-date using `kubectl version`.

# Enabling shell autocompletion

kubectl includes autocompletion support, which can save a lot of typing!

The completion script itself is generated by kubectl, so you typically just need to invoke it from your profile.

Common examples are provided here; for more details, consult `kubectl completion -h`.

## On Linux, using bash

To add it to your current shell: `source <(kubectl completion bash)`

To add kubectl autocompletion to your profile (so it is automatically loaded in future shells):

```shell
echo "source <(kubectl completion bash)" >> ~/.bashrc
```

## On MacOS, using bash

On MacOS, you will need to install the bash-completion support first:

```shell
brew install bash-completion
```

To add it to your current shell:

```shell
source $(brew --prefix)/etc/bash_completion
source <(kubectl completion bash)
```

To add kubectl autocompletion to your profile (so it is automatically loaded in future shells):

```shell
echo "source $(brew --prefix)/etc/bash_completion" >> ~/.bash_profile
echo "source <(kubectl completion bash)" >> ~/.bash_profile
```

Please note that this only appears to work currently if you install using `brew install kubectl`,
and not if you downloaded kubectl directly.
@@ -81,12 +81,12 @@ to implement one of the above options:

- **Use a network plugin which is called by Kubernetes**
  - Kubernetes supports the [CNI](https://github.com/containernetworking/cni) network plugin interface.
  - There are a number of solutions which provide plugins for Kubernetes:
  - There are a number of solutions which provide plugins for Kubernetes (listed alphabetically):
    - [Calico](http://docs.projectcalico.org/)
    - [Flannel](https://github.com/coreos/flannel)
    - [Calico](https://github.com/projectcalico/calico-containers)
    - [Weave](https://weave.works/)
    - [Romana](http://romana.io/)
    - [Open vSwitch (OVS)](http://openvswitch.org/)
    - [Romana](http://romana.io/)
    - [Weave](http://weave.works/)
    - [More found here](/docs/admin/networking#how-to-achieve-this)
  - You can also write your own.
- **Compile support directly into Kubernetes**
@@ -65,6 +65,7 @@ export GOVC_DATACENTER='ha-datacenter' # The datacenter to be used by vSphere cl
```

Sample environment:

```shell
export GOVC_URL='10.161.236.217'
export GOVC_USERNAME='administrator'
@@ -79,6 +80,7 @@ export GOVC_DATACENTER='Datacenter'
```

Import this VMDK into your vSphere datastore:

```shell
govc import.vmdk kube.vmdk ./kube/
```
@@ -5,9 +5,7 @@ assignees:

---

<p>The Kubernetes documentation can help you set up Kubernetes, learn about the system, or get your applications and workloads running on Kubernetes.</p>

<p><a href="/docs/whatisk8s/" class="button">Read the Kubernetes Overview</a></p>
<p>Kubernetes documentation can help you set up Kubernetes, learn about the system, or get your applications and workloads running on Kubernetes. To learn the basics of what Kubernetes is and how it works, read "<a href="/docs/whatisk8s/">What is Kubernetes</a>".</p>

<h2>Interactive Tutorial</h2>
@@ -40,4 +38,4 @@ assignees:

<h2>Tools</h2>

<p>The <a href="/docs/tools/">tools</a> page contains a list of native and third-party tools for Kubernetes.</p>
<p>The <a href="/docs/tools/">tools</a> page contains a list of native and third-party tools for Kubernetes.</p>
@@ -0,0 +1,110 @@
---
---

{% capture overview %}

This page shows how to write and read a Container
termination message.

Termination messages provide a way for containers to write
information about fatal events to a location where it can
be easily retrieved and surfaced by tools like dashboards
and monitoring software. In most cases, information that you
put in a termination message should also be written to
the general
[Kubernetes logs](/docs/user-guide/logging/).

{% endcapture %}

{% capture prerequisites %}

{% include task-tutorial-prereqs.md %}

{% endcapture %}

{% capture steps %}

### Writing and reading a termination message

In this exercise, you create a Pod that runs one container.
The configuration file specifies a command that runs when
the container starts.

{% include code.html language="yaml" file="termination.yaml" ghlink="/docs/tasks/debug-pod-container/termination.yaml" %}

1. Create a Pod based on the YAML configuration file:

        export REPO=https://raw.githubusercontent.com/kubernetes/kubernetes.github.io/master
        kubectl create -f $REPO/docs/tasks/debug-pod-container/termination.yaml

    In the YAML file, in the `command` and `args` fields, you can see that the
    container sleeps for 10 seconds and then writes "Sleep expired" to
    the `/dev/termination-log` file. After the container writes
    the "Sleep expired" message, it terminates.

1. Display information about the Pod:

        kubectl get pod termination-demo

    Repeat the preceding command until the Pod is no longer running.

1. Display detailed information about the Pod:

        kubectl get pod --output=yaml

    The output includes the "Sleep expired" message:

        apiVersion: v1
        kind: Pod
        ...
        lastState:
          terminated:
            containerID: ...
            exitCode: 0
            finishedAt: ...
            message: |
              Sleep expired
        ...

1. Use a Go template to filter the output so that it includes
    only the termination message:

    ```
    {% raw %} kubectl get pod termination-demo -o go-template="{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}"{% endraw %}
    ```

### Setting the termination log file

By default Kubernetes retrieves termination messages from
`/dev/termination-log`. To change this to a different file,
specify a `terminationMessagePath` field for your Container.

For example, suppose your Container writes termination messages to
`/tmp/my-log`, and you want Kubernetes to retrieve those messages.
Set `terminationMessagePath` as shown here:

    apiVersion: v1
    kind: Pod
    metadata:
      name: msg-path-demo
    spec:
      containers:
      - name: msg-path-demo-container
        image: debian
        terminationMessagePath: "/tmp/my-log"

{% endcapture %}

{% capture whatsnext %}

* See the `terminationMessagePath` field in
[Container](/docs/api-reference/v1/definitions#_v1_container).
* Learn about [retrieving logs](/docs/user-guide/logging/).
* Learn about [Go templates](https://golang.org/pkg/text/template/).

{% endcapture %}

{% include templates/task.md %}
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
  name: termination-demo
spec:
  containers:
  - name: termination-demo-container
    image: debian
    command: ["/bin/sh"]
    args: ["-c", "sleep 10 && echo Sleep expired > /dev/termination-log"]
@@ -15,6 +15,10 @@ The Tutorials section of the Kubernetes documentation is a work in progress.

* [Exposing an External IP Address to Access an Application in a Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/)

#### Stateful Applications

* [Running a Single-Instance Stateful Application](/docs/tutorials/stateful-application/run-stateful-application/)

### What's next

If you would like to write a tutorial, see
@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  gcePersistentDisk:
    pdName: mysql-disk
    fsType: ext4
@@ -0,0 +1,51 @@
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
  - port: 3306
  selector:
    app: mysql
  clusterIP: None
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: mysql
spec:
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: mysql:5.6
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-pv-claim
@@ -0,0 +1,220 @@
---
---

{% capture overview %}

This page shows you how to run a single-instance stateful application
in Kubernetes using a PersistentVolume and a Deployment. The
application is MySQL.

{% endcapture %}

{% capture objectives %}

* Create a PersistentVolume referencing a disk in your environment.
* Create a MySQL Deployment.
* Expose MySQL to other pods in the cluster at a known DNS name.

{% endcapture %}

{% capture prerequisites %}

* {% include task-tutorial-prereqs.md %}

* For data persistence, we will create a PersistentVolume that
references a disk in your
environment. See
[here](/docs/user-guide/persistent-volumes/#types-of-persistent-volumes) for
the types of environments supported. This tutorial demonstrates
`GCEPersistentDisk`, but any type will work. `GCEPersistentDisk`
volumes only work on Google Compute Engine.

{% endcapture %}

{% capture lessoncontent %}

### Set up a disk in your environment

You can use any type of persistent volume for your stateful app. See
[Types of Persistent Volumes](/docs/user-guide/persistent-volumes/#types-of-persistent-volumes)
for a list of supported environment disks. For Google Compute Engine, run:

```
gcloud compute disks create --size=20GB mysql-disk
```

Next, create a PersistentVolume that points to the `mysql-disk`
disk just created. Here is a configuration file for a PersistentVolume
that points to the Compute Engine disk above:

{% include code.html language="yaml" file="gce-volume.yaml" ghlink="/docs/tutorials/stateful-application/gce-volume.yaml" %}

Notice that the `pdName: mysql-disk` line matches the name of the disk
in the Compute Engine environment. See
[Persistent Volumes](/docs/user-guide/persistent-volumes/)
for details on writing a PersistentVolume configuration file for other
environments.

Create the persistent volume:

```
kubectl create -f http://k8s.io/docs/tutorials/stateful-application/gce-volume.yaml
```

### Deploy MySQL

You can run a stateful application by creating a Kubernetes Deployment
and connecting it to an existing PersistentVolume using a
PersistentVolumeClaim. For example, this YAML file describes a
Deployment that runs MySQL and references the PersistentVolumeClaim. The file
defines a volume mount for /var/lib/mysql, and then creates a
PersistentVolumeClaim that looks for a 20Gi volume. This claim is
satisfied by any volume that meets the requirements, in this case, the
volume created above.

Note: The password is defined in the config YAML, and this is insecure. See
[Kubernetes Secrets](/docs/user-guide/secrets/)
for a secure solution.

{% include code.html language="yaml" file="mysql-deployment.yaml" ghlink="/docs/tutorials/stateful-application/mysql-deployment.yaml" %}

1. Deploy the contents of the YAML file:

        kubectl create -f http://k8s.io/docs/tutorials/stateful-application/mysql-deployment.yaml

1. Display information about the Deployment:

        kubectl describe deployment mysql

        Name:                 mysql
        Namespace:            default
        CreationTimestamp:    Tue, 01 Nov 2016 11:18:45 -0700
        Labels:               app=mysql
        Selector:             app=mysql
        Replicas:             1 updated | 1 total | 0 available | 1 unavailable
        StrategyType:         Recreate
        MinReadySeconds:      0
        OldReplicaSets:       <none>
        NewReplicaSet:        mysql-63082529 (1/1 replicas created)
        Events:
          FirstSeen  LastSeen  Count  From                     SubobjectPath  Type    Reason             Message
          ---------  --------  -----  ----                     -------------  ------  ------             -------
          33s        33s       1      {deployment-controller}                 Normal  ScalingReplicaSet  Scaled up replica set mysql-63082529 to 1

1. List the pods created by the Deployment:

        kubectl get pods -l app=mysql

        NAME                   READY     STATUS    RESTARTS   AGE
        mysql-63082529-2z3ki   1/1       Running   0          3m

1. Inspect the Persistent Volume:

        kubectl describe pv mysql-pv

        Name:            mysql-pv
        Labels:          <none>
        Status:          Bound
        Claim:           default/mysql-pv-claim
        Reclaim Policy:  Retain
        Access Modes:    RWO
        Capacity:        20Gi
        Message:
        Source:
            Type:        GCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)
            PDName:      mysql-disk
            FSType:      ext4
            Partition:   0
            ReadOnly:    false
        No events.

1. Inspect the PersistentVolumeClaim:

        kubectl describe pvc mysql-pv-claim

        Name:          mysql-pv-claim
        Namespace:     default
        Status:        Bound
        Volume:        mysql-pv
        Labels:        <none>
        Capacity:      20Gi
        Access Modes:  RWO
        No events.

### Accessing the MySQL instance

The preceding YAML file creates a service that
allows other Pods in the cluster to access the database. The Service option
`clusterIP: None` lets the Service DNS name resolve directly to the
Pod's IP address. This is optimal when you have only one Pod
behind a Service and you don't intend to increase the number of Pods.
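
To see the headless-Service behavior for yourself, resolve the name from a throwaway Pod (illustrative; the `busybox` image and the pod name `dns-test` are arbitrary choices):

```
kubectl run -it --rm --image=busybox dns-test -- nslookup mysql
```

With `clusterIP: None`, the lookup should return the MySQL Pod's IP directly rather than a cluster IP.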

Run a MySQL client to connect to the server:

```
kubectl run -it --rm --image=mysql:5.6 mysql-client -- mysql -h mysql -ppassword
```

This command creates a new Pod in the cluster running a MySQL client
and connects it to the server through the Service. If it connects, you
know your stateful MySQL database is up and running.

```
Waiting for pod default/mysql-client-274442439-zyp6i to be running, status is Pending, pod ready: false
If you don't see a command prompt, try pressing enter.

mysql>
```

### Updating

The image or any other part of the Deployment can be updated as usual
with the `kubectl apply` command, as shown after this list. Here are some precautions that are
specific to stateful apps:

* Don't scale the app. This setup is for single-instance apps
only. The underlying PersistentVolume can only be mounted to one
Pod. For clustered stateful apps, see the
[StatefulSet documentation](/docs/user-guide/petset/).
* Use `strategy:` `type: Recreate` in the Deployment configuration
YAML file. This instructs Kubernetes to _not_ use rolling
updates. Rolling updates will not work, as you cannot have more than
one Pod running at a time. The `Recreate` strategy will stop the
first pod before creating a new one with the updated configuration.
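
For example, after editing a local copy of the configuration (the file name here is just the one used earlier in this tutorial):

```
kubectl apply -f mysql-deployment.yaml
```

With `Recreate`, expect a brief outage while the old Pod stops and its replacement starts.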

### Deleting a deployment

Delete the deployed objects by name:

```
kubectl delete deployment,svc mysql
kubectl delete pvc mysql-pv-claim
kubectl delete pv mysql-pv
```

Also, if you are using Compute Engine disks:

```
gcloud compute disks delete mysql-disk
```

{% endcapture %}

{% capture whatsnext %}

* Learn more about [Deployment objects](/docs/user-guide/deployments/).

* Learn more about [Deploying applications](/docs/user-guide/deploying-applications/).

* [kubectl run documentation](/docs/user-guide/kubectl/kubectl_run/)

* [Volumes](/docs/user-guide/volumes/) and [Persistent Volumes](/docs/user-guide/persistent-volumes/)

{% endcapture %}

{% include templates/tutorial.md %}
@@ -129,7 +129,7 @@ To use it,

* Write an application atop the client-go clients. Note that client-go defines its own API objects, so if needed, please import API definitions from client-go rather than from the main repository, e.g., `import "k8s.io/client-go/1.4/pkg/api/v1"` is correct.

The Go client can use the same [kubeconfig file](/docs/user-guide/kubeconfig-file)
as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://github.com/kubernetes/client-go/examples/out-of-cluster.go):
as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://github.com/kubernetes/client-go/blob/master/examples/out-of-cluster/main.go):

```golang
import (
@@ -183,7 +183,8 @@ From within a pod the recommended ways to connect to API are:

in any container of the pod can access it. See this [example of using kubectl proxy
in a pod](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/kubectl-container/).
- use the Go client library, and create a client using the `client.NewInCluster()` factory.
This handles locating and authenticating to the apiserver. [example](https://github.com/kubernetes/client-go/examples/in-cluster.go)
This handles locating and authenticating to the apiserver. See this [example of using Go client
library in a pod](https://github.com/kubernetes/client-go/blob/master/examples/in-cluster/main.go).
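
For the `kubectl proxy` route, a minimal sketch of what the sidecar approach looks like once the proxy container is running (the port is an arbitrary choice):

```shell
# The proxy handles authentication; other containers in the pod can then
# talk to the API over plain localhost HTTP.
kubectl proxy --port=8001 &
curl http://localhost:8001/api/v1/namespaces/default/pods
```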

In each case, the credentials of the pod are used to communicate securely with the apiserver.
@@ -173,7 +173,7 @@ on node N if node N has a label with key `failure-domain.beta.kubernetes.io/zone`
such that there is at least one node in the cluster with key `failure-domain.beta.kubernetes.io/zone` and
value V that is running a pod that has a label with key "security" and value "S1".) The pod anti-affinity
rule says that the pod cannot schedule onto a node if that node is already running a pod with label
having key "security" and value "S2". (If the `topologyKey` were `failure-domain.beta.kuberntes.io/zone` then
having key "security" and value "S2". (If the `topologyKey` were `failure-domain.beta.kubernetes.io/zone` then
it would mean that the pod cannot schedule onto a node if that node is in the same zone as a pod with
label having key "security" and value "S2".) See the [design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/podaffinity.md)
for many more examples of pod affinity and anti-affinity, both the `requiredDuringSchedulingIgnoredDuringExecution`
@@ -66,8 +66,8 @@ The possible values for RestartPolicy are `Always`, `OnFailure`, or `Never`. If
Three types of controllers are currently available:

- Use a [`Job`](/docs/user-guide/jobs/) for pods which are expected to terminate (e.g. batch computations).
- Use a [`ReplicationController`](/docs/user-guide/replication-controller/) for pods which are not expected to
terminate (e.g. web servers).
- Use a [`ReplicationController`](/docs/user-guide/replication-controller/) or [`Deployment`](/docs/user-guide/deployments/)
for pods which are not expected to terminate (e.g. web servers).
- Use a [`DaemonSet`](/docs/admin/daemons/): Use for pods which need to run 1 per machine because they provide a
machine-specific system service.
If you are unsure whether to use ReplicationController or Daemon, then see [Daemon Set versus