Merge branch 'master' into patch-2

@@ -174,14 +174,7 @@ example. If creating an image for a doc, follow the
section on "Docker images" from the Kubernetes repository.

## Partners
Kubernetes partners are companies that contribute to the Kubernetes core codebase and/or extend their platform to support Kubernetes. Partners can get their logos added to the partner section of the [community page](http://k8s.io/community) by following the steps below and meeting the logo specifications below. Partners also need a URL specific to their Kubernetes integration ready; this URL is the destination when the logo is clicked.

* The partner product logo should be a transparent png image centered in a 215x125 px frame. (Look at the existing logos for reference.)
* The logo must link to a URL that is specific to integrating with Kubernetes, hosted on the partner's site.
* The logo should be named *product-name*_logo.png and placed in the `/images/community_logos` folder.
* The image reference (including the link to the partner URL) should be added in `community.html` under `<div class="partner-logos" > ...</div>`.
* Please do not change the order of the existing partner images. Append your logo to the end of the list.
* Once you have completed and tested the look and feel, submit the pull request.
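
For illustration, a minimal sketch of those steps for a hypothetical partner product called "acme" (the product name and source path are illustrative):

```
cp ~/Downloads/acme_logo.png images/community_logos/acme_logo.png   # 215x125 transparent PNG
# append the logo markup, linked to the partner's Kubernetes page, to the end
# of the <div class="partner-logos"> block in community.html, then:
git add images/community_logos/acme_logo.png community.html
git commit -m "Add acme to the partner logos"
```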

Kubernetes partners are companies that contribute to the Kubernetes core codebase, extend their platform to support Kubernetes, or provide managed services to users centered around the Kubernetes platform. Partners can get their services and offerings added to the [partner page](https://k8s.io/partners) by completing and submitting the [partner request form](https://goo.gl/qcSnZF). Once the information and assets are verified, the partner products/services will be listed on the partner page. This typically takes 7-10 days.

## Thank you!

@@ -1,4 +1,5 @@
bigheader: "Concepts"
abstract: "Detailed explanations of Kubernetes system concepts and abstractions."
toc:
- title: Concepts
  path: /docs/concepts/

@@ -0,0 +1,5 @@
bigheader: "Kubernetes Documentation"
abstract: "Documentation for using and learning about Kubernetes."
toc:
- title: Kubernetes Documentation
  path: /docs/

@@ -1,4 +1,5 @@
tocs:
- docs-home
- guides
- tutorials
- tasks

@@ -1,8 +1,8 @@
bigheader: "Guides"
abstract: "How to get started, and achieve tasks, using Kubernetes"
abstract: "How to get started, and accomplish tasks, using Kubernetes."
toc:
- title: Guides
  path: /docs/
  path: /docs/user-guide/

- title: Getting Started
  section:

@@ -14,6 +14,8 @@ toc:
    path: /docs/getting-started-guides/kops/
  - title: Hello World on Google Container Engine
    path: /docs/hellonode/
  - title: Installing kubectl
    path: /docs/getting-started-guides/kubectl/
  - title: Downloading or Building Kubernetes
    path: /docs/getting-started-guides/binary_release/
  - title: Online Training Course

@@ -14,6 +14,8 @@ toc:
    path: /docs/contribute/stage-documentation-changes/
  - title: Using Page Templates
    path: /docs/contribute/page-templates/
  - title: Documentation Style Guide
    path: /docs/contribute/style-guide/

- title: Troubleshooting
  section:

@@ -1,4 +1,5 @@
bigheader: "Tasks"
abstract: "Step-by-step instructions for performing operations with Kubernetes."
toc:
- title: Tasks
  path: /docs/tasks/

@@ -14,6 +15,14 @@ toc:
  section:
  - title: Using Port Forwarding to Access Applications in a Cluster
    path: /docs/tasks/access-application-cluster/port-forward-access-application-cluster/

- title: Debugging Applications in a Cluster
  section:
  - title: Determining the Reason for Pod Failure
    path: /docs/tasks/debug-application-cluster/determine-reason-pod-failure/

- title: Accessing the Kubernetes API
  section:
  - title: Using an HTTP Proxy to Access the Kubernetes API

@@ -1,4 +1,5 @@
bigheader: "Tools"
abstract: "Tools to help you use and enhance Kubernetes."
toc:
- title: Tools
  path: /docs/tools/

@@ -1,4 +1,5 @@
bigheader: "Tutorials"
abstract: "Detailed walkthroughs of common Kubernetes operations and workflows."
toc:
- title: Tutorials
  path: /docs/tutorials/

@@ -50,3 +51,7 @@ toc:
    path: /docs/tutorials/stateless-application/expose-external-ip-address-service/
  - title: Exposing an External IP Address to Access an Application in a Cluster
    path: /docs/tutorials/stateless-application/expose-external-ip-address/
- title: Stateful Applications
  section:
  - title: Running a Single-Instance Stateful Application
    path: /docs/tutorials/stateful-application/run-stateful-application/

@@ -20,6 +20,8 @@
            <a href="https://calendar.google.com/calendar/embed?src=nt2tcnbtbied3l6gi2h29slvc0%40group.calendar.google.com" class="calendar"><span>Events Calendar</span></a>
        </div>
        <div>
            <a href="//get.k8s.io" class="button">Download K8s</a>
            <a href="https://github.com/kubernetes/kubernetes" class="button">Contribute to the K8s codebase</a>
        </div>
    </div>
    <div id="miceType" class="center">© {{ 'now' | date: "%Y" }} Kubernetes</div>

@@ -35,6 +35,27 @@
        link: 'http://wercker.com/workflows/partners/kubernetes/',
        blurb: 'Netscaler CPX gives app developers all the features they need to load balance their microservices and containerized apps with Kubernetes.'
      },
      {
        type: 0,
        name: 'Cockroach Labs',
        logo: 'cockroach_labs',
        link: 'https://www.cockroachlabs.com/blog/running-cockroachdb-on-kubernetes/',
        blurb: 'CockroachDB is a distributed SQL database whose built-in replication and survivability model pair with Kubernetes to truly make data easy.'
      },
      {
        type: 0,
        name: 'Skippbox',
        logo: 'skippbox',
        link: 'http://www.skippbox.com/tag/products/',
        blurb: 'Creator of Cabin, the first mobile application for Kubernetes, and of kompose. Skippbox’s solutions distill all the power of k8s into simple, easy-to-use interfaces.'
      },
      {
        type: 0,
        name: 'Weave Works',
        logo: 'weave_works',
        link: 'https://weave.works/kubernetes',
        blurb: 'Weaveworks enables Developers and Dev/Ops teams to easily connect, deploy, secure, manage, and troubleshoot microservices in Kubernetes.'
      },
      {
        type: 0,
        name: 'Wercker',

@@ -140,6 +161,34 @@
        link: 'https://www.diamanti.com/products/',
        blurb: 'Diamanti deploys containers with guaranteed performance using Kubernetes in the first hyperconverged appliance purpose built for containerized applications.'
      },
      {
        type: 0,
        name: 'Apprenda',
        logo: 'apprenda',
        link: 'https://apprenda.com/kubernetes-support/',
        blurb: 'Apprenda creates and supports modern, enterprise-ready application platforms for both cloud native and traditional application workloads.'
      },
      {
        type: 0,
        name: 'Aporeto',
        logo: 'aporeto',
        link: 'https://aporeto.com/trireme',
        blurb: 'Aporeto makes cloud-native applications secure by default without impacting developer velocity and works at any scale, on any cloud.'
      },
      {
        type: 0,
        name: 'Giant Swarm',
        logo: 'giant_swarm',
        link: 'https://giantswarm.io',
        blurb: 'Giant Swarm provides fully-managed Kubernetes Clusters in your location of choice, so you can focus on your product.'
      },
      {
        type: 0,
        name: 'Mirantis',
        logo: 'mirantis',
        link: 'https://content.mirantis.com/Containerizing-OpenStack-on-Kubernetes-Video-Landing-Page.html',
        blurb: 'Mirantis builds and manages private clouds with open source software such as OpenStack, deployed as containers orchestrated by Kubernetes.'
      },
      {
        type: 1,
        name: 'Apprenda',

@@ -190,12 +239,26 @@
        blurb: 'Jetstack is an organisation focused entirely on Kubernetes. They will help you to get the most out of Kubernetes through expert professional services and open source tooling. Get in touch, and accelerate your project.'
      },
      {
        type: 1,
        type: 0,
        name: 'Tigera',
        logo: 'tigera',
        link: 'http://docs.projectcalico.org/v1.5/getting-started/kubernetes/',
        blurb: 'Tigera builds high performance, policy driven, cloud native networking solutions for Kubernetes.'
      }
      },
      {
        type: 1,
        name: 'Skippbox',
        logo: 'skippbox',
        link: 'http://www.skippbox.com/services/',
        blurb: 'Skippbox brings its Kubernetes expertise to help companies embrace Kubernetes on their way to digital transformation. Skippbox offers both professional services and expert training.'
      },
      {
        type: 1,
        name: 'Endocode',
        logo: 'endocode',
        link: 'https://endocode.com/kubernetes/',
        blurb: 'Endocode practices and teaches the open source way. Kernel to cluster - Dev to Ops. We offer Kubernetes trainings, services and support.'
      }
    ]

    var isvContainer = document.getElementById('isvContainer')

@@ -11,13 +11,14 @@
        <h5>{{ site.data[foundTOC].abstract }}</h5>
        <div id="vendorStrip" class="light-text">
            <ul>
                <li><a href="/docs/" {% if site.data[foundTOC].bigheader == "Guides" %}class="YAH"{% endif %}>GUIDES</a></li>
                <li><a href="/docs/" {% if site.data[foundTOC].bigheader == "Kubernetes Documentation" %}class="YAH"{% endif %}>DOCS HOME</a></li>
                <li><a href="/docs/user-guide/" {% if site.data[foundTOC].bigheader == "Guides" %}class="YAH"{% endif %}>GUIDES</a></li>
                <li><a href="/docs/tutorials/" {% if site.data[foundTOC].bigheader == "Tutorials" %}class="YAH"{% endif %}>TUTORIALS</a></li>
                <li><a href="/docs/tasks/" {% if site.data[foundTOC].bigheader == "Tasks" %}class="YAH"{% endif %}>TASKS</a></li>
                <li><a href="/docs/concepts/" {% if site.data[foundTOC].bigheader == "Concepts" %}class="YAH"{% endif %}>CONCEPTS</a></li>
                <li><a href="/docs/reference" {% if site.data[foundTOC].bigheader == "Reference Documentation" %}class="YAH"{% endif %}>REFERENCE</a></li>
                <li><a href="/docs/tools" {% if site.data[foundTOC].bigheader == "Tools" %}class="YAH"{% endif %}>TOOLS</a></li>
                <li><a href="/docs/samples" {% if site.data[foundTOC].bigheader == "Samples" %}class="YAH"{% endif %}>SAMPLES</a></li>
                <li><a href="/docs/reference/" {% if site.data[foundTOC].bigheader == "Reference Documentation" %}class="YAH"{% endif %}>REFERENCE</a></li>
                <li><a href="/docs/tools/" {% if site.data[foundTOC].bigheader == "Tools" %}class="YAH"{% endif %}>TOOLS</a></li>
                <li><a href="/docs/samples/" {% if site.data[foundTOC].bigheader == "Samples" %}class="YAH"{% endif %}>SAMPLES</a></li>
                <li><a href="/docs/troubleshooting/" {% if site.data[foundTOC].bigheader == "Support" %}class="YAH"{% endif %}>SUPPORT</a></li>
            </ul>
            <div id="searchBox">

@@ -29,7 +30,9 @@
    <section id="encyclopedia">
        <div id="docsToc">
            <div class="pi-accordion">
                {% if site.data[foundTOC].bigheader != "Kubernetes Documentation" %}
                {% assign tree = site.data[foundTOC].toc %}{% include tree.html %}
                {% endif %}
            </div> <!-- /pi-accordion -->
            <button class="push-menu-close-button" onclick="kub.toggleToc()"></button>
        </div> <!-- /docsToc -->

@@ -49,8 +52,8 @@
        (function(d,c,j){if(!document.getElementById(j)){var pd=d.createElement(c),s;pd.id=j;pd.src=('https:'==document.location.protocol)?'https://polldaddy.com/js/rating/rating.js':'http://i0.poll.fm/js/rating/rating.js';s=document.getElementsByTagName(c)[0];s.parentNode.insertBefore(pd,s);}}(document,'script','pd-rating-js'));
        </script>
        <a href="" onclick="window.open('https://github.com/kubernetes/kubernetes.github.io/issues/new?title=Issue%20with%20' +
            window.location.pathname)" class="button issue">Create Issue</a>
        <a href="/editdocs#{{ page.path }}" class="button issue">Edit This Page</a>
            window.location.pathname)" class="button issue">Create an Issue</a>
        <a href="/editdocs#{{ page.path }}" class="button issue">Edit this Page</a>
        {% endif %}
    </div>
</section>

@@ -77,6 +80,34 @@
        })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
        ga('create', 'UA-36037335-10', 'auto');
        ga('send', 'pageview');

        // hide docs nav area if no nav is present, or if nav only contains a link to the current page
        (function () {
            window.addEventListener('DOMContentLoaded', init)

            // play nice with our neighbors
            function init() {
                window.removeEventListener('DOMContentLoaded', init)
                hideNav()
            }

            function hideNav(toc){
                if (!toc) toc = document.querySelector('#docsToc')
                var container = toc.querySelector('.container')

                // container is built dynamically, so it may not be present on the first runloop
                if (container) {
                    if (container.childElementCount === 0 || toc.querySelectorAll('a.item').length === 1) {
                        toc.style.display = 'none'
                        document.getElementById('docsContent').style.width = '100%'
                    }
                } else {
                    requestAnimationFrame(function () {
                        hideNav(toc)
                    })
                }
            }
        })();
    </script>
    <!-- Commenting out AnswerDash for now; we need to work on our list of questions/answers/design first
    <!-- Start of AnswerDash script <script>var AnswerDash;!function(e,t,n,s,a){if(!t.getElementById(s)){var i,r=t.createElement(n),c=t.getElementsByTagName(n)[0];e[a]||(i=e[a]=function(){i.__oninit.push(arguments)},i.__oninit=[]),r.type="text/javascript",r.async=!0,r.src="https://p1.answerdash.com/answerdash.min.js?siteid=756",r.setAttribute("id",s),c.parentNode.insertBefore(r,c)}}(window,document,"script","answerdash-script","AnswerDash");</script> <!-- End of AnswerDash script -->

@@ -389,6 +389,14 @@ footer
      display: block
      height: 0
      overflow: hidden

      &.button
        background-image: none
        width: auto
        height: auto

        &:hover
          color: $blue

  a.twitter
    background-position: 0 0

@@ -874,8 +882,19 @@ dd
  img
    max-width: 100%

  a
    //font-weight: 700
    text-decoration: underline

  a:visited
    color: blueviolet

  a.button
    border-radius: 2px
    text-decoration: none

    &:visited
      color: white

  a.issue
    margin-left: 20px

@@ -15,7 +15,7 @@ ul, li
  ul
    margin: 0
    padding: 0


  a
    text-decoration: none

@@ -7,17 +7,20 @@ Add-ons extend the functionality of Kubernetes.

This page lists some of the available add-ons and links to their respective installation instructions.

Add-ons in each section are sorted alphabetically - the ordering does not imply any preferential status.

## Networking and Network Policy

* [Weave Net](https://github.com/weaveworks/weave-kube) provides networking and network policy, will carry on working on both sides of a network partition, and does not require an external database.
* [Calico](http://docs.projectcalico.org/v1.5/getting-started/kubernetes/installation/hosted/) is a secure L3 networking and network policy provider.
* [Calico](http://docs.projectcalico.org/v1.6/getting-started/kubernetes/installation/hosted/) is a secure L3 networking and network policy provider.
* [Canal](https://github.com/tigera/canal/tree/master/k8s-install/kubeadm) unites Flannel and Calico, providing networking and network policy.
* [Flannel](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml) is an overlay network provider that can be used with Kubernetes.
* [Romana](http://romana.io) is a Layer 3 networking solution for pod networks that also supports the [NetworkPolicy API](/docs/user-guide/networkpolicies/). Kubeadm add-on installation details available [here](https://github.com/romana/romana/tree/master/containerize).
* [Weave Net](https://github.com/weaveworks/weave-kube) provides networking and network policy, will carry on working on both sides of a network partition, and does not require an external database.

## Visualization & Control

* [Weave Scope](https://www.weave.works/documentation/scope-latest-installing/#k8s) is a tool for graphically visualizing your containers, pods, services etc. Use it in conjunction with a [Weave Cloud account](https://cloud.weave.works/) or host the UI yourself.
* [Dashboard](https://github.com/kubernetes/dashboard#kubernetes-dashboard) is a dashboard web interface for Kubernetes.
* [Weave Scope](https://www.weave.works/documentation/scope-latest-installing/#k8s) is a tool for graphically visualizing your containers, pods, services etc. Use it in conjunction with a [Weave Cloud account](https://cloud.weave.works/) or host the UI yourself.

## Legacy Add-ons

@@ -124,7 +124,7 @@ With v1.3, the following annotations are deprecated: `pod.beta.kubernetes.io/hos

## How do I test if it is working?

### Create a simple Pod to use as a test environment.
### Create a simple Pod to use as a test environment

Create a file named busybox.yaml with the
following contents:

@@ -152,7 +152,7 @@ Then create a pod using this file:
kubectl create -f busybox.yaml
```

### Wait for this pod to go into the running state.
### Wait for this pod to go into the running state

You can get its status with:
```

@@ -165,7 +165,7 @@ NAME      READY     STATUS    RESTARTS   AGE
busybox   1/1       Running   0          <some-time>
```

### Validate DNS works
### Validate that DNS is working

Once that pod is running, you can exec nslookup in that environment:

@@ -185,6 +185,115 @@ Address 1: 10.0.0.1

If you see that, DNS is working correctly.

### Troubleshooting Tips

If the nslookup command fails, check the following:

#### Check the local DNS configuration first
Take a look inside the resolv.conf file. (See "Inheriting DNS from the node" and "Known issues" below for more information.)

```
cat /etc/resolv.conf
```

Verify that the search path and name server are set up like the following (note that the search path may vary for different cloud providers):

```
search default.svc.cluster.local svc.cluster.local cluster.local google.internal c.gce_project_id.internal
nameserver 10.0.0.10
options ndots:5
```

#### Quick diagnosis

Errors such as the following indicate a problem with the kube-dns add-on or associated Services:

```
$ kubectl exec busybox -- nslookup kubernetes.default
Server:    10.0.0.10
Address 1: 10.0.0.10

nslookup: can't resolve 'kubernetes.default'
```

or

```
$ kubectl exec busybox -- nslookup kubernetes.default
Server:    10.0.0.10
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local

nslookup: can't resolve 'kubernetes.default'
```

#### Check if the DNS pod is running

Use the kubectl get pods command to verify that the DNS pod is running.

```
kubectl get pods --namespace=kube-system -l k8s-app=kube-dns
```

You should see something like:

```
NAME                 READY     STATUS    RESTARTS   AGE
...
kube-dns-v19-ezo1y   3/3       Running   0          1h
...
```

If you see that no pod is running, or that the pod has failed/completed, the DNS add-on may not be deployed by default in your current environment and you will have to deploy it manually.

#### Check for Errors in the DNS pod

Use the `kubectl logs` command to see logs for the DNS daemons.

```
kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c kubedns
kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c dnsmasq
kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system -l k8s-app=kube-dns -o name) -c healthz
```

Check for any suspicious log entries: a W, E, or F at the beginning of a line indicates a Warning, Error, or Failure, respectively. Search for entries with these logging levels and use [kubernetes issues](https://github.com/kubernetes/kubernetes/issues) to report unexpected errors.

#### Is the DNS service up?

Verify that the DNS service is up by using the `kubectl get service` command.

```
kubectl get svc --namespace=kube-system
```

You should see:

```
NAME       CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
...
kube-dns   10.0.0.10    <none>        53/UDP,53/TCP   1h
...
```

If you have created the service, or if it should have been created by default but does not appear, see this [debugging services page](http://kubernetes.io/docs/user-guide/debugging-services/) for more information.

#### Are DNS endpoints exposed?

You can verify that DNS endpoints are exposed by using the `kubectl get endpoints` command.

```
kubectl get ep kube-dns --namespace=kube-system
```

You should see something like:
```
NAME       ENDPOINTS                       AGE
kube-dns   10.180.3.17:53,10.180.3.17:53   1h
```

If you do not see the endpoints, see the endpoints section in the [debugging services documentation](http://kubernetes.io/docs/user-guide/debugging-services/).

For additional Kubernetes DNS examples, see the [cluster-dns examples](https://github.com/kubernetes/kubernetes/tree/master/examples/cluster-dns) in the Kubernetes GitHub repository.

## Kubernetes Federation (Multiple Zone support)

Release 1.3 introduced Cluster Federation support for multi-site

@@ -213,6 +322,34 @@ the flag `--cluster-domain=<default local domain>`
The Kubernetes cluster DNS server (based off the [SkyDNS](https://github.com/skynetservices/skydns) library)
supports forward lookups (A records), service lookups (SRV records) and reverse IP address lookups (PTR records).
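
For illustration, a hedged sketch of exercising each record type from inside a pod (this assumes a full-featured `nslookup`; the minimal busybox build used above does not support `-type`, and the names/addresses are the cluster defaults):

```
nslookup kubernetes.default.svc.cluster.local                          # forward (A) lookup
nslookup -type=srv _https._tcp.kubernetes.default.svc.cluster.local    # service (SRV) lookup
nslookup 10.0.0.1                                                      # reverse (PTR) lookup
```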

## Inheriting DNS from the node
When running a pod, kubelet will prepend the cluster DNS server and search
paths to the node's own DNS settings. If the node is able to resolve DNS names
specific to the larger environment, pods should be able to, also. See "Known
issues" below for a caveat.

If you don't want this, or if you want a different DNS config for pods, you can
use the kubelet's `--resolv-conf` flag. Setting it to "" means that pods will
not inherit DNS. Setting it to a valid file path means that kubelet will use
this file instead of `/etc/resolv.conf` for DNS inheritance.
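
For example, a minimal sketch of the two modes (the custom file path is illustrative):

```
# pods do not inherit any DNS configuration from the node:
kubelet --resolv-conf="" ...

# pods inherit DNS configuration from a custom file instead of /etc/resolv.conf:
kubelet --resolv-conf=/etc/kubernetes/kubelet-resolv.conf ...
```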

## Known issues
Kubernetes installs do not configure the nodes' resolv.conf files to use the
cluster DNS by default, because that process is inherently distro-specific.
This should probably be implemented eventually.

Linux's libc is impossibly stuck ([see this bug from
2005](https://bugzilla.redhat.com/show_bug.cgi?id=168253)) with limits of just
3 DNS `nameserver` records and 6 DNS `search` records. Kubernetes needs to
consume 1 `nameserver` record and 3 `search` records. This means that if a
local installation already uses 3 `nameserver`s or uses more than 3 `search`es,
some of those settings will be lost. As a partial workaround, the node can run
`dnsmasq`, which will provide more `nameserver` entries, but not more `search`
entries. You can also use kubelet's `--resolv-conf` flag.

If you are using Alpine version 3.3 or earlier as your base image, DNS may not
work properly owing to a known issue with Alpine. Check [here](https://github.com/kubernetes/kubernetes/issues/30215)
for more information.

## References

@@ -3,6 +3,7 @@ assignees:
- mikedanese
- luxas
- errordeveloper
- jbeda

---

@@ -104,17 +105,16 @@ and `--external-etcd-keyfile` flags.

- `--pod-network-cidr`

  By default, `kubeadm init` does not set node CIDRs for pods and allows you to
  bring your own networking configuration through a CNI compatible network
  controller addon such as [Weave Net](https://github.com/weaveworks/weave-kube),
  [Calico](https://github.com/projectcalico/calico-containers/tree/master/docs/cni/kubernetes/manifests/kubeadm)
  or [Canal](https://github.com/tigera/canal/tree/master/k8s-install/kubeadm).
  If you are using a compatible cloud provider or flannel, you can specify a
  subnet to use for each pod on the cluster with the `--pod-network-cidr` flag.
  This should be a minimum of a /16 so that kubeadm is able to assign /24 subnets
  to each node in the cluster.
  For certain networking solutions the Kubernetes master can also play a role in
  allocating network ranges (CIDRs) to each node. This includes many cloud providers
  and flannel. You can specify a subnet range that will be broken down and handed out
  to each node with the `--pod-network-cidr` flag. This should be a minimum of a /16 so
  the controller-manager is able to assign /24 subnets to each node in the cluster.
  If you are using flannel with [this manifest](https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml)
  you should use `--pod-network-cidr=10.244.0.0/16`. Most CNI based networking solutions
  do not require this flag.

- `--service-cidr` (default '10.12.0.0/12')
- `--service-cidr` (default '10.96.0.0/12')

  You can use the `--service-cidr` flag to override the subnet Kubernetes uses to
  assign pods IP addresses. If you do, you will also need to update the

@@ -141,7 +141,7 @@ By default, `kubeadm init` automatically generates the token used to initialise
each new node. If you would like to manually specify this token, you can use the
`--token` flag. The token must be of the format `<6 character string>.<16 character string>`.

- `--use-kubernetes-version` (default 'v1.4.1') the kubernetes version to initialise
- `--use-kubernetes-version` (default 'v1.4.4') the kubernetes version to initialise

  `kubeadm` was originally built for Kubernetes version **v1.4.0**, older versions are not
  supported. With this flag you can try any future version, e.g. **v1.5.0-beta.1**.
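
Taken together, a hedged sketch of an invocation combining the flags described above (all values are illustrative, except the flannel CIDR, which comes from the manifest note above; the token matches the `<6>.<16>` format):

```
kubeadm init --pod-network-cidr=10.244.0.0/16 \
             --service-cidr=10.96.0.0/12 \
             --token=abcdef.0123456789abcdef \
             --use-kubernetes-version=v1.4.4
```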

@@ -203,6 +203,27 @@ There are some environment variables that modify the way that `kubeadm` works.
| `KUBE_COMPONENT_LOGLEVEL` | `--v=4` | Logging configuration for all Kubernetes components |


## Releases and release notes

If you already have kubeadm installed and want to upgrade, run `apt-get update && apt-get upgrade` or `yum update` to get the latest version of kubeadm.

- Second release between v1.4 and v1.5: `v1.5.0-alpha.2.421+a6bea3d79b8bba`
  - Switch to the 10.96.0.0/12 subnet: [#35290](https://github.com/kubernetes/kubernetes/pull/35290)
  - Fix kubeadm on AWS by including /etc/ssl/certs in the controller-manager: [#33681](https://github.com/kubernetes/kubernetes/pull/33681)
  - The API was refactored and is now componentconfig: [#33728](https://github.com/kubernetes/kubernetes/pull/33728), [#34147](https://github.com/kubernetes/kubernetes/pull/34147) and [#34555](https://github.com/kubernetes/kubernetes/pull/34555)
  - Allow kubeadm to get config options from a file: [#34501](https://github.com/kubernetes/kubernetes/pull/34501), [#34885](https://github.com/kubernetes/kubernetes/pull/34885) and [#34891](https://github.com/kubernetes/kubernetes/pull/34891)
  - Implement preflight checks: [#34341](https://github.com/kubernetes/kubernetes/pull/34341) and [#35843](https://github.com/kubernetes/kubernetes/pull/35843)
  - Using kubernetes v1.4.4 by default: [#34419](https://github.com/kubernetes/kubernetes/pull/34419) and [#35270](https://github.com/kubernetes/kubernetes/pull/35270)
  - Make api and discovery ports configurable and default to 6443: [#34719](https://github.com/kubernetes/kubernetes/pull/34719)
  - Implement kubeadm reset: [#34807](https://github.com/kubernetes/kubernetes/pull/34807)
  - Make kubeadm poll/wait for endpoints instead of failing directly when the master isn't available: [#34703](https://github.com/kubernetes/kubernetes/pull/34703) and [#34718](https://github.com/kubernetes/kubernetes/pull/34718)
  - Allow empty directories in the directory preflight check: [#35632](https://github.com/kubernetes/kubernetes/pull/35632)
  - Started adding unit tests: [#35231](https://github.com/kubernetes/kubernetes/pull/35231), [#35326](https://github.com/kubernetes/kubernetes/pull/35326) and [#35332](https://github.com/kubernetes/kubernetes/pull/35332)
  - Various enhancements: [#35075](https://github.com/kubernetes/kubernetes/pull/35075), [#35111](https://github.com/kubernetes/kubernetes/pull/35111), [#35119](https://github.com/kubernetes/kubernetes/pull/35119), [#35124](https://github.com/kubernetes/kubernetes/pull/35124), [#35265](https://github.com/kubernetes/kubernetes/pull/35265) and [#35777](https://github.com/kubernetes/kubernetes/pull/35777)
  - Bug fixes: [#34352](https://github.com/kubernetes/kubernetes/pull/34352), [#34558](https://github.com/kubernetes/kubernetes/pull/34558), [#34573](https://github.com/kubernetes/kubernetes/pull/34573), [#34834](https://github.com/kubernetes/kubernetes/pull/34834), [#34607](https://github.com/kubernetes/kubernetes/pull/34607), [#34907](https://github.com/kubernetes/kubernetes/pull/34907) and [#35796](https://github.com/kubernetes/kubernetes/pull/35796)
- Initial v1.4 release: `v1.5.0-alpha.0.1534+cf7301f16c0363`


## Troubleshooting

* Some users on RHEL/CentOS 7 have reported issues with traffic being routed incorrectly due to iptables being bypassed. You should ensure `net.bridge.bridge-nf-call-iptables` is set to 1 in your sysctl config, e.g.:
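
  A minimal sketch of one way to do that (the drop-in file location is a common convention and may vary by distro):

```
# apply immediately:
sysctl net.bridge.bridge-nf-call-iptables=1
# persist across reboots via a sysctl drop-in file:
echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/k8s.conf
```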

@@ -44,7 +44,11 @@ In addition to the CNI plugin specified by the configuration file, Kubernetes re

### kubenet

The Linux-only kubenet plugin provides functionality similar to the `--configure-cbr0` kubelet command-line option. It creates a Linux bridge named `cbr0` and creates a veth pair for each pod with the host end of each pair connected to `cbr0`. The pod end of the pair is assigned an IP address allocated from a range assigned to the node either through configuration or by the controller-manager. `cbr0` is assigned an MTU matching the smallest MTU of an enabled normal interface on the host. The kubenet plugin is currently mutually exclusive with, and will eventually replace, the --configure-cbr0 option. It is also currently incompatible with the flannel experimental overlay.
Kubenet is a very basic, simple network plugin, on Linux only. It does not, of itself, implement more advanced features like cross-node networking or network policy. It is typically used together with a cloud provider that sets up routing rules for communication between nodes, or in single-node environments.

Kubenet creates a Linux bridge named `cbr0` and creates a veth pair for each pod with the host end of each pair connected to `cbr0`. The pod end of the pair is assigned an IP address allocated from a range assigned to the node either through configuration or by the controller-manager. `cbr0` is assigned an MTU matching the smallest MTU of an enabled normal interface on the host.

The kubenet plugin is mutually exclusive with the --configure-cbr0 option.
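
As a hedged illustration, the resulting setup can be inspected on a node (assuming the standard `bridge-utils` and `iproute2` tools are installed):

```
brctl show cbr0     # lists the bridge and the veth interfaces attached to it
ip addr show cbr0   # shows the address assigned to the bridge for this node's pods
```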

The plugin requires a few things:

@@ -83,7 +83,7 @@ talk to other VMs in your project. This is the same basic model.
Until now this document has talked about containers. In reality, Kubernetes
applies IP addresses at the `Pod` scope - containers within a `Pod` share their
network namespaces - including their IP address. This means that containers
within a `Pod` can all reach each other’s ports on `localhost`. This does imply
within a `Pod` can all reach each other's ports on `localhost`. This does imply
that containers within a `Pod` must coordinate port usage, but this is no
different than processes in a VM. We call this the "IP-per-pod" model. This
is implemented in Docker as a "pod container" which holds the network namespace

@@ -100,8 +100,19 @@ existence or non-existence of host ports.
There are a number of ways that this network model can be implemented. This
document is not an exhaustive study of the various methods, but hopefully serves
as an introduction to various technologies and serves as a jumping-off point.
If some techniques become vastly preferable to others, we might detail them more
here.

The following networking options are sorted alphabetically - the order does not
imply any preferential status.

### Contiv

[Contiv](https://github.com/contiv/netplugin) provides configurable networking (native l3 using BGP, overlay using vxlan, classic l2, or Cisco-SDN/ACI) for various use cases. [Contiv](http://contiv.io) is fully open source.

### Flannel

[Flannel](https://github.com/coreos/flannel#flannel) is a very simple overlay
network that satisfies the Kubernetes requirements. Many
people have reported success with Flannel and Kubernetes.

### Google Compute Engine (GCE)

@@ -158,29 +169,12 @@ Follow the "With Linux Bridge devices" section of [this very nice
tutorial](http://blog.oddbit.com/2014/08/11/four-ways-to-connect-a-docker/) from
Lars Kellogg-Stedman.

### Weave Net from Weaveworks

[Weave Net](https://www.weave.works/products/weave-net/) is a
resilient and simple to use network for Kubernetes and its hosted applications.
Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/)
or stand-alone. In either version, it doesn’t require any configuration or extra code
to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes.


### Flannel

[Flannel](https://github.com/coreos/flannel#flannel) is a very simple overlay
network that satisfies the Kubernetes requirements. It installs in minutes and
should get you up and running if the above techniques are not working. Many
people have reported success with Flannel and Kubernetes.

### OpenVSwitch

[OpenVSwitch](/docs/admin/ovs-networking) is a somewhat more mature but also
complicated way to build an overlay network. This is endorsed by several of the
"Big Shops" for networking.


### Project Calico

[Project Calico](https://github.com/projectcalico/calico-containers/blob/master/docs/cni/kubernetes/README.md) is an open source container networking provider and network policy engine.

@@ -193,9 +187,13 @@ Calico can also be run in policy enforcement mode in conjunction with other netw

[Romana](http://romana.io) is an open source network and security automation solution that lets you deploy Kubernetes without an overlay network. Romana supports Kubernetes [Network Policy](/docs/user-guide/networkpolicies/) to provide isolation across network namespaces.

### Contiv
### Weave Net from Weaveworks

[Contiv](https://github.com/contiv/netplugin) provides configurable networking (native l3 using BGP, overlay using vxlan, classic l2, or Cisco-SDN/ACI) for various use cases. [Contiv](http://contiv.io) is fully open source.
[Weave Net](https://www.weave.works/products/weave-net/) is a
resilient and simple to use network for Kubernetes and its hosted applications.
Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/)
or stand-alone. In either version, it doesn't require any configuration or extra code
to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes.

## Other reading

docs/api.md

@@ -95,46 +95,3 @@ DaemonSets, Deployments, HorizontalPodAutoscalers, Ingress, Jobs and ReplicaSets
Other extensions resources can be enabled by setting runtime-config on
apiserver. runtime-config accepts comma separated values. For example, to disable deployments and jobs, set
`--runtime-config=extensions/v1beta1/deployments=false,extensions/v1beta1/jobs=false`

## v1beta1, v1beta2, and v1beta3 are deprecated; please move to v1 ASAP

As of June 4, 2015, the Kubernetes v1 API has been enabled by default. The v1beta1 and v1beta2 APIs were deleted on June 1, 2015. v1beta3 is planned to be deleted on July 6, 2015.

### v1 conversion tips (from v1beta3)

We're working to convert all documentation and examples to v1. Use `kubectl create --validate` in order to validate your json or yaml against our Swagger spec.

Changes to services are the most significant difference between v1beta3 and v1.

* The `service.spec.portalIP` property is renamed to `service.spec.clusterIP`.
* The `service.spec.createExternalLoadBalancer` property is removed. Specify `service.spec.type: "LoadBalancer"` to create an external load balancer instead.
* The `service.spec.publicIPs` property is deprecated and now called `service.spec.deprecatedPublicIPs`. This property will be removed entirely when v1beta3 is removed. The vast majority of users of this field were using it to expose services on ports on the node. Those users should specify `service.spec.type: "NodePort"` instead. Read [External Services](/docs/user-guide/services/#external-services) for more info. If this is not sufficient for your use case, please file an issue or contact @thockin.

Some other differences between v1beta3 and v1:

* The `pod.spec.containers[*].privileged` and `pod.spec.containers[*].capabilities` properties are now nested under the `pod.spec.containers[*].securityContext` property. See [Security Contexts](/docs/user-guide/security-context).
* The `pod.spec.host` property is renamed to `pod.spec.nodeName`.
* The `endpoints.subsets[*].addresses.IP` property is renamed to `endpoints.subsets[*].addresses.ip`.
* The `pod.status.containerStatuses[*].state.termination` and `pod.status.containerStatuses[*].lastState.termination` properties are renamed to `pod.status.containerStatuses[*].state.terminated` and `pod.status.containerStatuses[*].lastState.terminated` respectively.
* The `pod.status.Condition` property is renamed to `pod.status.conditions`.
* The `status.details.id` property is renamed to `status.details.name`.

### v1beta3 conversion tips (from v1beta1/2)

Some important differences between v1beta1/2 and v1beta3:

* The resource `id` is now called `name`.
* `name`, `labels`, `annotations`, and other metadata are now nested in a map called `metadata`
* `desiredState` is now called `spec`, and `currentState` is now called `status`
* `/minions` has been moved to `/nodes`, and the resource has kind `Node`
* The namespace is required (for all namespaced resources) and has moved from a URL parameter to the path: `/api/v1beta3/namespaces/{namespace}/{resource_collection}/{resource_name}`. If you were not using a namespace before, use `default` here.
* The names of all resource collections are now lower cased - instead of `replicationControllers`, use `replicationcontrollers`.
* To watch for changes to a resource, open an HTTP or Websocket connection to the collection query and provide the `?watch=true` query parameter along with the desired `resourceVersion` parameter to watch from (see the sketch after this list).
* The `labels` query parameter has been renamed to `labelSelector`.
* The `fields` query parameter has been renamed to `fieldSelector`.
* The container `entrypoint` has been renamed to `command`, and `command` has been renamed to `args`.
* Container, volume, and node resources are expressed as nested maps (e.g., `resources{cpu:1}`) rather than as individual fields, and resource values support [scaling suffixes](/docs/user-guide/compute-resources/#specifying-resource-quantities) rather than fixed scales (e.g., milli-cores).
* Restart policy is represented simply as a string (e.g., `"Always"`) rather than as a nested map (`always{}`).
* Pull policies changed from `PullAlways`, `PullNever`, and `PullIfNotPresent` to `Always`, `Never`, and `IfNotPresent`.
* The volume `source` is inlined into `volume` rather than nested.
* Host volumes have been changed from `hostDir` to `hostPath` to better reflect that they can be files or directories.
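
As a hedged sketch of the watch mechanism described in the list above (the apiserver address and resource version are placeholders):

```
curl "http://<apiserver>/api/v1beta3/namespaces/default/pods?watch=true&resourceVersion=<version>"
```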

@@ -1,6 +1,4 @@
---
redirect_from:
- /editdocs/
---

{% capture overview %}

@@ -46,7 +44,7 @@ choose the
[page type](/docs/contribute/page-templates/)
that is the best fit for your content.

### Submitting a pull request to the master branch
### Submitting a pull request to the master branch (Current Release)

If you want your change to be published in the released version of the Kubernetes docs,
create a pull request against the master branch of the Kubernetes

@@ -64,7 +62,7 @@ site where you can verify that your changes have rendered correctly.
If needed, revise your pull request by committing changes to your
new branch in your fork.

### Submitting a pull request to the <vnext> branch
### Submitting a pull request to the <vnext> branch (Upcoming Release)

If your documentation change should not be released until the next release of
the Kubernetes product, create a pull request against the <vnext> branch

@@ -0,0 +1,203 @@
---
---

{% capture overview %}
This page gives writing style guidelines for the Kubernetes documentation.
These are guidelines, not rules. Use your best judgment, and feel free to
propose changes to this document in a pull request.

For additional information on creating new content for the Kubernetes
docs, follow the instructions on
[using page templates](/docs/contribute/page-templates/) and
[creating a documentation pull request](/docs/contribute/create-pull-request/).
{% endcapture %}

{% capture body %}

## Documentation formatting standards

### Capitalize API objects

Capitalize the names of API objects. Refer to API objects without saying
"object."

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>The Pod has two Containers.</td><td>The pod has two containers.</td></tr>
<tr><td>The Deployment is responsible for ...</td><td>The Deployment object is responsible for ...</td></tr>
</table>

### Use angle brackets for placeholders

Use angle brackets for placeholders. Tell the reader what a placeholder
represents.

1. Display information about a pod:

        kubectl describe pod <pod-name>

    where `<pod-name>` is the name of one of your pods.

### Use bold for user interface elements

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>Click <b>Fork</b>.</td><td>Click "Fork".</td></tr>
<tr><td>Select <b>Other</b>.</td><td>Select 'Other'.</td></tr>
</table>

### Use italics to define or introduce new terms

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>A <i>cluster</i> is a set of nodes ...</td><td>A "cluster" is a set of nodes ...</td></tr>
<tr><td>These components form the <i>control plane.</i></td><td>These components form the <b>control plane.</b></td></tr>
</table>

### Use code style for filenames, directories, and paths

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>Open the <code>envars.yaml</code> file.</td><td>Open the envars.yaml file.</td></tr>
<tr><td>Go to the <code>/docs/tutorials</code> directory.</td><td>Go to the /docs/tutorials directory.</td></tr>
<tr><td>Open the <code>/_data/concepts.yaml</code> file.</td><td>Open the /_data/concepts.yaml file.</td></tr>
</table>

## Code snippet formatting

### Use code style for inline code and commands

For inline code in an HTML document, use the `<code>` tag. In a Markdown
document, use the backtick (`).

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>Set the value of the <code>replicas</code> field in the configuration file.</td><td>Set the value of the "replicas" field in the configuration file.</td></tr>
<tr><td>The <code>kubectl run</code> command creates a Deployment.</td><td>The "kubectl run" command creates a Deployment.</td></tr>
</table>

### Don't include the command prompt

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>kubectl get pods</td><td>$ kubectl get pods</td></tr>
</table>

### Separate commands from output

Verify that the pod is running on your chosen node:

    kubectl get pods --output=wide

The output is similar to this:

    NAME     READY     STATUS    RESTARTS   AGE    IP           NODE
    nginx    1/1       Running   0          13s    10.200.0.4   worker0


{% comment %}## Kubernetes.io word list

A list of Kubernetes-specific terms and words to be used consistently across the site.

<table>
<tr><th>Term</th><th>Usage</th></tr>
<tr><td>TBD</td><td>TBD</td></tr>
</table>{% endcomment %}


## Content best practices

This section contains suggested best practices for clear, concise, and consistent content.

### Use present tense

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>This command starts a proxy.</td><td>This command will start a proxy.</td></tr>
</table>

Exception: Use future or past tense if it is required to convey the correct
meaning.

### Use active voice

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>You can explore the API using a browser.</td><td>The API can be explored using a browser.</td></tr>
<tr><td>The YAML file specifies the replica count.</td><td>The replica count is specified in the YAML file.</td></tr>
</table>

Exception: Use passive voice if active voice leads to an awkward construction.

### Use simple and direct language

Use simple and direct language. Avoid using unnecessary phrases, such as saying "please."

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>To create a ReplicaSet, ...</td><td>In order to create a ReplicaSet, ...</td></tr>
<tr><td>See the configuration file.</td><td>Please see the configuration file.</td></tr>
<tr><td>View the Pods.</td><td>With this next command, we'll view the Pods.</td></tr>
</table>

### Address the reader as "you"

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>You can create a Deployment by ...</td><td>We'll create a Deployment by ...</td></tr>
<tr><td>In the preceding output, you can see...</td><td>In the preceding output, we can see ...</td></tr>
</table>

## Patterns to avoid

### Avoid using "we"

Using "we" in a sentence can be confusing, because the reader might not know
whether they're part of the "we" you're describing.

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>Version 1.4 includes ...</td><td>In version 1.4, we have added ...</td></tr>
<tr><td>Kubernetes provides a new feature for ...</td><td>We provide a new feature ...</td></tr>
<tr><td>This page teaches you how to use pods.</td><td>In this page, we are going to learn about pods.</td></tr>
</table>

### Avoid jargon and idioms

Some readers speak English as a second language. Avoid jargon and idioms to make the content easier for them to understand.

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>Internally, ...</td><td>Under the hood, ...</td></tr>
<tr><td>Create a new cluster.</td><td>Turn up a new cluster.</td></tr>
</table>

### Avoid statements about the future

Avoid making promises or giving hints about the future. If you need to talk about
an alpha feature, put the text under a heading that identifies it as alpha
information.

### Avoid statements that will soon be out of date

Avoid words like "currently" and "new." A feature that is new today might not be
considered new in a few months.

<table>
<tr><th>Do</th><th>Don't</th></tr>
<tr><td>In version 1.4, ...</td><td>In the current version, ...</td></tr>
<tr><td>The Federation feature provides ...</td><td>The new Federation feature provides ...</td></tr>
</table>

{% endcapture %}


{% capture whatsnext %}
* Learn about [writing a new topic](/docs/contribute/write-new-topic/).
* Learn about [using page templates](/docs/contribute/page-templates/).
* Learn about [staging your changes](/docs/contribute/stage-documentation-changes/).
* Learn about [creating a pull request](/docs/contribute/create-pull-request/).
{% endcapture %}

{% include templates/concept.md %}

@@ -13,7 +13,7 @@ li>.highlighter-rouge {position:relative; top:3px;}

## Overview

This quickstart shows you how to easily install a secure Kubernetes cluster on machines running Ubuntu 16.04 or CentOS 7.
This quickstart shows you how to easily install a secure Kubernetes cluster on machines running Ubuntu 16.04, CentOS 7 or HypriotOS v1.0.1+.
The installation uses a tool called `kubeadm` which is part of Kubernetes 1.4.

This process works with local VMs, physical servers and/or cloud servers.

@@ -23,7 +23,7 @@ See the full [`kubeadm` reference](/docs/admin/kubeadm) for information on all `

**The `kubeadm` tool is currently in alpha but please try it out and give us [feedback](/docs/getting-started-guides/kubeadm/#feedback)!
Be sure to read the [limitations](#limitations); in particular note that kubeadm doesn't have great support for
automatically configuring cloud providers. Please refer to the specific cloud provider documentation or
use another provisioning system.**

kubeadm assumes you have a set of machines (virtual or real) that are up and running. It is designed

@@ -38,7 +38,7 @@ If you are not constrained, other tools build on kubeadm to give you complete cl

## Prerequisites

1. One or more machines running Ubuntu 16.04, CentOS 7 or HypriotOS v1.0.1
1. One or more machines running Ubuntu 16.04, CentOS 7 or HypriotOS v1.0.1+
1. 1GB or more of RAM per machine (any less will leave little room for your apps)
1. Full network connectivity between all machines in the cluster (public or private network is fine)

@@ -61,6 +61,9 @@ You will install the following packages on all the machines:
  You will only need this on the master, but it can be useful to have on the other nodes as well.
* `kubeadm`: the command to bootstrap the cluster.

NOTE: If you already have kubeadm installed, you should run `apt-get update && apt-get upgrade` or `yum update` to get the latest version of kubeadm.
See the reference doc if you want to read about the different [kubeadm releases](/docs/admin/kubeadm).

For each host in turn:

* SSH into the machine and become `root` if you are not already (for example, run `sudo su -`).

@@ -94,7 +97,7 @@ For each host in turn:

The kubelet is now restarting every few seconds, as it waits in a crashloop for `kubeadm` to tell it what to do.

Note: `setenforce 0` will no longer be necessary on CentOS once [#33555](https://github.com/kubernetes/kubernetes/pull/33555) is included in a released version of `kubeadm`.
Note: Disabling SELinux by running `setenforce 0` is required to allow containers to access the host filesystem, which is required by pod networks, for example. You have to do this until kubelet can handle SELinux better.

### (2/4) Initializing your master

@@ -103,6 +106,8 @@ All of these components run in pods started by `kubelet`.

Right now you can't run `kubeadm init` twice without tearing down the cluster in between, see [Tear down](#tear-down).

If you try to run `kubeadm init` and your machine is in a state that is incompatible with starting a Kubernetes cluster, `kubeadm` will warn you about things that might not work or it will error out for unsatisfied mandatory requirements.

To initialize the master, pick one of the machines you previously installed `kubelet` and `kubeadm` on, and run:

    # kubeadm init

@@ -110,7 +115,7 @@ To initialize the master, pick one of the machines you previously installed `kub
**Note:** this will autodetect the network interface to advertise the master on as the interface with the default gateway.
If you want to use a different interface, specify the `--api-advertise-addresses=<ip-address>` argument to `kubeadm init`.

If you want to use [flannel](https://github.com/coreos/flannel) as the pod network; specify `--pod-network-cidr=10.244.0.0/16` if you're using the daemonset manifest below. _However, please note that this is not required for any other networks, including Weave, which is the recommended pod network._
If you want to use [flannel](https://github.com/coreos/flannel) as the pod network, specify `--pod-network-cidr=10.244.0.0/16` if you're using the daemonset manifest below. _However, please note that this is not required for any other networks besides Flannel._

Please refer to the [kubeadm reference doc](/docs/admin/kubeadm/) if you want to read more about the flags `kubeadm init` provides.
@ -201,16 +206,27 @@ For example:
|
|||
|
||||
A few seconds later, you should notice that running `kubectl get nodes` on the master shows a cluster with as many machines as you created.
|
||||
|
||||
### (Optional) Control your cluster from machines other than the master
|
||||
Note that there currently isn't an out-of-the-box way of connecting to the Master's API Server via `kubectl` from a node. Read issue [#35729](https://github.com/kubernetes/kubernetes/issues/35729) for more details.
|
||||
|
||||
### (Optional) Controlling your cluster from machines other than the master
|
||||
|
||||
To get kubectl on another machine (for example, your laptop) to talk to your cluster, you need to copy the `KubeConfig` file from your master to that machine like this:
|
||||
|
||||
# scp root@<master ip>:/etc/kubernetes/admin.conf .
|
||||
# kubectl --kubeconfig ./admin.conf get nodes
|
||||
|
||||
### (Optional) Connecting to the API Server
|
||||
|
||||
If you want to connect to the API Server from outside the cluster, for example to view the dashboard (note: not deployed by default), you can use `kubectl proxy`:
|
||||
|
||||
# scp root@<master ip>:/etc/kubernetes/admin.conf .
|
||||
# kubectl --kubeconfig ./admin.conf proxy
|
||||
|
||||
You can now access the API Server locally at `http://localhost:8001/api/v1`.
|
||||
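For example, as a quick sanity check (assuming the `kubectl proxy` from the step above is still running), you can list the cluster's nodes through the proxy:

# curl http://localhost:8001/api/v1/nodes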
|
||||
### (Optional) Installing a sample application
|
||||
|
||||
As an example, install a sample microservices application, a socks shop, to put your cluster through its paces.
|
||||
As an example, install a sample microservices application, a socks shop, to put your cluster through its paces. Note that this demo only works on `amd64`.
|
||||
To learn more about the sample microservices app, see the [GitHub README](https://github.com/microservices-demo/microservices-demo).
|
||||
|
||||
# kubectl create namespace sock-shop
|
||||
|
@ -242,17 +258,11 @@ If there is a firewall, make sure it exposes this port to the internet before yo
|
|||
|
||||
* To uninstall the socks shop, run `kubectl delete namespace sock-shop` on the master.
|
||||
|
||||
* To undo what `kubeadm` did, simply delete the machines you created for this tutorial, or run the script below and then start over or uninstall the packages.
|
||||
* To undo what `kubeadm` did, simply run:
|
||||
|
||||
# kubeadm reset
|
||||
|
||||
<br>
|
||||
Reset local state:
|
||||
<pre><code>systemctl stop kubelet;
|
||||
docker rm -f -v $(docker ps -q);
|
||||
find /var/lib/kubelet | xargs -n 1 findmnt -n -t tmpfs -o TARGET -T | uniq | xargs -r umount -v;
|
||||
rm -r -f /etc/kubernetes /var/lib/kubelet /var/lib/etcd;
|
||||
</code></pre>
|
||||
If you wish to start over, run `systemctl start kubelet` followed by `kubeadm init` or `kubeadm join`.
|
||||
<!-- *syntax-highlighting-hack -->
|
||||
|
||||
## Explore other add-ons
|
||||
|
||||
|
@ -275,19 +285,22 @@ kubeadm deb packages and binaries are built for amd64, arm and arm64, following
|
|||
|
||||
deb packages are released for ARM and ARM 64-bit, but RPMs are not (yet; reach out if there's interest).
|
||||
|
||||
Anyway, ARM had some issues when making v1.4, see [#32517](https://github.com/kubernetes/kubernetes/pull/32517) [#33485](https://github.com/kubernetes/kubernetes/pull/33485), [#33117](https://github.com/kubernetes/kubernetes/pull/33117) and [#33376](https://github.com/kubernetes/kubernetes/pull/33376).
|
||||
ARM had some issues when making v1.4, see [#32517](https://github.com/kubernetes/kubernetes/pull/32517) [#33485](https://github.com/kubernetes/kubernetes/pull/33485), [#33117](https://github.com/kubernetes/kubernetes/pull/33117) and [#33376](https://github.com/kubernetes/kubernetes/pull/33376).
|
||||
|
||||
However, thanks to the PRs above, `kube-apiserver` works on ARM from the `v1.4.1` release, so make sure you're at least using `v1.4.1` when running on ARM 32-bit.
|
||||
|
||||
The multiarch flannel daemonset can be installed this way. Make sure you replace `ARCH=amd64` with `ARCH=arm` or `ARCH=arm64` if necessary.
|
||||
The multiarch flannel daemonset can be installed this way.
|
||||
|
||||
# ARCH=amd64 curl -sSL https://raw.githubusercontent.com/luxas/flannel/update-daemonset/Documentation/kube-flannel.yml | sed "s/amd64/${ARCH}/g" | kubectl create -f -
|
||||
# export ARCH=amd64
|
||||
# curl -sSL "https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml?raw=true" | sed "s/amd64/${ARCH}/g" | kubectl create -f -
|
||||
|
||||
And obviously replace `ARCH=amd64` with `ARCH=arm` or `ARCH=arm64` depending on the platform you're running on.
|
||||
Replace `ARCH=amd64` with `ARCH=arm` or `ARCH=arm64` depending on the platform you're running on.
|
||||
Note that the Raspberry Pi 3 is in ARM 32-bit mode, so for RPi 3 you should set `ARCH` to `arm`, not `arm64`.
|
||||
|
||||
## Limitations
|
||||
|
||||
Please note: `kubeadm` is a work in progress and these limitations will be addressed in due course.
|
||||
You can also take a look at the troubleshooting section in the [reference document](/docs/admin/kubeadm/#troubleshooting).
|
||||
|
||||
1. The cluster created here doesn't have cloud-provider integrations by default, so for example it doesn't work automatically with [Load Balancers](/docs/user-guide/load-balancer/) (LBs) or [Persistent Volumes](/docs/user-guide/persistent-volumes/walkthrough/) (PVs).
|
||||
To set up kubeadm with CloudProvider integrations (it's experimental, but do try it), refer to the [kubeadm reference](/docs/admin/kubeadm/) document.
|
||||
|
@ -302,6 +315,15 @@ Please note: `kubeadm` is a work in progress and these limitations will be addre
|
|||
1. `kubectl logs` is broken with `kubeadm` clusters due to [#22770](https://github.com/kubernetes/kubernetes/issues/22770).
|
||||
|
||||
Workaround: use `docker logs` on the nodes where the containers are running.
|
||||
1. The HostPort functionality does not work with kubeadm because CNI networking is used; see issue [#31307](https://github.com/kubernetes/kubernetes/issues/31307).
|
||||
|
||||
Workaround: use the [NodePort feature of services](/docs/user-guide/services/#type-nodeport) instead, or use HostNetwork.
|
||||
1. A running `firewalld` service may conflict with kubeadm, so if you want to run `kubeadm`, you should disable `firewalld` until issue [#35535](https://github.com/kubernetes/kubernetes/issues/35535) is resolved.
|
||||
|
||||
Workaround: Disable `firewalld` or configure it to allow the Kubernetes pod and service CIDRs.
|
||||
1. If you see errors like `etcd cluster unavailable or misconfigured`, it's because of high load on the machine, which makes the `etcd` container a bit unresponsive (it might miss some requests) and therefore the kubelet will restart it. This will get better with `etcd3`.
|
||||
|
||||
Workaround: Set `failureThreshold` in `/etc/kubernetes/manifests/etcd.json` to a larger value (a sketch of this edit follows after this list).
|
||||
|
||||
1. If you are using VirtualBox (directly or via Vagrant), you will need to ensure that `hostname -i` returns a routable IP address (i.e. one on the second network interface, not the first one).
|
||||
By default, it doesn't do this, and the kubelet ends up using the first non-loopback network interface, which is usually NATed.
|
||||
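As a sketch of the `etcd` workaround mentioned above (this assumes the manifest contains a numeric `failureThreshold` field; the value `8` is an arbitrary illustration, and the kubelet picks up edits to static manifests automatically):

# sed -i 's/"failureThreshold": [0-9]*/"failureThreshold": 8/' /etc/kubernetes/manifests/etcd.json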
|
|
|
@ -0,0 +1,110 @@
|
|||
---
|
||||
---
|
||||
|
||||
<style>
|
||||
li>.highlighter-rouge {position:relative; top:3px;}
|
||||
</style>
|
||||
|
||||
## Overview
|
||||
|
||||
kubectl is the command line tool you use to interact with Kubernetes clusters.
|
||||
|
||||
You should use a version of kubectl that is at least as new as your server.
|
||||
`kubectl version` will print the server and client versions. Using the same version of kubectl
|
||||
as your server naturally works; using a newer kubectl than your server also works; but if you use
|
||||
an older kubectl with a newer server you may see odd validation errors.
|
||||
|
||||
## Download a release
|
||||
|
||||
Download kubectl from the [official Kubernetes releases](https://console.cloud.google.com/storage/browser/kubernetes-release/release/):
|
||||
|
||||
On MacOS:
|
||||
|
||||
```shell
|
||||
wget https://storage.googleapis.com/kubernetes-release/release/v1.4.4/bin/darwin/amd64/kubectl
|
||||
chmod +x kubectl
|
||||
mv kubectl /usr/local/bin/kubectl
|
||||
```
|
||||
|
||||
On Linux:
|
||||
|
||||
```shell
|
||||
wget https://storage.googleapis.com/kubernetes-release/release/v1.4.4/bin/linux/amd64/kubectl
|
||||
chmod +x kubectl
|
||||
mv kubectl /usr/local/bin/kubectl
|
||||
```
|
||||
|
||||
|
||||
You may need to `sudo` the `mv`; you can put kubectl anywhere on your `PATH` (some people prefer to install to `~/bin`, as sketched below).
|
||||
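For instance, a minimal sketch of the no-`sudo` route (assuming `~/bin` suits you):

```shell
# Install kubectl into ~/bin instead of /usr/local/bin (no sudo required).
mkdir -p ~/bin
mv kubectl ~/bin/kubectl
# Make sure ~/bin is on your PATH; add this line to your shell profile to persist it.
export PATH="$PATH:$HOME/bin"
```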
|
||||
|
||||
## Alternatives
|
||||
|
||||
### Download as part of the Google Cloud SDK
|
||||
|
||||
kubectl can be installed as part of the Google Cloud SDK:
|
||||
|
||||
First install the [Google Cloud SDK](https://cloud.google.com/sdk/).
|
||||
|
||||
After Google Cloud SDK installs, run the following command to install `kubectl`:
|
||||
|
||||
```shell
|
||||
gcloud components install kubectl
|
||||
```
|
||||
|
||||
Do check that the version is sufficiently up-to-date using `kubectl version`.
|
||||
|
||||
### Install with brew
|
||||
|
||||
If you are on MacOS and using brew, you can install with:
|
||||
|
||||
```shell
|
||||
brew install kubectl
|
||||
```
|
||||
|
||||
The Homebrew project is independent from Kubernetes, so do check that the version is
|
||||
sufficiently up-to-date using `kubectl version`.
|
||||
|
||||
|
||||
## Enabling shell autocompletion
|
||||
|
||||
kubectl includes autocompletion support, which can save a lot of typing!
|
||||
|
||||
The completion script itself is generated by kubectl, so you typically just need to invoke it from your profile.
|
||||
|
||||
Common examples are provided here, but for more details please consult `kubectl completion -h`.
|
||||
|
||||
## On Linux, using bash
|
||||
|
||||
To add it to your current shell: `source <(kubectl completion bash)`
|
||||
|
||||
To add kubectl autocompletion to your profile (so it is automatically loaded in future shells):
|
||||
|
||||
```shell
|
||||
echo "source <(kubectl completion bash)" >> ~/.bashrc
|
||||
```
|
||||
|
||||
## On MacOS, using bash
|
||||
|
||||
On MacOS, you will need to install the bash-completion support first:
|
||||
|
||||
```shell
|
||||
brew install bash-completion
|
||||
```
|
||||
|
||||
To add it to your current shell:
|
||||
|
||||
```shell
|
||||
source $(brew --prefix)/etc/bash_completion
|
||||
source <(kubectl completion bash)
|
||||
```
|
||||
|
||||
To add kubectl autocompletion to your profile (so it is automatically loaded in future shells):
|
||||
|
||||
```shell
|
||||
echo "source $(brew --prefix)/etc/bash_completion" >> ~/.bash_profile
|
||||
echo "source <(kubectl completion bash)" >> ~/.bash_profile
|
||||
```
|
||||
|
||||
Please note that this only appears to work currently if you install using `brew install kubectl`,
|
||||
and not if you downloaded kubectl directly.
|
|
@ -82,6 +82,8 @@ curl -Lo kubectl http://storage.googleapis.com/kubernetes-release/release/{{page
|
|||
curl -Lo kubectl http://storage.googleapis.com/kubernetes-release/release/{{page.version}}.0/bin/darwin/386/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
|
||||
```
|
||||
|
||||
For Windows, download [kubectl.exe](http://storage.googleapis.com/kubernetes-release/release/{{page.version}}.0/bin/windows/amd64/kubectl.exe) and save it to a location on your PATH.
|
||||
|
||||
The generic download path is:
|
||||
```
|
||||
https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/${GOOS}/${GOARCH}/${K8S_BINARY}
|
||||
|
|
|
@ -59,7 +59,7 @@ Under rktnetes, `kubectl get logs` currently cannot get logs from applications t
|
|||
|
||||
## Init containers
|
||||
|
||||
The alpha [init container](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/container-init.md) feature is currently not supported.
|
||||
The beta [init container](/docs/user-guide/pods/init-containers.md) feature is currently not supported.
|
||||
|
||||
## Container restart back-off
|
||||
|
||||
|
|
|
@ -81,12 +81,12 @@ to implement one of the above options:
|
|||
|
||||
- **Use a network plugin which is called by Kubernetes**
|
||||
- Kubernetes supports the [CNI](https://github.com/containernetworking/cni) network plugin interface.
|
||||
- There are a number of solutions which provide plugins for Kubernetes:
|
||||
- There are a number of solutions which provide plugins for Kubernetes (listed alphabetically):
|
||||
- [Calico](http://docs.projectcalico.org/)
|
||||
- [Flannel](https://github.com/coreos/flannel)
|
||||
- [Calico](https://github.com/projectcalico/calico-containers)
|
||||
- [Weave](https://weave.works/)
|
||||
- [Romana](http://romana.io/)
|
||||
- [Open vSwitch (OVS)](http://openvswitch.org/)
|
||||
- [Romana](http://romana.io/)
|
||||
- [Weave](http://weave.works/)
|
||||
- [More found here](/docs/admin/networking#how-to-achieve-this)
|
||||
- You can also write your own.
|
||||
- **Compile support directly into Kubernetes**
|
||||
|
@ -381,7 +381,7 @@ The minimum version required is [v0.5.6](https://github.com/coreos/rkt/releases/
|
|||
minimum version required to match rkt v0.5.6 is
|
||||
[systemd 215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903.html).
|
||||
|
||||
[rkt metadata service](https://github.com/coreos/rkt/blob/master/Documentation/networking.md) is also required
|
||||
[rkt metadata service](https://github.com/coreos/rkt/blob/master/Documentation/networking/overview.md) is also required
|
||||
for rkt networking support. You can start the rkt metadata service by using a command like
|
||||
`sudo systemd-run rkt metadata-service`
|
||||
|
||||
|
|
|
@ -65,6 +65,7 @@ export GOVC_DATACENTER='ha-datacenter' # The datacenter to be used by vSphere cl
|
|||
```
|
||||
|
||||
Sample environment:
|
||||
|
||||
```shell
|
||||
export GOVC_URL='10.161.236.217'
|
||||
export GOVC_USERNAME='administrator'
|
||||
|
@ -79,6 +80,7 @@ export GOVC_DATACENTER='Datacenter'
|
|||
```
|
||||
|
||||
Import this VMDK into your vSphere datastore:
|
||||
|
||||
```shell
|
||||
govc import.vmdk kube.vmdk ./kube/
|
||||
```
|
||||
|
|
|
@ -129,7 +129,7 @@ Let’s now stop the container. You can list the docker containers with:
|
|||
docker ps
|
||||
```
|
||||
|
||||
You should something like see:
|
||||
You should see something like this:
|
||||
|
||||
```shell
|
||||
CONTAINER ID IMAGE COMMAND NAMES
|
||||
|
|
154
docs/index.md
|
@ -4,130 +4,38 @@ assignees:
|
|||
- thockin
|
||||
|
||||
---
|
||||
<style>
|
||||
h2, h3, h4 {
|
||||
border-bottom: 0px !important;
|
||||
}
|
||||
.colContainer {
|
||||
padding-top:2px;
|
||||
padding-left: 2px;
|
||||
overflow: auto;
|
||||
}
|
||||
#samples a {
|
||||
color: #000;
|
||||
}
|
||||
.col3rd {
|
||||
display: block;
|
||||
width: 250px;
|
||||
float: left;
|
||||
margin-right: 30px;
|
||||
margin-bottom: 30px;
|
||||
overflow: hidden;
|
||||
}
|
||||
.col3rd h3, .col2nd h3 {
|
||||
margin-bottom: 0px !important;
|
||||
}
|
||||
.col3rd .button, .col2nd .button {
|
||||
margin-top: 20px;
|
||||
border-radius: 2px;
|
||||
}
|
||||
.col3rd p, .col2nd p {
|
||||
margin-left: 2px;
|
||||
}
|
||||
.col2nd {
|
||||
display: block;
|
||||
width: 400px;
|
||||
float: left;
|
||||
margin-right: 30px;
|
||||
margin-bottom: 30px;
|
||||
overflow: hidden;
|
||||
}
|
||||
.shadowbox {
|
||||
display: inline;
|
||||
float: left;
|
||||
text-transform: none;
|
||||
font-weight: bold;
|
||||
text-align: center;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
overflow: hidden;
|
||||
line-height: 24px;
|
||||
position: relative;
|
||||
display: block;
|
||||
cursor: pointer;
|
||||
box-shadow: 0 2px 2px rgba(0,0,0,.24),0 0 2px rgba(0,0,0,.12);
|
||||
border-radius: 10px;
|
||||
background: #fff;
|
||||
transition: all .3s;
|
||||
padding: 16px;
|
||||
margin: 0 16px 16px 0;
|
||||
text-decoration: none;
|
||||
letter-spacing: .01em;
|
||||
}
|
||||
.shadowbox img {
|
||||
min-width: 150px;
|
||||
max-width: 150px;
|
||||
max-height: 50px;
|
||||
}
|
||||
</style>
|
||||
<div class="colContainer">
|
||||
<div class="col3rd">
|
||||
<h3>What is Kubernetes?</h3>
|
||||
<p>Kubernetes is an open-source platform for automating deployment, scaling, and operations of application containers across clusters of hosts. Learn more about what this means for your app.</p>
|
||||
<a href="/docs/whatisk8s/" class="button">Read the Overview</a>
|
||||
</div>
|
||||
<div class="col3rd">
|
||||
<h3>Kubernetes Basics Interactive Tutorial</h3>
|
||||
<p>The Kubernetes Basics interactive tutorials let you try out Kubernetes features using Minikube right out of your web browser in a virtual terminal. Learn about the Kubernetes system and deploy, expose, scale, and upgrade a containerized application in just a few minutes.</p>
|
||||
<a href="/docs/tutorials/kubernetes-basics/" class="button">Try the Interactive Tutorials</a>
|
||||
</div>
|
||||
<div class="col3rd">
|
||||
<h3>Installing Kubernetes on Linux with kubeadm</h3>
|
||||
<p>This quickstart will show you how to install a secure Kubernetes cluster on any computers running Linux, using a tool called <code>kubeadm</code>. It'll work with local VMs, physical servers and/or cloud servers, either manually or as a part of your own automation. It is currently in alpha but please try it out and give us feedback!</p>
|
||||
<p>If you are looking for a fully automated solution, note that kubeadm is intended as a building block. Tools such as GKE and kops build on kubeadm to provision a complete cluster.</p>
|
||||
<a href="/docs/getting-started-guides/kubeadm/" class="button">Install Kubernetes with kubeadm</a>
|
||||
</div>
|
||||
<div class="col3rd">
|
||||
<h3>Installing Kubernetes on AWS with kops</h3>
|
||||
<p>This quickstart will show you how to bring up a complete Kubernetes cluster on AWS, using a tool called <code>kops</code>.</p>
|
||||
<a href="/docs/getting-started-guides/kops/" class="button">Install Kubernetes with kops</a>
|
||||
</div>
|
||||
<div class="col3rd">
|
||||
<h3>Guided Tutorial</h3>
|
||||
<p>If you’ve completed one of the quickstarts, a great next step is Kubernetes 101. You will follow a path through the various features of Kubernetes, with code examples along the way, learning all of the core concepts. There's also a <a href="/docs/user-guide/walkthrough/k8s201">Kubernetes 201</a>!</p>
|
||||
<a href="/docs/user-guide/walkthrough/" class="button">Kubernetes 101</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
## Samples
|
||||
<p>Kubernetes documentation can help you set up Kubernetes, learn about the system, or get your applications and workloads running on Kubernetes. To learn the basics of what Kubernetes is and how it works, read "<a href="/docs/whatisk8s/">What is Kubernetes</a>". </p>
|
||||
|
||||
<div id="samples" class="colContainer">
|
||||
<a href="/docs/getting-started-guides/meanstack/" class="shadowbox">
|
||||
<img src="/images/docs/meanstack/image_0.png"><br/>MEAN Stack
|
||||
</a>
|
||||
<a href="https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook" target="_blank" class="shadowbox">
|
||||
<img src="/images/docs/redis.svg"><br/>Guestbook + Redis
|
||||
</a>
|
||||
<a href="https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/storage/cassandra" target="_blank" class="shadowbox">
|
||||
<img src="/images/docs/cassandra.svg"><br/>Cloud Native Cassandra
|
||||
</a>
|
||||
<a href="https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/mysql-wordpress-pd/" target="_blank" class="shadowbox">
|
||||
<img src="/images/docs/wordpress.svg"><br/>WordPress + MySQL
|
||||
</a>
|
||||
</div>
|
||||
<h2>Interactive Tutorial</h2>
|
||||
|
||||
<p> </p>
|
||||
<p> </p>
|
||||
<p>The <a href="/docs/tutorials/kubernetes-basics/">Kubernetes Basics interactive tutorial</a> lets you try out Kubernetes right out of your web browser, using a virtual terminal. Learn about the Kubernetes system and deploy, expose, scale, and upgrade a containerized application in just a few minutes.</p>
|
||||
|
||||
<div class="colContainer">
|
||||
<div class="col2nd">
|
||||
<h3>Contribute to Our Docs</h3>
|
||||
<p>The docs for Kubernetes are open-source, just like the code for Kubernetes itself. The docs are on GitHub Pages, so you can fork it and it will auto-stage on username.github.io, previewing your changes!</p>
|
||||
<a href="/docs/contribute/create-pull-request/" class="button">Write Docs for K8s</a>
|
||||
</div>
|
||||
<div class="col2nd">
|
||||
<h3>Need Help?</h3>
|
||||
<p>Try consulting our <a href="/docs/troubleshooting/">troubleshooting guides</a>, or <a href="https://github.com/kubernetes/kubernetes/wiki/User-FAQ">our FAQ</a>. Kubernetes is also supported by a great community of contributors and experts who hang out in <a href="http://slack.kubernetes.io/">our Slack channel</a>, <a href="https://groups.google.com/forum/#!forum/kubernetes-users">our Google Group</a> and <a href="http://stackoverflow.com/questions/tagged/kubernetes">Stack Overflow</a>.</p>
|
||||
<a href="/docs/troubleshooting/" class="button">Get Support</a>
|
||||
</div>
|
||||
</div>
|
||||
<h2>Installing/Setting Up Kubernetes</h2>
|
||||
|
||||
<p><a href="/docs/getting-started-guides/">Picking the Right Solution</a> can help you get a Kubernetes cluster up and running, either for local development, or on your cloud provider of choice.</p>
|
||||
|
||||
<p>Other/newer ways to set up a Kubernetes cluster include:</p>
|
||||
<ul>
|
||||
<li><a href="/docs/getting-started-guides/minikube/">Minikube</a>: Install a single-node Kubernetes cluster on your local machine for development and testing.</li>
|
||||
<li><a href="/docs/getting-started-guides/kops/">Installing Kubernetes on AWS with kops</a>: Bring up a complete Kubernetes cluster on Amazon Web Services, using a tool called <code>kops</code>.</li>
|
||||
<li><a href="/docs/getting-started-guides/kubeadm/">Installing Kubernetes on Linux with kubeadm</a> (Alpha): Install a secure Kubernetes cluster on any pre-existing machines running Linux, using the built-in <code>kubeadm</code> tool.</li>
|
||||
</ul>
|
||||
|
||||
<h2>Guides, Tutorials, Tasks, and Concepts</h2>
|
||||
|
||||
<p>The Kubernetes documentation contains a number of resources to help you understand and work with Kubernetes.</p>
|
||||
<ul>
|
||||
<li><b><a href="/docs/user-guide/">Guides</a></b> provides documentation for Kubernetes features as well as administering and spinning up clusters, including usage examples.</li>
|
||||
<li><b><a href="/docs/tutorials/">Tutorials</a></b> contain detailed walkthroughs of the Kubernetes workflow.</li>
|
||||
<li><b><a href="/docs/tasks/">Tasks</a></b> contain step-by-step instructions for common Kubernetes tasks.</li>
|
||||
<li><b><a href="/docs/concepts/">Concepts</a></b> provide a deep understanding of how Kubernetes works.</li>
|
||||
</ul>
|
||||
|
||||
<h2>API and Command References</h2>
|
||||
|
||||
<p>The <a href="/docs/reference/">reference</a> documentation provides complete information on the Kubernetes APIs and the <code>kubectl</code> command-line interface.</p>
|
||||
|
||||
<h2>Tools</h2>
|
||||
|
||||
<p>The <a href="/docs/tools/">tools</a> page contains a list of native and third-party tools for Kubernetes.</p>
|
||||
|
|
|
@ -0,0 +1,110 @@
|
|||
---
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
||||
This page shows how to write and read a Container
|
||||
termination message.
|
||||
|
||||
Termination messages provide a way for containers to write
|
||||
information about fatal events to a location where it can
|
||||
be easily retrieved and surfaced by tools like dashboards
|
||||
and monitoring software. In most cases, information that you
|
||||
put in a termination message should also be written to
|
||||
the general
|
||||
[Kubernetes logs](/docs/user-guide/logging/).
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture steps %}
|
||||
|
||||
### Writing and reading a termination message
|
||||
|
||||
In this exercise, you create a Pod that runs one container.
|
||||
The configuration file specifies a command that runs when
|
||||
the container starts.
|
||||
|
||||
{% include code.html language="yaml" file="termination.yaml" ghlink="/docs/tasks/debug-pod-container/termination.yaml" %}
|
||||
|
||||
1. Create a Pod based on the YAML configuration file:
|
||||
|
||||
export REPO=https://raw.githubusercontent.com/kubernetes/kubernetes.github.io/master
|
||||
kubectl create -f $REPO/docs/tasks/debug-pod-container/termination.yaml
|
||||
|
||||
In the YAML file, in the `command` and `args` fields, you can see that the
|
||||
container sleeps for 10 seconds and then writes "Sleep expired" to
|
||||
the `/dev/termination-log` file. After the container writes
|
||||
the "Sleep expired" message, it terminates.
|
||||
|
||||
1. Display information about the Pod:
|
||||
|
||||
kubectl get pod termination-demo
|
||||
|
||||
Repeat the preceding command until the Pod is no longer running.
|
||||
|
||||
1. Display detailed information about the Pod:
|
||||
|
||||
kubectl get pod --output=yaml
|
||||
|
||||
The output includes the "Sleep expired" message:
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
...
|
||||
lastState:
|
||||
terminated:
|
||||
containerID: ...
|
||||
exitCode: 0
|
||||
finishedAt: ...
|
||||
message: |
|
||||
Sleep expired
|
||||
...
|
||||
|
||||
1. Use a Go template to filter the output so that it includes
|
||||
only the termination message:
|
||||
|
||||
```
|
||||
{% raw %} kubectl get pod termination-demo -o go-template="{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}"{% endraw %}
|
||||
```
|
||||
|
||||
### Setting the termination log file
|
||||
|
||||
By default, Kubernetes retrieves termination messages from
|
||||
`/dev/termination-log`. To change this to a different file,
|
||||
specify a `terminationMessagePath` field for your Container.
|
||||
|
||||
For example, suppose your Container writes termination messages to
|
||||
`/tmp/my-log`, and you want Kubernetes to retrieve those messages.
|
||||
Set `terminationMessagePath` as shown here:
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: msg-path-demo
|
||||
spec:
|
||||
containers:
|
||||
- name: msg-path-demo-container
|
||||
image: debian
|
||||
terminationMessagePath: "/tmp/my-log"
|
||||
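To try this end to end, here is a minimal sketch (the `command`, `args`, and message text are illustrative additions, not part of the original example):

```shell
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: msg-path-demo
spec:
  containers:
  - name: msg-path-demo-container
    image: debian
    command: ["/bin/sh"]
    args: ["-c", "sleep 5 && echo 'my message' > /tmp/my-log"]
    terminationMessagePath: "/tmp/my-log"
EOF
```

Once the Pod terminates, the message should appear under `lastState.terminated.message`, just as in the earlier exercise.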
|
||||
{% endcapture %}
|
||||
|
||||
{% capture whatsnext %}
|
||||
|
||||
* See the `terminationMessagePath` field in
|
||||
[Container](/docs/api-reference/v1/definitions#_v1_container).
|
||||
* Learn about [retrieving logs](/docs/user-guide/logging/).
|
||||
* Learn about [Go templates](https://golang.org/pkg/text/template/).
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% include templates/task.md %}
|
|
@ -0,0 +1,10 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: termination-demo
|
||||
spec:
|
||||
containers:
|
||||
- name: termination-demo-container
|
||||
image: debian
|
||||
command: ["/bin/sh"]
|
||||
args: ["-c", "sleep 10 && echo Sleep expired > /dev/termination-log"]
|
|
@ -15,6 +15,10 @@ The Tutorials section of the Kubernetes documentation is a work in progress.
|
|||
|
||||
* [Exposing an External IP Address to Access an Application in a Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/)
|
||||
|
||||
#### Stateful Applications
|
||||
|
||||
* [Running a Single-Instance Stateful Application](/docs/tutorials/stateful-application/run-stateful-application/)
|
||||
|
||||
### What's next
|
||||
|
||||
If you would like to write a tutorial, see
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: mysql-pv
|
||||
spec:
|
||||
capacity:
|
||||
storage: 20Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
gcePersistentDisk:
|
||||
pdName: mysql-disk
|
||||
fsType: ext4
|
|
@ -0,0 +1,51 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mysql
|
||||
spec:
|
||||
ports:
|
||||
- port: 3306
|
||||
selector:
|
||||
app: mysql
|
||||
clusterIP: None
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: mysql-pv-claim
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: mysql
|
||||
spec:
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: mysql
|
||||
spec:
|
||||
containers:
|
||||
- image: mysql:5.6
|
||||
name: mysql
|
||||
env:
|
||||
# Use secret in real usage
|
||||
- name: MYSQL_ROOT_PASSWORD
|
||||
value: password
|
||||
ports:
|
||||
- containerPort: 3306
|
||||
name: mysql
|
||||
volumeMounts:
|
||||
- name: mysql-persistent-storage
|
||||
mountPath: /var/lib/mysql
|
||||
volumes:
|
||||
- name: mysql-persistent-storage
|
||||
persistentVolumeClaim:
|
||||
claimName: mysql-pv-claim
|
|
@ -0,0 +1,220 @@
|
|||
---
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
||||
This page shows you how to run a single-instance stateful application
|
||||
in Kubernetes using a PersistentVolume and a Deployment. The
|
||||
application is MySQL.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture objectives %}
|
||||
|
||||
* Create a PersistentVolume referencing a disk in your environment.
|
||||
* Create a MySQL Deployment.
|
||||
* Expose MySQL to other pods in the cluster at a known DNS name.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture prerequisites %}
|
||||
|
||||
* {% include task-tutorial-prereqs.md %}
|
||||
|
||||
* For data persistence we will create a Persistent Volume that
|
||||
references a disk in your
|
||||
environment. See
|
||||
[here](/docs/user-guide/persistent-volumes/#types-of-persistent-volumes) for
|
||||
the types of environments supported. This tutorial will demonstrate
|
||||
`GCEPersistentDisk`, but any type will work. Note that `GCEPersistentDisk`
|
||||
volumes only work on Google Compute Engine.
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture lessoncontent %}
|
||||
|
||||
### Set up a disk in your environment
|
||||
|
||||
You can use any type of persistent volume for your stateful app. See
|
||||
[Types of Persistent Volumes](/docs/user-guide/persistent-volumes/#types-of-persistent-volumes)
|
||||
for a list of supported environment disks. For Google Compute Engine, run:
|
||||
|
||||
```
|
||||
gcloud compute disks create --size=20GB mysql-disk
|
||||
```
|
||||
|
||||
Next create a PersistentVolume that points to the `mysql-disk`
|
||||
disk just created. Here is a configuration file for a PersistentVolume
|
||||
that points to the Compute Engine disk above:
|
||||
|
||||
{% include code.html language="yaml" file="gce-volume.yaml" ghlink="/docs/tutorials/stateful-application/gce-volume.yaml" %}
|
||||
|
||||
Notice that the `pdName: mysql-disk` line matches the name of the disk
|
||||
in the Compute Engine environment. See
|
||||
[Persistent Volumes](/docs/user-guide/persistent-volumes/)
|
||||
for details on writing a PersistentVolume configuration file for other
|
||||
environments.
|
||||
|
||||
Create the persistent volume:
|
||||
|
||||
```
|
||||
kubectl create -f http://k8s.io/docs/tutorials/stateful-application/gce-volume.yaml
|
||||
```
|
||||
|
||||
|
||||
### Deploy MySQL
|
||||
|
||||
You can run a stateful application by creating a Kubernetes Deployment
|
||||
and connecting it to an existing PersistentVolume using a
|
||||
PersistentVolumeClaim. For example, this YAML file describes a
|
||||
Deployment that runs MySQL and references the PersistentVolumeClaim. The file
|
||||
defines a volume mount for `/var/lib/mysql`, and then creates a
|
||||
PersistentVolumeClaim that looks for a 20Gi volume. This claim is
|
||||
satisfied by any volume that meets the requirements, in this case, the
|
||||
volume created above.
|
||||
|
||||
Note: The password is defined in the config yaml, and this is insecure. See
|
||||
[Kubernetes Secrets](/docs/user-guide/secrets/)
|
||||
for a secure solution.
|
||||
|
||||
{% include code.html language="yaml" file="mysql-deployment.yaml" ghlink="/docs/tutorials/stateful-application/mysql-deployment.yaml" %}
|
||||
|
||||
1. Deploy the contents of the YAML file:
|
||||
|
||||
kubectl create -f http://k8s.io/docs/tutorials/stateful-application/mysql-deployment.yaml
|
||||
|
||||
1. Display information about the Deployment:
|
||||
|
||||
kubectl describe deployment mysql
|
||||
|
||||
Name: mysql
|
||||
Namespace: default
|
||||
CreationTimestamp: Tue, 01 Nov 2016 11:18:45 -0700
|
||||
Labels: app=mysql
|
||||
Selector: app=mysql
|
||||
Replicas: 1 updated | 1 total | 0 available | 1 unavailable
|
||||
StrategyType: Recreate
|
||||
MinReadySeconds: 0
|
||||
OldReplicaSets: <none>
|
||||
NewReplicaSet: mysql-63082529 (1/1 replicas created)
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Type Reason Message
|
||||
--------- -------- ----- ---- ------------- -------- ------ -------
|
||||
33s 33s 1 {deployment-controller } Normal ScalingReplicaSet Scaled up replica set mysql-63082529 to 1
|
||||
|
||||
1. List the pods created by the Deployment:
|
||||
|
||||
kubectl get pods -l app=mysql
|
||||
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
mysql-63082529-2z3ki 1/1 Running 0 3m
|
||||
|
||||
1. Inspect the Persistent Volume:
|
||||
|
||||
kubectl describe pv mysql-pv
|
||||
|
||||
Name: mysql-pv
|
||||
Labels: <none>
|
||||
Status: Bound
|
||||
Claim: default/mysql-pv-claim
|
||||
Reclaim Policy: Retain
|
||||
Access Modes: RWO
|
||||
Capacity: 20Gi
|
||||
Message:
|
||||
Source:
|
||||
Type: GCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)
|
||||
PDName: mysql-disk
|
||||
FSType: ext4
|
||||
Partition: 0
|
||||
ReadOnly: false
|
||||
No events.
|
||||
|
||||
1. Inspect the PersistentVolumeClaim:
|
||||
|
||||
kubectl describe pvc mysql-pv-claim
|
||||
|
||||
Name: mysql-pv-claim
|
||||
Namespace: default
|
||||
Status: Bound
|
||||
Volume: mysql-pv
|
||||
Labels: <none>
|
||||
Capacity: 20Gi
|
||||
Access Modes: RWO
|
||||
No events.
|
||||
|
||||
### Accessing the MySQL instance
|
||||
|
||||
The preceding YAML file creates a service that
|
||||
allows other Pods in the cluster to access the database. The Service option
|
||||
`clusterIP: None` lets the Service DNS name resolve directly to the
|
||||
Pod's IP address. This is optimal when you have only one Pod
|
||||
behind a Service and you don't intend to increase the number of Pods.
|
||||
|
||||
Run a MySQL client to connect to the server:
|
||||
|
||||
```
|
||||
kubectl run -it --rm --image=mysql:5.6 mysql-client -- mysql -h mysql -ppassword
|
||||
```
|
||||
|
||||
This command creates a new Pod in the cluster running a mysql client
|
||||
and connects it to the server through the Service. If it connects, you
|
||||
know your stateful MySQL database is up and running.
|
||||
|
||||
```
|
||||
Waiting for pod default/mysql-client-274442439-zyp6i to be running, status is Pending, pod ready: false
|
||||
If you don't see a command prompt, try pressing enter.
|
||||
|
||||
mysql>
|
||||
```
|
||||
|
||||
### Updating
|
||||
|
||||
The image or any other part of the Deployment can be updated as usual
|
||||
with the `kubectl apply` command. Here are some precautions that are
|
||||
specific to stateful apps:
|
||||
|
||||
* Don't scale the app. This setup is for single-instance apps
|
||||
only. The underlying PersistentVolume can only be mounted to one
|
||||
Pod. For clustered stateful apps, see the
|
||||
[StatefulSet documentation](/docs/user-guide/petset/).
|
||||
* Use `strategy:` `type: Recreate` in the Deployment configuration
|
||||
YAML file. This instructs Kubernetes to _not_ use rolling
|
||||
updates. Rolling updates will not work, as you cannot have more than
|
||||
one Pod running at a time. The `Recreate` strategy will stop the
|
||||
first pod before creating a new one with the updated configuration (see the sketch after this list).
|
||||
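For example, a sketch of an image update under the `Recreate` strategy (this assumes you have the `mysql-deployment.yaml` file from above locally and have edited its `image` field):

```
kubectl apply -f mysql-deployment.yaml
kubectl get pods -l app=mysql --watch
```

Because the strategy is `Recreate`, the old Pod is stopped before the new one is created.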
|
||||
### Deleting a deployment
|
||||
|
||||
Delete the deployed objects by name:
|
||||
|
||||
```
|
||||
kubectl delete deployment,svc mysql
|
||||
kubectl delete pvc mysql-pv-claim
|
||||
kubectl delete pv mysql-pv
|
||||
```
|
||||
|
||||
Also, if you are using Compute Engine disks:
|
||||
|
||||
```
|
||||
gcloud compute disks delete mysql-disk
|
||||
```
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
|
||||
{% capture whatsnext %}
|
||||
|
||||
* Learn more about [Deployment objects](/docs/user-guide/deployments/).
|
||||
|
||||
* Learn more about [Deploying applications](/docs/user-guide/deploying-applications/)
|
||||
|
||||
* [kubectl run documentation](/docs/user-guide/kubectl/kubectl_run/)
|
||||
|
||||
* [Volumes](/docs/user-guide/volumes/) and [Persistent Volumes](/docs/user-guide/persistent-volumes/)
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% include templates/tutorial.md %}
|
|
@ -129,7 +129,7 @@ To use it,
|
|||
* Write an application on top of the client-go clients. Note that client-go defines its own API objects, so if needed, please import API definitions from client-go rather than from the main repository, e.g., `import "k8s.io/client-go/1.4/pkg/api/v1"` is correct.
|
||||
|
||||
The Go client can use the same [kubeconfig file](/docs/user-guide/kubeconfig-file)
|
||||
as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://github.com/kubernetes/client-go/examples/out-of-cluster.go):
|
||||
as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://github.com/kubernetes/client-go/blob/master/examples/out-of-cluster/main.go):
|
||||
|
||||
```golang
|
||||
import (
|
||||
|
@ -183,7 +183,8 @@ From within a pod the recommended ways to connect to API are:
|
|||
in any container of the pod can access it. See this [example of using kubectl proxy
|
||||
in a pod](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/kubectl-container/).
|
||||
- use the Go client library, and create a client using the `client.NewInCluster()` factory.
|
||||
This handles locating and authenticating to the apiserver. [example](https://github.com/kubernetes/client-go/examples/in-cluster.go)
|
||||
This handles locating and authenticating to the apiserver. See this [example of using Go client
|
||||
library in a pod](https://github.com/kubernetes/client-go/blob/master/examples/in-cluster/main.go).
|
||||
|
||||
In each case, the credentials of the pod are used to communicate securely with the apiserver.
|
||||
|
||||
|
|
|
@ -64,12 +64,12 @@ healthy backend service endpoint at all times, even in the event of
|
|||
pod, cluster,
|
||||
availability zone or regional outages.
|
||||
|
||||
Note that in the
|
||||
case of Google Cloud, the logical L7 load balancer is not a single physical device (which
|
||||
would present both a single point of failure, and a single global
|
||||
network routing choke point), but rather a [truly global, highly available
|
||||
load balancing managed service](https://cloud.google.com/load-balancing/),
|
||||
globally reachable via a single, static IP address.
|
||||
Note that in the case of Google Cloud, the logical L7 load balancer is
|
||||
not a single physical device (which would present both a single point
|
||||
of failure, and a single global network routing choke point), but
|
||||
rather a
|
||||
[truly global, highly available load balancing managed service](https://cloud.google.com/load-balancing/),
|
||||
globally reachable via a single, static IP address.
|
||||
|
||||
Clients inside your federated Kubernetes clusters (i.e. Pods) will be
|
||||
automatically routed to the cluster-local shard of the Federated Service
|
||||
|
@ -86,13 +86,13 @@ You can create a federated ingress in any of the usual ways, for example using k
|
|||
``` shell
|
||||
kubectl --context=federation-cluster create -f myingress.yaml
|
||||
```
|
||||
|
||||
For example ingress YAML configurations, see the [Ingress User Guide](/docs/user-guide/ingress/).
|
||||
The '--context=federation-cluster' flag tells kubectl to submit the
|
||||
request to the Federation API endpoint, with the appropriate
|
||||
credentials. If you have not yet configured such a context, visit the
|
||||
[federation admin guide](/docs/admin/federation/) or one of the
|
||||
[administration tutorials](https://github.com/kelseyhightower/kubernetes-cluster-federation)
|
||||
to find out how to do so. TODO: Update links
|
||||
to find out how to do so.
|
||||
|
||||
As described above, the Federated Ingress will automatically create
|
||||
and maintain matching Kubernetes ingresses in all of the clusters
|
||||
|
@ -147,17 +147,28 @@ Events:
|
|||
2m 2m 1 {loadbalancer-controller } Normal CREATE ip: 130.211.5.194
|
||||
```
|
||||
|
||||
Note the address of your Federated Ingress
|
||||
Note that:
|
||||
|
||||
1. the address of your Federated Ingress
|
||||
corresponds with the address of all of the
|
||||
underlying Kubernetes ingresses (once these have been allocated - this
|
||||
may take up to a few minutes).
|
||||
|
||||
Note also that we have not yet provisioned any backend Pods to receive
|
||||
2. we have not yet provisioned any backend Pods to receive
|
||||
the network traffic directed to this ingress (i.e. 'Service
|
||||
Endpoints' behind the service backing the Ingress), so the Federated Ingress does not yet consider these to
|
||||
be healthy shards and will not direct traffic to any of these clusters.
|
||||
3. the federation control system will
|
||||
automatically reconfigure the load balancer controllers in all of the
|
||||
clusters in your federation to make them consistent, and allow
|
||||
them to share global load balancers. But this reconfiguration can
|
||||
only complete successfully if there are no pre-existing Ingresses in
|
||||
those clusters (this is a safety feature to prevent accidental
|
||||
breakage of existing ingresses). So to ensure that your federated
|
||||
ingresses function correctly, either start with new, empty clusters, or make
|
||||
sure that you delete (and recreate if necessary) all pre-existing
|
||||
Ingresses in the clusters comprising your federation.
|
||||
|
||||
## Adding backend services and pods
|
||||
## Adding backend services and pods
|
||||
|
||||
To render the underlying ingress shards healthy, we need to add
|
||||
backend Pods behind the service upon which the Ingress is based. There are several ways to achieve this, but
|
||||
|
@ -175,6 +186,16 @@ kubectl --context=federation-cluster create -f services/nginx.yaml
|
|||
kubectl --context=federation-cluster create -f myreplicaset.yaml
|
||||
```
|
||||
|
||||
Note that in order for your federated ingress to work correctly on
|
||||
Google Cloud, the node ports of all of the underlying cluster-local
|
||||
services need to be identical. If you're using a federated service
|
||||
this is easy to do. Simply pick a node port that is not already
|
||||
being used in any of your clusters, and add that to the spec of your
|
||||
federated service. If you do not specify a node port for your
|
||||
federated service, each cluster will choose its own node port for
|
||||
its cluster-local shard of the service, and these will probably end
|
||||
up being different, which is not what you want.
|
||||
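A sketch of what this looks like (the service name, port, and `nodePort` value below are illustrative; pick a `nodePort` that is free in every cluster):

``` shell
cat <<EOF | kubectl --context=federation-cluster create -f -
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - port: 80
    nodePort: 32100
EOF
```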
|
||||
You can verify this by checking in each of the underlying clusters, for example:
|
||||
|
||||
``` shell
|
||||
|
@ -258,6 +279,35 @@ Check that:
|
|||
`service-controller` or `replicaset-controller`,
|
||||
errors in the output of `kubectl logs federation-controller-manager --namespace federation`).
|
||||
|
||||
#### I can create a federated ingress successfully, but request load is not correctly distributed across the underlying clusters
|
||||
|
||||
Check that:
|
||||
|
||||
1. the services underlying your federated ingress in each cluster have
|
||||
identical node ports. See [above](#creating_a_federated_ingress) for further explanation.
|
||||
2. the load balancer controllers in each of your clusters are of the
|
||||
correct type ("GLBC") and have been correctly reconfigured by the
|
||||
federation control plane to share a global GCE load balancer (this
|
||||
should happen automatically). If they are of the correct type and
|
||||
have been correctly reconfigured, the UID data item in the GLBC
|
||||
configmap in each cluster will be identical across all clusters (a quick way to check this is sketched after this list).
|
||||
See
|
||||
[the GLBC docs](https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/BETA_LIMITATIONS.md#changing-the-cluster-uid)
|
||||
for further details.
|
||||
If this is not the case, check the logs of your federation
|
||||
controller manager to determine why this automated reconfiguration
|
||||
might be failing.
|
||||
3. no ingresses have been manually created in any of your clusters before the above
|
||||
reconfiguration of the load balancer controller completed
|
||||
successfully. Ingresses created before the reconfiguration of
|
||||
your GLBC will interfere with the behavior of your federated
|
||||
ingresses created after the reconfiguration (see
|
||||
[the GLBC docs](https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/BETA_LIMITATIONS.md#changing-the-cluster-uid)
|
||||
for further information). To remedy this,
|
||||
delete any ingresses created before the cluster joined the
|
||||
federation (and had its GLBC reconfigured), and recreate them if
|
||||
necessary.
|
||||
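As a sketch of the UID check mentioned in item 2 (the configmap name `ingress-uid` in the `kube-system` namespace is an assumption based on the GLBC docs; verify against your clusters):

``` shell
# The uid data item should be identical in every cluster.
kubectl --context=cluster-1 --namespace=kube-system get configmap ingress-uid -o yaml
kubectl --context=cluster-2 --namespace=kube-system get configmap ingress-uid -o yaml
```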
|
||||
#### This troubleshooting guide did not help me solve my problem
|
||||
|
||||
Please use one of our [support channels](http://kubernetes.io/docs/troubleshooting/) to seek assistance.
|
||||
|
|
|
@ -4,22 +4,15 @@ assignees:
|
|||
|
||||
---
|
||||
|
||||
* TOC
|
||||
{:toc}
|
||||
The Kubernetes **Guides** can help you work with various aspects of the Kubernetes system.
|
||||
|
||||
* The Kubernetes [User Guide](#user-guide-internal) can help you run programs and services on an existing Kubernetes cluster.
|
||||
* The [Cluster Admin Guide](/docs/admin/) can help you set up and administrate your own Kubernetes cluster.
|
||||
* The [Developer Guide](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel) can help you either write code to directly access the Kubernetes API, or to contribute directly to the Kubernetes project.
|
||||
|
||||
The user guide is intended for anyone who wants to run programs and services on an existing Kubernetes cluster. Setup and administration of a Kubernetes cluster is described in the [Cluster Admin Guide](/docs/admin/). The [Developer Guide](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel) is for anyone wanting to either write code which directly accesses the Kubernetes API, or to contribute directly to the Kubernetes project.
|
||||
## <a name="user-guide-internal"></a>Kubernetes User Guide
|
||||
|
||||
Please ensure you have completed the [prerequisites for running examples from the user guide](/docs/user-guide/prereqs/).
|
||||
|
||||
## Quick walkthrough
|
||||
|
||||
1. [Kubernetes 101](/docs/user-guide/walkthrough/)
|
||||
1. [Kubernetes 201](/docs/user-guide/walkthrough/k8s201/)
|
||||
|
||||
## Thorough walkthrough
|
||||
|
||||
If you don't have any familiarity with Kubernetes, we recommend you read the following sections in order:
|
||||
The following topics in the Kubernetes User Guide can help you run applications and services on a Kubernetes cluster:
|
||||
|
||||
1. [Quick start: launch and expose an application](/docs/user-guide/quick-start/)
|
||||
1. [Configuring and launching containers: configuring common container parameters](/docs/user-guide/configuring-containers/)
|
||||
|
@ -35,7 +28,9 @@ If you don't have any familiarity with Kubernetes, we recommend you read the fol
|
|||
1. [Connecting to containers via proxies](/docs/user-guide/connecting-to-applications-proxy/)
|
||||
1. [Connecting to containers via port forwarding](/docs/user-guide/connecting-to-applications-port-forward/)
|
||||
|
||||
## Concept guide
|
||||
Before running examples in the user guides, please ensure you have completed the [prerequisites](/docs/user-guide/prereqs/).
|
||||
|
||||
## Kubernetes Concepts
|
||||
|
||||
[**Cluster**](/docs/admin/)
|
||||
: A cluster is a set of physical or virtual machines and other infrastructure resources used by Kubernetes to run your applications.
|
||||
|
|
|
@ -3,7 +3,7 @@ kind: Pod
|
|||
metadata:
|
||||
name: nginx
|
||||
annotations:
|
||||
pod.alpha.kubernetes.io/init-containers: '[
|
||||
pod.beta.kubernetes.io/init-containers: '[
|
||||
{
|
||||
"name": "install",
|
||||
"image": "busybox",
|
||||
|
|
|
@ -173,7 +173,7 @@ on node N if node N has a label with key `failure-domain.beta.kubernetes.io/zone
|
|||
such that there is at least one node in the cluster with key `failure-domain.beta.kubernetes.io/zone` and
|
||||
value V that is running a pod that has a label with key "security" and value "S1".) The pod anti-affinity
|
||||
rule says that the pod cannot schedule onto a node if that node is already running a pod with label
|
||||
having key "security" and value "S2". (If the `topologyKey` were `failure-domain.beta.kuberntes.io/zone` then
|
||||
having key "security" and value "S2". (If the `topologyKey` were `failure-domain.beta.kubernetes.io/zone` then
|
||||
it would mean that the pod cannot schedule onto a node if that node is in the same zone as a pod with
|
||||
label having key "security" and value "S2".) See the [design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/podaffinity.md).
|
||||
for many more examples of pod affinity and anti-affinity, both the `requiredDuringSchedulingIgnoredDuringExecution`
|
||||
|
|
|
@ -88,7 +88,7 @@ vm-1 # printf "GET / HTTP/1.0\r\n\r\n" | netcat vm-0.ub 80
|
|||
It's worth exploring what just happened. Init containers run sequentially *before* the application container. In this example we used the init container to copy shared libraries from the rootfs, while preserving user installed packages across container restart.
|
||||
|
||||
```yaml
|
||||
pod.alpha.kubernetes.io/init-containers: '[
|
||||
pod.beta.kubernetes.io/init-containers: '[
|
||||
{
|
||||
"name": "rootfs",
|
||||
"image": "ubuntu:15.10",
|
||||
|
|
|
@ -29,7 +29,7 @@ spec:
|
|||
app: nginx
|
||||
annotations:
|
||||
pod.alpha.kubernetes.io/initialized: "true"
|
||||
pod.alpha.kubernetes.io/init-containers: '[
|
||||
pod.beta.kubernetes.io/init-containers: '[
|
||||
{
|
||||
"name": "peerfinder",
|
||||
"image": "gcr.io/google_containers/peer-finder:0.1",
|
||||
|
|
|
@ -66,8 +66,8 @@ The possible values for RestartPolicy are `Always`, `OnFailure`, or `Never`. If
|
|||
Three types of controllers are currently available:
|
||||
|
||||
- Use a [`Job`](/docs/user-guide/jobs/) for pods which are expected to terminate (e.g. batch computations).
|
||||
- Use a [`ReplicationController`](/docs/user-guide/replication-controller/) for pods which are not expected to
|
||||
terminate (e.g. web servers).
|
||||
- Use a [`ReplicationController`](/docs/user-guide/replication-controller/) or [`Deployment`](/docs/user-guide/deployments/)
|
||||
for pods which are not expected to terminate (e.g. web servers).
|
||||
- Use a [`DaemonSet`](/docs/admin/daemons/): Use for pods which need to run 1 per machine because they provide a
|
||||
machine-specific system service.
|
||||
If you are unsure whether to use ReplicationController or Daemon, then see [Daemon Set versus
|
||||
|
|
|
@ -0,0 +1,169 @@
|
|||
---
|
||||
assignees:
|
||||
- erictune
|
||||
|
||||
---
|
||||
|
||||
* TOC
|
||||
{:toc}
|
||||
|
||||
In addition to having one or more main containers (or **app containers**), a
|
||||
pod can also have one or more **init containers** which run before the app
|
||||
containers. Init containers allow you to reduce and reorganize setup scripts
|
||||
and "glue code".
|
||||
|
||||
## Overview
|
||||
|
||||
An init container is exactly like a regular container, except that it always
|
||||
runs to completion and each init container must complete successfully before
|
||||
the next one is started. If the init container fails, Kubernetes will restart
|
||||
the pod until the init container succeeds. If the pod's `RestartPolicy` is `Never`,
|
||||
the pod will fail if the init container fails.
|
||||
|
||||
You specify a container as an init container by adding an annotation.
|
||||
The annotation key is `pod.beta.kubernetes.io/init-containers`. The annotation
|
||||
value is a JSON array of [objects of type `v1.Container`
|
||||
](http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_container).
|
||||
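A minimal sketch of the annotation in use (the names, images, and commands are illustrative):

```shell
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: init-demo
  annotations:
    pod.beta.kubernetes.io/init-containers: '[
        {
            "name": "wait-a-bit",
            "image": "busybox",
            "command": ["sh", "-c", "sleep 5"]
        }
    ]'
spec:
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "echo app started && sleep 3600"]
EOF
```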
|
||||
Once the feature exits beta, the init containers will be specified on the Pod
|
||||
Spec alongside the app `containers` array.
|
||||
The status of the init containers is returned in another annotation,
|
||||
`pod.beta.kubernetes.io/init-container-statuses`, as an array of the
|
||||
container statuses (similar to the `status.containerStatuses` field).
|
||||
|
||||
Init containers support all of the same features as normal containers,
|
||||
including resource limits, volumes, and security settings. The resource
|
||||
requests and limits for an init container are [handled slightly differently](
|
||||
#resources). Init containers do not support readiness probes since they will
|
||||
run to completion before the pod can be ready.
|
||||
An init container has all of the fields of an app container.
|
||||
|
||||
If you specify multiple init containers for a pod, those containers run one at
|
||||
a time in sequential order. Each must succeed before the next can run. Once all
|
||||
init containers have run to completion, Kubernetes initializes the pod and runs
|
||||
the application containers as usual.
|
||||
|
||||
## What are Init Containers Good For?
|
||||
|
||||
Because init containers have separate images from application containers, they
|
||||
have some advantages for start-up related code. These include:
|
||||
|
||||
* they can contain utilities that are not desirable to include in the app container
|
||||
image for security reasons,
|
||||
* they can contain utilities or custom code for setup that is not present in an app
|
||||
image. (No need to make an image `FROM` another image just to use a tool like
|
||||
`sed`, `awk`, `python`, `dig`, etc during setup).
|
||||
* the application image builder and the deployer roles can work independently without
|
||||
the need to jointly build a single app image.
|
||||
|
||||
Because init containers have a different filesystem view (Linux namespaces) from
|
||||
app containers, they can be given access to Secrets that the app containers are
|
||||
not able to access.
|
||||
|
||||
Since init containers run to completion before any app containers start, and
|
||||
since app containers run in parallel, they provide an easier way to block or
|
||||
delay the startup of application containers until some precondition is met.
|
||||
|
||||
Because init containers run in sequence and there can be multiple init containers,
|
||||
they can be composed easily.
|
||||
|
||||
Here are some ideas for how to use init containers:
|
||||
- Wait for a service to be created with a shell command like:
|
||||
`for i in {1..100}; do sleep 1; if dig myservice; then exit 0; fi; done; exit 1`
|
||||
- Register this pod with a remote server with a command like:
|
||||
`curl -X POST http://$MANAGEMENT_SERVICE_HOST:$MANAGEMENT_SERVICE_PORT/register -d 'instance=$(POD_NAME)&ip=$(POD_IP)'`
|
||||
using `POD_NAME` and `POD_IP` from the downward API.
|
||||
- Wait for some time before starting the app container with a command like `sleep 60`.
|
||||
- Clone a git repository into a volume.
|
||||
- Place values like a POD_IP into a configuration file, and run a template tool (e.g. jinja)
|
||||
to generate a configuration file to be consumed by the main app container.
|
||||
|
||||
|
||||
Complete usage examples can be found in the [PetSets
|
||||
guide](/docs/user-guide/petset/bootstrapping/index.md) and the [Production Pods
|
||||
guide](/docs/user-guide/production-pods.md#handling-initialization).
|
||||
|
||||
|
||||
## Detailed Behavior

Each pod may have 0..N init containers defined along with the existing
1..M app containers.

On startup of the pod, after the network and volumes are initialized, the init
containers are started in order. Each container must exit successfully before
the next is invoked. If a container fails to start (due to the runtime) or
exits with failure, it is retried according to the pod RestartPolicy, except
when the pod restart policy is RestartPolicyAlways, in which case just the init
containers use RestartPolicyOnFailure.

A pod cannot be ready until all init containers have succeeded. The ports on an
init container are not aggregated under a service. A pod that is being
initialized is in the `Pending` phase but has the condition `Initializing`
set to `true`.

If the pod is [restarted](#pod-restart-reasons), all init containers must
execute again.

Changes to the init container spec are limited to the container image field.
Altering an init container image field is equivalent to restarting the pod.

Because init containers can be restarted, retried, or reexecuted, init container
code should be idempotent. In particular, code that writes to files on EmptyDirs
should be prepared for the possibility that an output file already exists.
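
For instance, an init container command can be written to tolerate
re-execution, as in this sketch (the `/work-dir/ready` marker file and the
`generate-config` helper are hypothetical):

```yaml
# Skip work that a previous, possibly interrupted, execution of this
# init container already completed.
name: init-config
image: busybox
command: ["sh", "-c",
  "test -f /work-dir/ready || (generate-config > /work-dir/app.conf && touch /work-dir/ready)"]
```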

An init container has all of the fields of an app container. The following
fields are prohibited from being used on init containers by validation:

* `readinessProbe` - init containers must exit for pod startup to continue,
  are not included in rotation, and so cannot define readiness distinct from
  completion.

Init container authors may use `activeDeadlineSeconds` on the pod and
`livenessProbe` on the container to prevent init containers from failing
forever. The active deadline includes init containers.
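
A sketch of the deadline side of this advice, with an assumed timeout value
(`livenessProbe` would go on the init container spec itself):

```yaml
spec:
  # Bounds total pod execution time, including the init containers, so a
  # stuck init container cannot keep the pod initializing forever.
  activeDeadlineSeconds: 300
```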

The name of each app and init container in a pod must be unique - it is a
validation error for any container to share a name.

### Resources

Given the ordering and execution for init containers, the following rules
for resource usage apply:

* The highest of any particular resource request or limit defined on all init
  containers is the **effective init request/limit**.
* The pod's **effective request/limit** for a resource is the higher of:
  * the sum of all app containers' requests/limits for a resource
  * the effective init request/limit for a resource
* Scheduling is done based on effective requests/limits, which means
  init containers can reserve resources for initialization that are not used
  during the life of the pod.
* The pod's **effective QoS tier** is the QoS tier for init containers
  and app containers alike.
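
A worked example of these rules, with assumed values:

```
Two init containers (run one at a time) and two app containers (run in
parallel), with these assumed CPU requests:

  init-a: 200m    init-b: 100m
  app-1:   50m    app-2:  100m

effective init request = max(200m, 100m) = 200m
sum of app requests    = 50m + 100m      = 150m
effective pod request  = max(200m, 150m) = 200m
```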

Quota and limits are applied based on the effective pod request and
limit.

Pod-level cgroups are based on the effective pod request and limit, the
same as the scheduler.

## Pod Restart Reasons

A Pod may "restart", causing reexecution of init containers, for the following
reasons:

* An init container image is changed by a user updating the Pod Spec.
  * App container image changes only restart the app container.
* The pod infrastructure container is restarted.
  * This is uncommon and would have to be done by someone with root access to nodes.
* All containers in a pod are terminated, requiring a restart (RestartPolicyAlways), AND the record of init container completion has been lost due to garbage collection.

## Support and compatibility

A cluster with Kubelet and Apiserver version 1.4.0 or greater supports init
containers with the beta annotations. Support varies for other combinations of
Kubelet and Apiserver version; see the [release
notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md) for details.

@ -204,6 +204,8 @@ The status of the init containers is returned as another annotation - `pod.beta.

Init containers support all of the same features as normal containers, including resource limits, volumes, and security settings. The resource requests and limits for an init container are handled slightly differently than for normal containers, since init containers are run one at a time instead of all at once - any limits or quotas will be applied based on the largest init container resource quantity, rather than as the sum of quantities. Init containers do not support readiness probes since they will run to completion before the pod can be ready.

[Complete Init Container Documentation](/docs/user-guide/pods/init-containers.md)


## Lifecycle hooks and termination notice

@ -94,7 +94,7 @@ of the replicated pods.
kubectl create -f hpa-rs.yaml
```

Alternatively, you can just use the `kubectl autoscale` command to acommplish the same
Alternatively, you can just use the `kubectl autoscale` command to accomplish the same
(and it's easier!)

```shell

@ -345,7 +345,7 @@ can do a DNS SRV query for `"_http._tcp.my-service.my-ns"` to discover the port
number for `"http"`.

The Kubernetes DNS server is the only way to access services of type
`ExternalName`.
`ExternalName`. More information is available in the [DNS Admin Guide](http://kubernetes.io/docs/admin/dns/).

## Headless services

@ -46,7 +46,7 @@ The system adds fields in several ways:

- Some fields are added synchronously with creation of the resource and some are set asynchronously.
  - For example: `metadata.uid` is set synchronously. (Read more about [metadata](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#metadata)).
  - For example, `status.hostIP` is set only after the pod has been scheduled. This often happens fast, but you may notice pods which do not have this set yet. This is called Late Initialization. (Read mode about [status](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#spec-and-status) and [late initialization](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#late-initialization) ).
  - For example, `status.hostIP` is set only after the pod has been scheduled. This often happens fast, but you may notice pods which do not have this set yet. This is called Late Initialization. (Read more about [status](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#spec-and-status) and [late initialization](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#late-initialization)).
- Some fields are set to default values. Some defaults vary by cluster and some are fixed for the API at a certain version. (Read more about [defaulting](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api-conventions.md#defaulting)).
  - For example, `spec.containers[0].imagePullPolicy` always defaults to `IfNotPresent` in api v1.
  - For example, `spec.containers[0].resources.limits.cpu` may be defaulted to `100m` on some clusters, to some other value on others, and not defaulted at all on others.

@ -0,0 +1,42 @@
---
layout: docwithnav
---

<!-- BEGIN: Gotta keep this section JS/HTML because it swaps out content dynamically -->
<p> </p>
<script language="JavaScript">
// If the URL hash names a page path, deep-link the button straight to the
// GitHub editor for that file; otherwise show the general instructions.
var forwarding=window.location.hash.replace("#","");
$( document ).ready(function() {
  if(forwarding) {
    $("#generalInstructions").hide();
    $("#continueEdit").show();
    $("#continueEditButton").text("Edit " + forwarding);
    $("#continueEditButton").attr("href", "https://github.com/kubernetes/kubernetes.github.io/edit/master/" + forwarding);
  } else {
    $("#generalInstructions").show();
    $("#continueEdit").hide();
  }
});
</script>
<div id="continueEdit">

<h2>Continue your edit</h2>

<p>Click the link below to edit the page you were just on. When you are done, press "Commit Changes" at the bottom of the screen. This will create a copy of our site on your GitHub account called a "fork." You can make other changes in your fork after it is created, if you want. When you are ready to send us all your changes, go to the index page for your fork and click "New Pull Request" to let us know about it.</p>

<p><a id="continueEditButton" class="button"></a></p>

</div>
<div id="generalInstructions">

<h2>Edit our site in the cloud</h2>

<p>Click the button below to visit the repo for our site. You can then click the "Fork" button in the upper-right area of the screen to create a copy of our site on your GitHub account called a "fork." Make any changes you want in your fork, and when you are ready to send those changes to us, go to the index page for your fork and click "New Pull Request" to let us know about it.</p>

<p><a class="button" href="https://github.com/kubernetes/kubernetes.github.io/">Browse this site's source code</a></p>

</div>
<!-- END: Dynamic section -->


{% include_relative README.md %}
After Width: | Height: | Size: 6.8 KiB |
After Width: | Height: | Size: 7.3 KiB |
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 12 KiB |
After Width: | Height: | Size: 5.1 KiB |
After Width: | Height: | Size: 9.5 KiB |
After Width: | Height: | Size: 17 KiB |
After Width: | Height: | Size: 10 KiB |
After Width: | Height: | Size: 4.2 KiB |
@ -127,11 +127,11 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) {
			t.Namespace = api.NamespaceDefault
		}
		errors = expvalidation.ValidateDaemonSet(t)
	case *batch.ScheduledJob:
	case *batch.CronJob:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = batch_validation.ValidateScheduledJob(t)
		errors = batch_validation.ValidateCronJob(t)
	default:
		errors = field.ErrorList{}
		errors = append(errors, field.InternalError(field.NewPath(""), fmt.Errorf("no validation defined for %#v", obj)))
@ -242,7 +242,7 @@ func TestExampleObjectSchemas(t *testing.T) {
		"redis-resource-deployment": &extensions.Deployment{},
		"redis-secret-deployment": &extensions.Deployment{},
		"run-my-nginx": &extensions.Deployment{},
		"sj": &batch.ScheduledJob{},
		"sj": &batch.CronJob{},
	},
	"../docs/admin": {
		"daemon": &extensions.DaemonSet{},
@ -272,7 +272,7 @@ func TestExampleObjectSchemas(t *testing.T) {
	"../docs/user-guide/node-selection": {
		"pod": &api.Pod{},
		"pod-with-node-affinity": &api.Pod{},
		"pod-with-pod-affinity": &api.Pod{},
		"pod-with-pod-affinity": &api.Pod{},
	},
	"../docs/admin/resourcequota": {
		"best-effort": &api.ResourceQuota{},