Merge branch 'master' into kubefed-fixes

commit c3ec5e82cc

404.md (1 line changed)

@@ -3,6 +3,7 @@ layout: docwithnav
title: 404 Error!
permalink: /404.html
no_canonical: true
sitemap: false
---

<script src="/js/redirects.js"></script>
Gemfile (19 lines changed)

@@ -1,20 +1,3 @@
source "https://rubygems.org"

gem "jekyll", "3.2.1"
gem "jekyll-sass-converter", "1.3.0"
gem "minima", "1.1.0"
gem "kramdown", "1.11.1"
gem "liquid", "3.0.6"
gem "rouge", "1.11.1"
gem "jemoji", "0.7.0"
gem "jekyll-mentions", "1.2.0"
gem "jekyll-redirect-from", "0.11.0"
gem "jekyll-sitemap", "0.10.0"
gem "jekyll-feed", "0.5.1"
gem "jekyll-gist", "1.4.0"
gem "jekyll-paginate", "1.1.0"
gem "jekyll-coffeescript", "1.0.1"
gem "jekyll-seo-tag", "2.0.0"
gem "jekyll-github-metadata", "2.0.2"
gem "listen", "3.0.6"
gem "activesupport", "4.2.7"
gem "github-pages", group: :jekyll_plugins
Gemfile.lock (125 lines changed)
@@ -7,23 +7,63 @@ GEM
|
|||
minitest (~> 5.1)
|
||||
thread_safe (~> 0.3, >= 0.3.4)
|
||||
tzinfo (~> 1.1)
|
||||
addressable (2.4.0)
|
||||
addressable (2.5.0)
|
||||
public_suffix (~> 2.0, >= 2.0.2)
|
||||
coffee-script (2.4.1)
|
||||
coffee-script-source
|
||||
execjs
|
||||
coffee-script-source (1.10.0)
|
||||
coffee-script-source (1.11.1)
|
||||
colorator (1.1.0)
|
||||
ethon (0.10.1)
|
||||
ffi (>= 1.3.0)
|
||||
execjs (2.7.0)
|
||||
faraday (0.9.2)
|
||||
faraday (0.10.0)
|
||||
multipart-post (>= 1.2, < 3)
|
||||
ffi (1.9.14)
|
||||
forwardable-extended (2.6.0)
|
||||
gemoji (2.1.0)
|
||||
github-pages (109)
|
||||
activesupport (= 4.2.7)
|
||||
github-pages-health-check (= 1.3.0)
|
||||
jekyll (= 3.3.1)
|
||||
jekyll-avatar (= 0.4.2)
|
||||
jekyll-coffeescript (= 1.0.1)
|
||||
jekyll-default-layout (= 0.1.4)
|
||||
jekyll-feed (= 0.8.0)
|
||||
jekyll-gist (= 1.4.0)
|
||||
jekyll-github-metadata (= 2.2.0)
|
||||
jekyll-mentions (= 1.2.0)
|
||||
jekyll-optional-front-matter (= 0.1.2)
|
||||
jekyll-paginate (= 1.1.0)
|
||||
jekyll-readme-index (= 0.0.3)
|
||||
jekyll-redirect-from (= 0.11.0)
|
||||
jekyll-relative-links (= 0.2.1)
|
||||
jekyll-sass-converter (= 1.3.0)
|
||||
jekyll-seo-tag (= 2.1.0)
|
||||
jekyll-sitemap (= 0.12.0)
|
||||
jekyll-swiss (= 0.4.0)
|
||||
jekyll-theme-primer (= 0.1.1)
|
||||
jekyll-titles-from-headings (= 0.1.2)
|
||||
jemoji (= 0.7.0)
|
||||
kramdown (= 1.11.1)
|
||||
liquid (= 3.0.6)
|
||||
listen (= 3.0.6)
|
||||
mercenary (~> 0.3)
|
||||
minima (= 2.0.0)
|
||||
rouge (= 1.11.1)
|
||||
terminal-table (~> 1.4)
|
||||
github-pages-health-check (1.3.0)
|
||||
addressable (~> 2.3)
|
||||
net-dns (~> 0.8)
|
||||
octokit (~> 4.0)
|
||||
public_suffix (~> 2.0)
|
||||
typhoeus (~> 0.7)
|
||||
html-pipeline (2.4.2)
|
||||
activesupport (>= 2)
|
||||
nokogiri (>= 1.4)
|
||||
i18n (0.7.0)
|
||||
jekyll (3.2.1)
|
||||
jekyll (3.3.1)
|
||||
addressable (~> 2.4)
|
||||
colorator (~> 1.0)
|
||||
jekyll-sass-converter (~> 1.0)
|
||||
jekyll-watch (~> 1.1)
|
||||
|
@@ -33,26 +73,43 @@ GEM
|
|||
pathutil (~> 0.9)
|
||||
rouge (~> 1.7)
|
||||
safe_yaml (~> 1.0)
|
||||
jekyll-avatar (0.4.2)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-coffeescript (1.0.1)
|
||||
coffee-script (~> 2.2)
|
||||
jekyll-feed (0.5.1)
|
||||
jekyll-default-layout (0.1.4)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-feed (0.8.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-gist (1.4.0)
|
||||
octokit (~> 4.2)
|
||||
jekyll-github-metadata (2.0.2)
|
||||
jekyll-github-metadata (2.2.0)
|
||||
jekyll (~> 3.1)
|
||||
octokit (~> 4.0)
|
||||
octokit (~> 4.0, != 4.4.0)
|
||||
jekyll-mentions (1.2.0)
|
||||
activesupport (~> 4.0)
|
||||
html-pipeline (~> 2.3)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-optional-front-matter (0.1.2)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-paginate (1.1.0)
|
||||
jekyll-readme-index (0.0.3)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-redirect-from (0.11.0)
|
||||
jekyll (>= 2.0)
|
||||
jekyll-relative-links (0.2.1)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-sass-converter (1.3.0)
|
||||
sass (~> 3.2)
|
||||
jekyll-seo-tag (2.0.0)
|
||||
jekyll (~> 3.1)
|
||||
jekyll-sitemap (0.10.0)
|
||||
jekyll-seo-tag (2.1.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-sitemap (0.12.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-swiss (0.4.0)
|
||||
jekyll-theme-primer (0.1.1)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-titles-from-headings (0.1.2)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-watch (1.5.0)
|
||||
listen (~> 3.0, < 3.1)
|
||||
jemoji (0.7.0)
|
||||
|
@@ -68,52 +125,40 @@ GEM
|
|||
rb-inotify (>= 0.9.7)
|
||||
mercenary (0.3.6)
|
||||
mini_portile2 (2.1.0)
|
||||
minima (1.1.0)
|
||||
minitest (5.9.0)
|
||||
minima (2.0.0)
|
||||
minitest (5.10.1)
|
||||
multipart-post (2.0.0)
|
||||
nokogiri (1.6.8)
|
||||
net-dns (0.8.0)
|
||||
nokogiri (1.6.8.1)
|
||||
mini_portile2 (~> 2.1.0)
|
||||
pkg-config (~> 1.1.7)
|
||||
octokit (4.3.0)
|
||||
sawyer (~> 0.7.0, >= 0.5.3)
|
||||
octokit (4.6.2)
|
||||
sawyer (~> 0.8.0, >= 0.5.3)
|
||||
pathutil (0.14.0)
|
||||
forwardable-extended (~> 2.6)
|
||||
pkg-config (1.1.7)
|
||||
rb-fsevent (0.9.7)
|
||||
public_suffix (2.0.4)
|
||||
rb-fsevent (0.9.8)
|
||||
rb-inotify (0.9.7)
|
||||
ffi (>= 0.5.0)
|
||||
rouge (1.11.1)
|
||||
safe_yaml (1.0.4)
|
||||
sass (3.4.22)
|
||||
sawyer (0.7.0)
|
||||
addressable (>= 2.3.5, < 2.5)
|
||||
faraday (~> 0.8, < 0.10)
|
||||
sawyer (0.8.1)
|
||||
addressable (>= 2.3.5, < 2.6)
|
||||
faraday (~> 0.8, < 1.0)
|
||||
terminal-table (1.7.3)
|
||||
unicode-display_width (~> 1.1.1)
|
||||
thread_safe (0.3.5)
|
||||
typhoeus (0.8.0)
|
||||
ethon (>= 0.8.0)
|
||||
tzinfo (1.2.2)
|
||||
thread_safe (~> 0.1)
|
||||
unicode-display_width (1.1.2)
|
||||
|
||||
PLATFORMS
|
||||
ruby
|
||||
|
||||
DEPENDENCIES
|
||||
activesupport (= 4.2.7)
|
||||
jekyll (= 3.2.1)
|
||||
jekyll-coffeescript (= 1.0.1)
|
||||
jekyll-feed (= 0.5.1)
|
||||
jekyll-gist (= 1.4.0)
|
||||
jekyll-github-metadata (= 2.0.2)
|
||||
jekyll-mentions (= 1.2.0)
|
||||
jekyll-paginate (= 1.1.0)
|
||||
jekyll-redirect-from (= 0.11.0)
|
||||
jekyll-sass-converter (= 1.3.0)
|
||||
jekyll-seo-tag (= 2.0.0)
|
||||
jekyll-sitemap (= 0.10.0)
|
||||
jemoji (= 0.7.0)
|
||||
kramdown (= 1.11.1)
|
||||
liquid (= 3.0.6)
|
||||
listen (= 3.0.6)
|
||||
minima (= 1.1.0)
|
||||
rouge (= 1.11.1)
|
||||
github-pages
|
||||
|
||||
BUNDLED WITH
|
||||
1.11.2
|
||||
1.13.6
|
||||
|
|
_config.yml (11 lines changed)

@@ -1,11 +1,12 @@
name: Kubernetes
title: Kubernetes
description: Production-Grade Container Orchestration
markdown: kramdown
kramdown:
  input: GFM
  html_to_native: true
  hard_wrap: false
  syntax_highlighter: rouge
baseurl: /
incremental: true

safe: false

@@ -30,3 +31,11 @@ permalink: pretty

gems:
- jekyll-redirect-from
- jekyll-feed
- jekyll-sitemap
- jekyll-seo-tag

# SEO
logo: /images/favicon.png
twitter:
  username: kubernetesio

@@ -14,6 +14,8 @@ toc:
    path: /docs/tasks/configure-pod-container/assign-cpu-ram-container/
  - title: Configuring a Pod to Use a Volume for Storage
    path: /docs/tasks/configure-pod-container/configure-volume-storage/
  - title: Distributing Credentials Securely
    path: /docs/tasks/configure-pod-container/distribute-credentials-secure/

- title: Accessing Applications in a Cluster
  section:

@@ -34,6 +36,7 @@ toc:
  section:
  - title: Assigning Pods to Nodes
    path: /docs/tasks/administer-cluster/assign-pods-nodes/

  - title: Autoscaling the DNS Service in a Cluster
    path: /docs/tasks/administer-cluster/dns-horizontal-autoscaling/
  - title: Safely Draining a Node while Respecting Application SLOs

@@ -58,4 +58,6 @@ toc:
  - title: Running a Single-Instance Stateful Application
    path: /docs/tutorials/stateful-application/run-stateful-application/
  - title: Running a Replicated Stateful Application
    path: /docs/tutorials/stateful-application/run-replicated-stateful-application/
  - title: Running ZooKeeper, A CP Distributed System
    path: /docs/tutorials/stateful-application/zookeeper/

@@ -2,7 +2,6 @@
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
{% if !page.no_canonical %}<link rel="canonical" href="http://kubernetes.io{{page.url}}" />{% endif %}
<link rel="shortcut icon" type="image/png" href="/images/favicon.png">
<link href='https://fonts.googleapis.com/css?family=Roboto:400,100,100italic,300,300italic,400italic,500,500italic,700,700italic,900,900italic' rel='stylesheet' type='text/css'>
<link rel="stylesheet" href='https://fonts.googleapis.com/css?family=Roboto+Mono' type='text/css'>

@@ -14,7 +13,7 @@
<script src="/js/script.js"></script>
<script src="/js/sweetalert.min.js"></script>
<script src="/js/bootstrap.min.js"></script>
<title>Kubernetes - {{ title }}</title>
{% seo %}
</head>
<body>
<div id="cellophane" onclick="kub.toggleMenu()"></div>

@@ -712,7 +712,6 @@ dd
font-weight: 500
margin-bottom: 30px
padding-bottom: 10px
border-bottom: 1px solid #cccccc

// Make sure anchor links aren't hidden by the header
&:before

@@ -722,6 +721,9 @@ dd
height: $header-clearance
visibility: hidden

h1,h2
border-bottom: 1px solid #cccccc

h1
font-size: 32px
padding-right: 60px

@@ -731,9 +733,12 @@ dd

h3
font-size: 24px
font-weight: 300
margin-bottom: 5px

h4
font-size: 20px
margin-bottom: 0px

h5, h6
font-size: 16px

@@ -74,7 +74,7 @@ a node for testing.

If you specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will
create pods on nodes which match that [node
selector](https://github.com/kubernetes/kubernetes.github.io/tree/{{page.docsbranch}}/docs/user-guide/node-selection).
selector](/docs/user-guide/node-selection/).
If you specify a `scheduler.alpha.kubernetes.io/affinity` annotation in `.spec.template.metadata.annotations`,
then the DaemonSet controller will create pods on nodes which match that [node affinity](../../user-guide/node-selection/#alpha-feature-in-kubernetes-v12-node-affinity).
|
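For illustration only, here is a minimal sketch of a DaemonSet manifest that uses such a node selector. The `extensions/v1beta1` API group, the `ssd-monitor` name, the `disktype: ssd` label, and the `nginx` image are assumptions for this sketch, not taken from this diff:

```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: ssd-monitor              # hypothetical name, for illustration only
spec:
  template:
    metadata:
      labels:
        app: ssd-monitor
    spec:
      # The DaemonSet controller creates a Pod only on nodes that carry this label.
      nodeSelector:
        disktype: ssd
      containers:
      - name: main
        image: nginx             # placeholder image
```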
@@ -308,11 +308,11 @@ Minikube uses [libmachine](https://github.com/docker/machine/tree/master/libmach
For more information about minikube, see the [proposal](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/local-cluster-ux.md).

## Additional Links:
* **Goals and Non-Goals**: For the goals and non-goals of the minikube project, please see our [roadmap](./ROADMAP.md).
* **Development Guide**: See [CONTRIBUTING.md](./CONTRIBUTING.md) for an overview of how to send pull requests.
* **Building Minikube**: For instructions on how to build/test minikube from source, see the [build guide](./BUILD_GUIDE.md)
* **Adding a New Dependency**: For instructions on how to add a new dependency to minikube see the [adding dependencies guide](./ADD_DEPENDENCY.md)
* **Updating Kubernetes**: For instructions on how to add a new dependency to minikube see the [updating kubernetes guide](./UPDATE_KUBERNETES.md)
* **Goals and Non-Goals**: For the goals and non-goals of the minikube project, please see our [roadmap](https://github.com/kubernetes/minikube/blob/master/ROADMAP.md).
* **Development Guide**: See [CONTRIBUTING.md](https://github.com/kubernetes/minikube/blob/master/CONTRIBUTING.md) for an overview of how to send pull requests.
* **Building Minikube**: For instructions on how to build/test minikube from source, see the [build guide](https://github.com/kubernetes/minikube/blob/master/BUILD_GUIDE.md)
* **Adding a New Dependency**: For instructions on how to add a new dependency to minikube see the [adding dependencies guide](https://github.com/kubernetes/minikube/blob/master/ADD_DEPENDENCY.md)
* **Updating Kubernetes**: For instructions on how to update the Kubernetes version used by minikube, see the [updating kubernetes guide](https://github.com/kubernetes/minikube/blob/master/UPDATE_KUBERNETES.md)

## Community

@@ -0,0 +1,170 @@
|
|||
---
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
This page shows how to securely inject sensitive data, such as passwords and
|
||||
encryption keys, into Pods.
|
||||
{% endcapture %}
|
||||
|
||||
{% capture prerequisites %}
|
||||
|
||||
{% include task-tutorial-prereqs.md %}
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture steps %}
|
||||
|
||||
### Converting your secret data to a base-64 representation
|
||||
|
||||
Suppose you want to have two pieces of secret data: a username `my-app` and a password
|
||||
`39528$vdg7Jb`. First, use [Base64 encoding](https://www.base64encode.org/) to
|
||||
convert your username and password to a base-64 representation. Here's a Linux
|
||||
example:
|
||||
|
||||
echo 'my-app' | base64
|
||||
echo '39528$vdg7Jb' | base64
|
||||
|
||||
The output shows that the base-64 representation of your username is `bXktYXBwCg==`,
|
||||
and the base-64 representation of your password is `Mzk1MjgkdmRnN0piCg==`.
|
||||
|
||||
### Creating a Secret
|
||||
|
||||
Here is a configuration file you can use to create a Secret that holds your
|
||||
username and password:
|
||||
|
||||
{% include code.html language="yaml" file="secret.yaml" ghlink="/docs/tasks/administer-cluster/secret.yaml" %}
|
||||
|
||||
1. Create the Secret
|
||||
|
||||
kubectl create -f http://k8s.io/docs/tasks/administer-cluster/secret.yaml
|
||||
|
||||
**Note:** If you want to skip the Base64 encoding step, you can create a Secret
|
||||
by using the `kubectl create secret` command:
|
||||
|
||||
kubectl create secret generic test-secret --from-literal=username="my-app",password="39528$vdg7Jb"
|
||||
|
||||
1. View information about the Secret:
|
||||
|
||||
kubectl get secret test-secret
|
||||
|
||||
Output:
|
||||
|
||||
NAME TYPE DATA AGE
|
||||
test-secret Opaque 2 1m
|
||||
|
||||
|
||||
1. View more detailed information about the Secret:
|
||||
|
||||
kubectl describe secret test-secret
|
||||
|
||||
Output:
|
||||
|
||||
Name: test-secret
|
||||
Namespace: default
|
||||
Labels: <none>
|
||||
Annotations: <none>
|
||||
|
||||
Type: Opaque
|
||||
|
||||
Data
|
||||
====
|
||||
password: 13 bytes
|
||||
username: 7 bytes
|
||||
|
||||
### Creating a Pod that has access to the secret data through a Volume
|
||||
|
||||
Here is a configuration file you can use to create a Pod:
|
||||
|
||||
{% include code.html language="yaml" file="secret-pod.yaml" ghlink="/docs/tasks/administer-cluster/secret-pod.yaml" %}
|
||||
|
||||
1. Create the Pod:
|
||||
|
||||
kubectl create -f http://k8s.io/docs/tasks/administer-cluster/secret-pod.yaml
|
||||
|
||||
1. Verify that your Pod is running:
|
||||
|
||||
kubectl get pod secret-test-pod
|
||||
|
||||
Output:
|
||||
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
secret-test-pod 1/1 Running 0 42m
|
||||
|
||||
|
||||
1. Get a shell into the Container that is running in your Pod:
|
||||
|
||||
kubectl exec -it secret-test-pod -- /bin/bash
|
||||
|
||||
1. The secret data is exposed to the Container through a Volume mounted under
|
||||
`/etc/secret-volume`. In your shell, go to the directory where the secret data
|
||||
is exposed:
|
||||
|
||||
root@secret-test-pod:/# cd /etc/secret-volume
|
||||
|
||||
1. In your shell, list the files in the `/etc/secret-volume` directory:
|
||||
|
||||
root@secret-test-pod:/etc/secret-volume# ls
|
||||
|
||||
The output shows two files, one for each piece of secret data:
|
||||
|
||||
password username
|
||||
|
||||
1. In your shell, display the contents of the `username` and `password` files:
|
||||
|
||||
root@secret-test-pod:/etc/secret-volume# cat username password
|
||||
|
||||
The output is your username and password:
|
||||
|
||||
my-app
|
||||
39528$vdg7Jb
|
||||
|
||||
### Creating a Pod that has access to the secret data through environment variables
|
||||
|
||||
Here is a configuration file you can use to create a Pod:
|
||||
|
||||
{% include code.html language="yaml" file="secret-envars-pod.yaml" ghlink="/docs/tasks/administer-cluster/secret-envars-pod.yaml" %}
|
||||
|
||||
1. Create the Pod:
|
||||
|
||||
kubectl create -f http://k8s.io/docs/tasks/administer-cluster/secret-envars-pod.yaml
|
||||
|
||||
1. Verify that your Pod is running:
|
||||
|
||||
kubectl get pod secret-envars-test-pod
|
||||
|
||||
Output:
|
||||
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
secret-envars-test-pod 1/1 Running 0 4m
|
||||
|
||||
1. Get a shell into the Container that is running in your Pod:
|
||||
|
||||
kubectl exec -it secret-envars-test-pod -- /bin/bash
|
||||
|
||||
1. In your shell, display the environment variables:
|
||||
|
||||
root@secret-envars-test-pod:/# printenv
|
||||
|
||||
The output includes your username and password:
|
||||
|
||||
...
|
||||
SECRET_USERNAME=my-app
|
||||
...
|
||||
SECRET_PASSWORD=39528$vdg7Jb
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% capture whatsnext %}
|
||||
|
||||
* Learn more about [Secrets](/docs/user-guide/secrets/).
|
||||
* Learn about [Volumes](/docs/user-guide/volumes/).
|
||||
|
||||
#### Reference
|
||||
|
||||
* [Secret](docs/api-reference/v1/definitions/#_v1_secret)
|
||||
* [Volume](docs/api-reference/v1/definitions/#_v1_volume)
|
||||
* [Pod](docs/api-reference/v1/definitions/#_v1_pod)
|
||||
|
||||
{% endcapture %}
|
||||
|
||||
{% include templates/task.md %}
|
|
@@ -0,0 +1,19 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: secret-envars-test-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: envars-test-container
|
||||
image: nginx
|
||||
env:
|
||||
- name: SECRET_USERNAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: test-secret
|
||||
key: username
|
||||
- name: SECRET_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: test-secret
|
||||
key: password
|
|
@@ -0,0 +1,17 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: secret-test-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: test-container
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
# name must match the volume name below
|
||||
- name: secret-volume
|
||||
mountPath: /etc/secret-volume
|
||||
# The secret data is exposed to Containers in the Pod through a Volume.
|
||||
volumes:
|
||||
- name: secret-volume
|
||||
secret:
|
||||
secretName: test-secret
|
|
@@ -0,0 +1,7 @@
|
|||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: test-secret
|
||||
data:
|
||||
username: bXktYXBwCg==
|
||||
password: Mzk1MjgkdmRnN0piCg==
|
|
@@ -10,6 +10,7 @@ single thing, typically by giving a short sequence of steps.
|
|||
* [Defining Environment Variables for a Container](/docs/tasks/configure-pod-container/define-environment-variable-container/)
|
||||
* [Defining a Command and Arguments for a Container](/docs/tasks/configure-pod-container/define-command-argument-container/)
|
||||
* [Assigning CPU and RAM Resources to a Container](/docs/tasks/configure-pod-container/assign-cpu-ram-container/)
|
||||
* [Distributing Credentials Securely](/docs/tasks/configure-pod-container/distribute-credentials-secure/)
|
||||
|
||||
#### Accessing Applications in a Cluster
|
||||
|
||||
|
|
|
@@ -26,6 +26,8 @@ each of which has a sequence of steps.
|
|||
|
||||
* [Running a Replicated Stateful Application](/docs/tutorials/stateful-application/run-replicated-stateful-application/)
|
||||
|
||||
* [Running ZooKeeper, A CP Distributed System](/docs/tutorials/stateful-application/zookeeper/)
|
||||
|
||||
### What's next
|
||||
|
||||
If you would like to write a tutorial, see
|
||||
|
|
File diff suppressed because it is too large
|
@@ -0,0 +1,164 @@
|
|||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: zk-headless
|
||||
labels:
|
||||
app: zk-headless
|
||||
spec:
|
||||
ports:
|
||||
- port: 2888
|
||||
name: server
|
||||
- port: 3888
|
||||
name: leader-election
|
||||
clusterIP: None
|
||||
selector:
|
||||
app: zk
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: zk-config
|
||||
data:
|
||||
ensemble: "zk-0;zk-1;zk-2"
|
||||
jvm.heap: "2G"
|
||||
tick: "2000"
|
||||
init: "10"
|
||||
sync: "5"
|
||||
client.cnxns: "60"
|
||||
snap.retain: "3"
|
||||
purge.interval: "1"
|
||||
---
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodDisruptionBudget
|
||||
metadata:
|
||||
name: zk-budget
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: zk
|
||||
minAvailable: 2
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: zk
|
||||
spec:
|
||||
serviceName: zk-headless
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: zk
|
||||
annotations:
|
||||
pod.alpha.kubernetes.io/initialized: "true"
|
||||
scheduler.alpha.kubernetes.io/affinity: >
|
||||
{
|
||||
"podAntiAffinity": {
|
||||
"requiredDuringSchedulingRequiredDuringExecution": [{
|
||||
"labelSelector": {
|
||||
"matchExpressions": [{
|
||||
"key": "app",
|
||||
"operator": "In",
|
||||
"values": ["zk-headless"]
|
||||
}]
|
||||
},
|
||||
"topologyKey": "kubernetes.io/hostname"
|
||||
}]
|
||||
}
|
||||
}
|
||||
spec:
|
||||
containers:
|
||||
- name: k8szk
|
||||
imagePullPolicy: Always
|
||||
image: gcr.io/google_samples/k8szk:v1
|
||||
resources:
|
||||
requests:
|
||||
memory: "4Gi"
|
||||
cpu: "1"
|
||||
ports:
|
||||
- containerPort: 2181
|
||||
name: client
|
||||
- containerPort: 2888
|
||||
name: server
|
||||
- containerPort: 3888
|
||||
name: leader-election
|
||||
env:
|
||||
- name : ZK_ENSEMBLE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: zk-config
|
||||
key: ensemble
|
||||
- name : ZK_HEAP_SIZE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: zk-config
|
||||
key: jvm.heap
|
||||
- name : ZK_TICK_TIME
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: zk-config
|
||||
key: tick
|
||||
- name : ZK_INIT_LIMIT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: zk-config
|
||||
key: init
|
||||
- name : ZK_SYNC_LIMIT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: zk-config
|
||||
key: tick
|
||||
- name : ZK_MAX_CLIENT_CNXNS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: zk-config
|
||||
key: client.cnxns
|
||||
- name: ZK_SNAP_RETAIN_COUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: zk-config
|
||||
key: snap.retain
|
||||
- name: ZK_PURGE_INTERVAL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: zk-config
|
||||
key: purge.interval
|
||||
- name: ZK_CLIENT_PORT
|
||||
value: "2181"
|
||||
- name: ZK_SERVER_PORT
|
||||
value: "2888"
|
||||
- name: ZK_ELECTION_PORT
|
||||
value: "3888"
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- zkGenConfig.sh && zkServer.sh start-foreground
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- "zkOk.sh"
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 5
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- "zkOk.sh"
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 5
|
||||
volumeMounts:
|
||||
- name: datadir
|
||||
mountPath: /var/lib/zookeeper
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
fsGroup: 1000
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: datadir
|
||||
annotations:
|
||||
volume.alpha.kubernetes.io/storage-class: anything
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
resources:
|
||||
requests:
|
||||
storage: 20Gi
|
|
@@ -395,6 +395,75 @@ Events:
|
|||
You can set the `.spec.revisionHistoryLimit` field to specify how much revision history of this Deployment you want to keep. By default,
all revision history is kept; explicitly setting this field to `0` means the Deployment cannot be rolled back.
|
||||
|
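As a hedged sketch of how this field might appear in a Deployment manifest (assuming the `extensions/v1beta1` Deployment API of this era; the replica count and image tag are illustrative, not taken from this page):

```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  # Keep only the two most recent old ReplicaSets for rollbacks;
  # setting this to 0 would make rollback impossible.
  revisionHistoryLimit: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
```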
||||
## Scaling a Deployment
|
||||
|
||||
You can scale a Deployment by using the following command:
|
||||
|
||||
```shell
|
||||
$ kubectl scale deployment nginx-deployment --replicas 10
|
||||
deployment "nginx-deployment" scaled
|
||||
```
|
||||
|
||||
Assuming [horizontal pod autoscaling](/docs/user-guide/horizontal-pod-autoscaling/walkthrough.md) is enabled
|
||||
in your cluster, you can set up an autoscaler for your Deployment and choose the minimum and maximum number of
|
||||
Pods you want to run based on the CPU utilization of your existing Pods.
|
||||
|
||||
```shell
|
||||
$ kubectl autoscale deployment nginx-deployment --min=10 --max=15 --cpu-percent=80
|
||||
deployment "nginx-deployment" autoscaled
|
||||
```
|
||||
|
||||
RollingUpdate Deployments support running multiple versions of an application at the same time. When you
|
||||
or an autoscaler scales a RollingUpdate Deployment that is in the middle of a rollout (either in progress
|
||||
or paused), then the Deployment controller will balance the additional replicas in the existing active
|
||||
ReplicaSets (ReplicaSets with Pods) in order to mitigate risk. This is called *proportional scaling*.
|
||||
|
||||
For example, you are running a Deployment with 10 replicas, [maxSurge](#max-surge)=3, and [maxUnavailable](#max-unavailable)=2.
|
||||
|
||||
```shell
|
||||
$ kubectl get deploy
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
nginx-deployment 10 10 10 10 50s
|
||||
```
|
||||
|
||||
You update to a new image which happens to be unresolvable from inside the cluster.
|
||||
|
||||
```shell
|
||||
$ kubectl set image deploy/nginx-deployment nginx=nginx:sometag
|
||||
deployment "nginx-deployment" image updated
|
||||
```
|
||||
|
||||
The image update starts a new rollout with ReplicaSet nginx-deployment-1989198191 but it's blocked due to the
|
||||
maxUnavailable requirement that we mentioned above.
|
||||
|
||||
```shell
|
||||
$ kubectl get rs
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
nginx-deployment-1989198191 5 5 0 9s
|
||||
nginx-deployment-618515232 8 8 8 1m
|
||||
```
|
||||
|
||||
Then a new scaling request for the Deployment comes along. The autoscaler increments the Deployment replicas
|
||||
to 15. The Deployment controller needs to decide where to add these new 5 replicas. If we weren't using
|
||||
proportional scaling, all 5 of them would be added in the new ReplicaSet. With proportional scaling, we
|
||||
spread the additional replicas across all ReplicaSets. Bigger proportions go to the ReplicaSets with the
|
||||
most replicas and lower proportions go to ReplicaSets with fewer replicas. Any leftovers are added to the
|
||||
ReplicaSet with the most replicas. ReplicaSets with zero replicas are not scaled up.
|
||||
|
||||
In our example above, 3 replicas will be added to the old ReplicaSet and 2 replicas will be added to the
|
||||
new ReplicaSet. The rollout process should eventually move all replicas to the new ReplicaSet, assuming
|
||||
the new replicas become healthy.
|
||||
|
||||
```shell
|
||||
$ kubectl get deploy
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
nginx-deployment 15 18 7 8 7m
|
||||
$ kubectl get rs
|
||||
NAME DESIRED CURRENT READY AGE
|
||||
nginx-deployment-1989198191 7 7 0 7m
|
||||
nginx-deployment-618515232 11 11 11 7m
|
||||
```
|
||||
|
||||
## Pausing and Resuming a Deployment
|
||||
|
||||
You can also pause a Deployment mid-way and then resume it. A use case is to support canary deployment.
|
||||
|
|
feed.xml (29 lines changed)
@@ -1,29 +0,0 @@
|
|||
---
|
||||
---
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
|
||||
<channel>
|
||||
<title>{{ site.title | xml_escape }}</title>
|
||||
<description>{{ site.description | xml_escape }}</description>
|
||||
<link>{{ site.url }}{{ site.baseurl }}/</link>
|
||||
<atom:link href="{{ "/feed.xml" | prepend: site.baseurl | prepend: site.url }}" rel="self" type="application/rss+xml" />
|
||||
<pubDate>{{ site.time | date_to_rfc822 }}</pubDate>
|
||||
<lastBuildDate>{{ site.time | date_to_rfc822 }}</lastBuildDate>
|
||||
<generator>Jekyll v{{ jekyll.version }}</generator>
|
||||
{% for post in site.posts limit:10 %}
|
||||
<item>
|
||||
<title>{{ post.title | xml_escape }}</title>
|
||||
<description>{{ post.content | xml_escape }}</description>
|
||||
<pubDate>{{ post.date | date_to_rfc822 }}</pubDate>
|
||||
<link>{{ post.url | prepend: site.baseurl | prepend: site.url }}</link>
|
||||
<guid isPermaLink="true">{{ post.url | prepend: site.baseurl | prepend: site.url }}</guid>
|
||||
{% for tag in post.tags %}
|
||||
<category>{{ tag | xml_escape }}</category>
|
||||
{% endfor %}
|
||||
{% for cat in post.categories %}
|
||||
<category>{{ cat | xml_escape }}</category>
|
||||
{% endfor %}
|
||||
</item>
|
||||
{% endfor %}
|
||||
</channel>
|
||||
</rss>
|
index.html (10 lines changed)
@@ -1,9 +1,8 @@
|
|||
---
|
||||
title: Production-Grade Container Orchestration
|
||||
---
|
||||
|
||||
<!Doctype html>
|
||||
<html id="home">
|
||||
|
||||
{% include head-header.html %}
|
||||
|
||||
|
@@ -22,7 +21,7 @@ title: Production-Grade Container Orchestration
|
|||
<!--<img src="images/logos/redhat_logo.png">-->
|
||||
<!--<img src="images/logos/wepay_logo.png">-->
|
||||
<!--<img src="images/logos/verizon_logo.png">-->
|
||||
<!--</div>-->
|
||||
<!--</div>-->
|
||||
</section>
|
||||
|
||||
<!-- OCEAN NODES -->
|
||||
|
@@ -110,7 +109,7 @@ title: Production-Grade Container Orchestration
|
|||
exposing secrets in your stack configuration.</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<div class="feature-box">
|
||||
<div>
|
||||
<h4><a href="/docs/user-guide/persistent-volumes/">Storage orchestration</a></h4>
|
||||
|
@@ -198,9 +197,8 @@ title: Production-Grade Container Orchestration
|
|||
ga('create', 'UA-36037335-10', 'auto');
|
||||
ga('send', 'pageview');
|
||||
</script>
|
||||
<!-- Start of AnswerDash script
|
||||
<script>var AnswerDash;!function(e,t,n,s,a){if(!t.getElementById(s)){var i,r=t.createElement(n),c=t.getElementsByTagName(n)[0];e[a]||(i=e[a]=function(){i.__oninit.push(arguments)},i.__oninit=[]),r.type="text/javascript",r.async=!0,r.src="https://p1.answerdash.com/answerdash.min.js?siteid=756",r.setAttribute("id",s),c.parentNode.insertBefore(r,c)}}(window,document,"script","answerdash-script","AnswerDash");</script>
|
||||
<!-- End of AnswerDash script -->
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
|
sitemap.xml (18 lines changed)
@@ -1,18 +0,0 @@
|
|||
---
|
||||
---
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<urlset
|
||||
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
|
||||
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
|
||||
|
||||
<url>
|
||||
<loc>http://kubernetes.io/</loc>
|
||||
<lastmod>{{ site.time | date_to_xmlschema }}</lastmod>
|
||||
</url>
|
||||
{% for page in site.pages %}{% if page.url != "/404.html" and page.url != "/sitemap.xml" and page.url != "/css/styles.css" %}<url>
|
||||
<loc>http://kubernetes.io{{ page.url }}</loc>
|
||||
<lastmod>{% if page.date %}{{ page.date | date_to_xmlschema }}{% else %}{{ site.time | date_to_xmlschema }}{% endif %}</lastmod>
|
||||
</url>{% endif %}{% endfor %}
|
||||
</urlset>
|
|
@@ -38,6 +38,8 @@ import (
|
|||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
expvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation"
|
||||
"k8s.io/kubernetes/pkg/capabilities"
|
||||
"k8s.io/kubernetes/pkg/apis/policy"
|
||||
policyvalidation "k8s.io/kubernetes/pkg/apis/policy/validation"
|
||||
"k8s.io/kubernetes/pkg/registry/batch/job"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
|
@@ -147,6 +149,11 @@ func validateObject(obj runtime.Object) (errors field.ErrorList) {
|
|||
t.Namespace = api.NamespaceDefault
|
||||
}
|
||||
errors = apps_validation.ValidateStatefulSet(t)
|
||||
case *policy.PodDisruptionBudget:
|
||||
if t.Namespace == "" {
|
||||
t.Namespace = api.NamespaceDefault
|
||||
}
|
||||
errors = policyvalidation.ValidatePodDisruptionBudget(t)
|
||||
default:
|
||||
errors = field.ErrorList{}
|
||||
errors = append(errors, field.InternalError(field.NewPath(""), fmt.Errorf("no validation defined for %#v", obj)))
|
||||
|
@@ -323,6 +330,7 @@ func TestExampleObjectSchemas(t *testing.T) {
|
|||
"mysql-configmap": {&api.ConfigMap{}},
|
||||
"mysql-statefulset": {&apps.StatefulSet{}},
|
||||
"web": {&api.Service{}, &apps.StatefulSet{}},
|
||||
"zookeeper": {&api.Service{}, &api.ConfigMap{}, &policy.PodDisruptionBudget{}, &apps.StatefulSet{}},
|
||||
},
|
||||
}
|
||||
|
||||
|
|