diff --git a/404.md b/404.md
index 6090a89a3f4..71497ddcbe1 100644
--- a/404.md
+++ b/404.md
@@ -6,7 +6,7 @@ permalink: /404.html
-
+
+
Kubernetes - {{ title }}
@@ -61,4 +64,4 @@
-
\ No newline at end of file
+
diff --git a/_includes/templates/concept-overview.md b/_includes/templates/concept-overview.md
new file mode 100644
index 00000000000..07a0d14ce04
--- /dev/null
+++ b/_includes/templates/concept-overview.md
@@ -0,0 +1,170 @@
+{% if concept %}
+
+# Overview of {{concept}}s
+
+{% if what_is %}
+
+### What is a {{ concept }}?
+
+{{ what_is }}
+
+{% else %}
+
+### ERROR: You must define a "what_is" block
+{: style="color:red" }
+
+This template requires that you explain what this concept is. This explanation will
+be displayed under the heading, **What is a {{ concept }}?**
+
+To get rid of this message and take advantage of this template, define the `what_is`
+variable and populate it with content.
+
+```liquid
+{% raw %}{% capture what_is %}{% endraw %}
+A {{ concept }} does x and y and z...(etc, etc, text goes on)
+{% raw %}{% endcapture %}{% endraw %}
+```
+{% endif %}
+
+
+{% if when_to_use %}
+
+### When to use {{ concept }}s
+
+{{ when_to_use }}
+
+{% else %}
+
+### ERROR: You must define a "when_to_use" block
+{: style="color:red" }
+
+This template requires that you explain when to use this object. This explanation will
+be displayed under the heading, **When to use {{ concept }}s**
+
+To get rid of this message and take advantage of this template, define the `when_to_use`
+variable and populate it with content.
+
+```liquid
+{% raw %}{% capture when_to_use %}{% endraw %}
+You should use {{ concept }} when...
+{% raw %}{% endcapture %}{% endraw %}
+```
+{% endif %}
+
+
+{% if when_not_to_use %}
+
+### When not to use {{ concept }}s (alternatives)
+
+{{ when_not_to_use }}
+
+{% else %}
+
+### ERROR: You must define a "when_not_to_use" block
+{: style="color:red" }
+
+This template requires that you explain when not to use this object. This explanation will
+be displayed under the heading, **When not to use {{ concept }}s (alternatives)**
+
+To get rid of this message and take advantage of this template, define the `when_not_to_use`
+block and populate it with content.
+
+```liquid
+{% raw %}{% capture when_not_to_use %}{% endraw %}
+You should not use {{ concept }} if...
+{% raw %}{% endcapture %}{% endraw %}
+```
+{% endif %}
+
+
+{% if status %}
+
+### {{ concept }} status
+
+{{ status }}
+
+{% else %}
+
+### ERROR: You must define a "status" block
+{: style="color:red" }
+
+This template requires that you explain the current status of support for this object.
+This explanation will be displayed under the heading, **{{ concept }} status**.
+
+To get rid of this message and take advantage of this template, define the `status`
+block and populate it with content.
+
+```liquid
+{% raw %}{% capture status %}{% endraw %}
+The current status of {{ concept }}s is...
+{% raw %}{% endcapture %}{% endraw %}
+```
+{% endif %}
+
+
+{% if required_fields %}
+
+### {{ concept }} spec
+
+#### Required Fields
+
+{{ required_fields }}
+
+{% else %}
+
+### ERROR: You must define a "required_fields" block
+{: style="color:red" }
+
+This template requires that you provide a Markdown list of required fields for this
+object. This list will be displayed under the heading **Required Fields**.
+
+To get rid of this message and take advantage of this template, define the `required_fields`
+block and populate it with content.
+
+```liquid
+{% raw %}{% capture required_fields %}
+* `kind`: Always `Pod`.
+* `apiVersion`: Currently `v1`.
+* `metadata`: An object containing:
+ * `name`: Required if `generateName` is not specified. The name of this pod.
+ It must be an
+ [RFC1035](https://www.ietf.org/rfc/rfc1035.txt) compatible value and be
+ unique within the namespace.
+{% endcapture %}{% endraw %}
+```
+
+**Note**: You can also define a `common_fields` block that will go under a heading
+directly underneath **Required Fields** called **Common Fields**, but it is
+not required.
+{% endif %}
+
+
+{% if common_fields %}
+
+#### Common Fields
+
+{{ common_fields }}
+
+{% endif %}
+
+
+
+
+{% else %}
+
+### ERROR: You must define a "concept" variable
+{: style="color:red" }
+
+This template requires a variable called `concept` that is simply the name of the
+concept for which you are giving an overview. This will be displayed in the
+headings for the document.
+
+To get rid of this message and take advantage of this template, define `concept`:
+
+```liquid
+{% raw %}{% assign concept="Replication Controller" %}{% endraw %}
+```
+
+Complete this task, then we'll walk you through preparing the rest of the document.
+
+{% endif %}
diff --git a/_includes/templates/landing-page.md b/_includes/templates/landing-page.md
new file mode 100644
index 00000000000..28bf9cebe57
--- /dev/null
+++ b/_includes/templates/landing-page.md
@@ -0,0 +1,121 @@
+{% if page.cards %}
+
+
+
+{% for card in page.cards %}{% if card.title %}
+
+
{{card.title}}
+
{% if card.image %}
{% endif %}{{card.description}}
+
+{% endif %}{% endfor %}
+
+
+{% else %}
+
+### ERROR: You must define "cards" front-matter YAML
+{: style="color:red" }
+
+This template requires that you insert YAML at the top of your document
+that defines the "cards" you'd like to display on the page. The cards will
+render in clickable boxes.
+
+To get rid of this message and take advantage of this template, define `cards`:
+
+```yaml
+---
+cards:
+- progression: no
+- card:
+ title: Mean Stack
+ image: /docs/meanstack/image_0.png
+ description: Lorem ipsum dolor it verberum.
+- card:
+ title: Guestbook + Redis
+ image: /images/docs/redis.svg
+ description: Lorem ipsum dolor it verberum.
+- card:
+ title: Cloud Native Cassandra
+ image: /images/docs/cassandra.svg
+ description: Lorem ipsum dolor it verberum.
+- card:
+ title: WordPress + MySQL
+ image: /images/docs/wordpress.svg
+ description: Lorem ipsum dolor it verberum.
+---
+```
+
+**Note:** If `progression` is set to `yes` then a "Start Here!" icon will be
+placed on the first card and arrows suggesting linear reading will be overlayed
+between the other cards, telling the reader that they should explore the content
+in a certain order.
+
+{% endif %}
diff --git a/_includes/v1.2/extensions-v1beta1-operations.html b/_includes/v1.2/extensions-v1beta1-operations.html
index b7f249384fe..ff79dedd173 100755
--- a/_includes/v1.2/extensions-v1beta1-operations.html
+++ b/_includes/v1.2/extensions-v1beta1-operations.html
@@ -181,7 +181,7 @@
200 |
success |
-v1beta1.DaemonSetList |
+v1beta1.DaemonSetList |
@@ -321,7 +321,7 @@
200 |
success |
-v1beta1.DeploymentList |
+v1beta1.DeploymentList |
@@ -461,7 +461,7 @@
200 |
success |
-v1beta1.HorizontalPodAutoscalerList |
+v1beta1.HorizontalPodAutoscalerList |
@@ -601,7 +601,7 @@
200 |
success |
-v1beta1.IngressList |
+v1beta1.IngressList |
@@ -741,7 +741,7 @@
200 |
success |
-v1beta1.JobList |
+v1beta1.JobList |
@@ -889,7 +889,7 @@
200 |
success |
-v1beta1.DaemonSetList |
+v1beta1.DaemonSetList |
@@ -1037,7 +1037,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -1119,7 +1119,7 @@
body |
|
true |
-v1beta1.DaemonSet |
+v1beta1.DaemonSet |
|
@@ -1153,7 +1153,7 @@
200 |
success |
-v1beta1.DaemonSet |
+v1beta1.DaemonSet |
@@ -1285,7 +1285,7 @@
200 |
success |
-v1beta1.DaemonSet |
+v1beta1.DaemonSet |
@@ -1367,7 +1367,7 @@
body |
|
true |
-v1beta1.DaemonSet |
+v1beta1.DaemonSet |
|
@@ -1409,7 +1409,7 @@
200 |
success |
-v1beta1.DaemonSet |
+v1beta1.DaemonSet |
@@ -1491,7 +1491,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -1533,7 +1533,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -1615,7 +1615,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -1657,7 +1657,7 @@
200 |
success |
-v1beta1.DaemonSet |
+v1beta1.DaemonSet |
@@ -1745,7 +1745,7 @@
body |
|
true |
-v1beta1.DaemonSet |
+v1beta1.DaemonSet |
|
@@ -1787,7 +1787,7 @@
200 |
success |
-v1beta1.DaemonSet |
+v1beta1.DaemonSet |
@@ -1935,7 +1935,7 @@
200 |
success |
-v1beta1.DeploymentList |
+v1beta1.DeploymentList |
@@ -2083,7 +2083,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -2165,7 +2165,7 @@
body |
|
true |
-v1beta1.Deployment |
+v1beta1.Deployment |
|
@@ -2199,7 +2199,7 @@
200 |
success |
-v1beta1.Deployment |
+v1beta1.Deployment |
@@ -2331,7 +2331,7 @@
200 |
success |
-v1beta1.Deployment |
+v1beta1.Deployment |
@@ -2413,7 +2413,7 @@
body |
|
true |
-v1beta1.Deployment |
+v1beta1.Deployment |
|
@@ -2455,7 +2455,7 @@
200 |
success |
-v1beta1.Deployment |
+v1beta1.Deployment |
@@ -2537,7 +2537,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -2579,7 +2579,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -2661,7 +2661,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -2703,7 +2703,7 @@
200 |
success |
-v1beta1.Deployment |
+v1beta1.Deployment |
@@ -2791,7 +2791,7 @@
body |
|
true |
-v1beta1.DeploymentRollback |
+v1beta1.DeploymentRollback |
|
@@ -2833,7 +2833,7 @@
200 |
success |
-v1beta1.DeploymentRollback |
+v1beta1.DeploymentRollback |
@@ -2949,7 +2949,7 @@
200 |
success |
-v1beta1.Scale |
+v1beta1.Scale |
@@ -3031,7 +3031,7 @@
body |
|
true |
-v1beta1.Scale |
+v1beta1.Scale |
|
@@ -3073,7 +3073,7 @@
200 |
success |
-v1beta1.Scale |
+v1beta1.Scale |
@@ -3155,7 +3155,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -3197,7 +3197,7 @@
200 |
success |
-v1beta1.Scale |
+v1beta1.Scale |
@@ -3285,7 +3285,7 @@
body |
|
true |
-v1beta1.Deployment |
+v1beta1.Deployment |
|
@@ -3327,7 +3327,7 @@
200 |
success |
-v1beta1.Deployment |
+v1beta1.Deployment |
@@ -3475,7 +3475,7 @@
200 |
success |
-v1beta1.HorizontalPodAutoscalerList |
+v1beta1.HorizontalPodAutoscalerList |
@@ -3623,7 +3623,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -3705,7 +3705,7 @@
body |
|
true |
-v1beta1.HorizontalPodAutoscaler |
+v1beta1.HorizontalPodAutoscaler |
|
@@ -3739,7 +3739,7 @@
200 |
success |
-v1beta1.HorizontalPodAutoscaler |
+v1beta1.HorizontalPodAutoscaler |
@@ -3871,7 +3871,7 @@
200 |
success |
-v1beta1.HorizontalPodAutoscaler |
+v1beta1.HorizontalPodAutoscaler |
@@ -3953,7 +3953,7 @@
body |
|
true |
-v1beta1.HorizontalPodAutoscaler |
+v1beta1.HorizontalPodAutoscaler |
|
@@ -3995,7 +3995,7 @@
200 |
success |
-v1beta1.HorizontalPodAutoscaler |
+v1beta1.HorizontalPodAutoscaler |
@@ -4077,7 +4077,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -4119,7 +4119,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -4201,7 +4201,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -4243,7 +4243,7 @@
200 |
success |
-v1beta1.HorizontalPodAutoscaler |
+v1beta1.HorizontalPodAutoscaler |
@@ -4331,7 +4331,7 @@
body |
|
true |
-v1beta1.HorizontalPodAutoscaler |
+v1beta1.HorizontalPodAutoscaler |
|
@@ -4373,7 +4373,7 @@
200 |
success |
-v1beta1.HorizontalPodAutoscaler |
+v1beta1.HorizontalPodAutoscaler |
@@ -4521,7 +4521,7 @@
200 |
success |
-v1beta1.IngressList |
+v1beta1.IngressList |
@@ -4669,7 +4669,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -4751,7 +4751,7 @@
body |
|
true |
-v1beta1.Ingress |
+v1beta1.Ingress |
|
@@ -4785,7 +4785,7 @@
200 |
success |
-v1beta1.Ingress |
+v1beta1.Ingress |
@@ -4917,7 +4917,7 @@
200 |
success |
-v1beta1.Ingress |
+v1beta1.Ingress |
@@ -4999,7 +4999,7 @@
body |
|
true |
-v1beta1.Ingress |
+v1beta1.Ingress |
|
@@ -5041,7 +5041,7 @@
200 |
success |
-v1beta1.Ingress |
+v1beta1.Ingress |
@@ -5123,7 +5123,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -5165,7 +5165,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -5247,7 +5247,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -5289,7 +5289,7 @@
200 |
success |
-v1beta1.Ingress |
+v1beta1.Ingress |
@@ -5377,7 +5377,7 @@
body |
|
true |
-v1beta1.Ingress |
+v1beta1.Ingress |
|
@@ -5419,7 +5419,7 @@
200 |
success |
-v1beta1.Ingress |
+v1beta1.Ingress |
@@ -5567,7 +5567,7 @@
200 |
success |
-v1beta1.JobList |
+v1beta1.JobList |
@@ -5715,7 +5715,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -5797,7 +5797,7 @@
body |
|
true |
-v1beta1.Job |
+v1beta1.Job |
|
@@ -5831,7 +5831,7 @@
200 |
success |
-v1beta1.Job |
+v1beta1.Job |
@@ -5963,7 +5963,7 @@
200 |
success |
-v1beta1.Job |
+v1beta1.Job |
@@ -6045,7 +6045,7 @@
body |
|
true |
-v1beta1.Job |
+v1beta1.Job |
|
@@ -6087,7 +6087,7 @@
200 |
success |
-v1beta1.Job |
+v1beta1.Job |
@@ -6169,7 +6169,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -6211,7 +6211,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -6293,7 +6293,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -6335,7 +6335,7 @@
200 |
success |
-v1beta1.Job |
+v1beta1.Job |
@@ -6423,7 +6423,7 @@
body |
|
true |
-v1beta1.Job |
+v1beta1.Job |
|
@@ -6465,7 +6465,7 @@
200 |
success |
-v1beta1.Job |
+v1beta1.Job |
@@ -6613,7 +6613,7 @@
200 |
success |
-v1beta1.ReplicaSetList |
+v1beta1.ReplicaSetList |
@@ -6761,7 +6761,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -6843,7 +6843,7 @@
body |
|
true |
-v1beta1.ReplicaSet |
+v1beta1.ReplicaSet |
|
@@ -6877,7 +6877,7 @@
200 |
success |
-v1beta1.ReplicaSet |
+v1beta1.ReplicaSet |
@@ -7009,7 +7009,7 @@
200 |
success |
-v1beta1.ReplicaSet |
+v1beta1.ReplicaSet |
@@ -7091,7 +7091,7 @@
body |
|
true |
-v1beta1.ReplicaSet |
+v1beta1.ReplicaSet |
|
@@ -7133,7 +7133,7 @@
200 |
success |
-v1beta1.ReplicaSet |
+v1beta1.ReplicaSet |
@@ -7215,7 +7215,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -7257,7 +7257,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -7339,7 +7339,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -7381,7 +7381,7 @@
200 |
success |
-v1beta1.ReplicaSet |
+v1beta1.ReplicaSet |
@@ -7503,7 +7503,7 @@
200 |
success |
-v1beta1.Scale |
+v1beta1.Scale |
@@ -7585,7 +7585,7 @@
body |
|
true |
-v1beta1.Scale |
+v1beta1.Scale |
|
@@ -7627,7 +7627,7 @@
200 |
success |
-v1beta1.Scale |
+v1beta1.Scale |
@@ -7709,7 +7709,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -7751,7 +7751,7 @@
200 |
success |
-v1beta1.Scale |
+v1beta1.Scale |
@@ -7839,7 +7839,7 @@
body |
|
true |
-v1beta1.ReplicaSet |
+v1beta1.ReplicaSet |
|
@@ -7881,7 +7881,7 @@
200 |
success |
-v1beta1.ReplicaSet |
+v1beta1.ReplicaSet |
@@ -7997,7 +7997,7 @@
200 |
success |
-v1beta1.Scale |
+v1beta1.Scale |
@@ -8079,7 +8079,7 @@
body |
|
true |
-v1beta1.Scale |
+v1beta1.Scale |
|
@@ -8121,7 +8121,7 @@
200 |
success |
-v1beta1.Scale |
+v1beta1.Scale |
@@ -8203,7 +8203,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -8245,7 +8245,7 @@
200 |
success |
-v1beta1.Scale |
+v1beta1.Scale |
@@ -8391,7 +8391,7 @@
200 |
success |
-v1beta1.ReplicaSetList |
+v1beta1.ReplicaSetList |
@@ -8531,7 +8531,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -8668,7 +8668,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -8805,7 +8805,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -8942,7 +8942,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -9079,7 +9079,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -9224,7 +9224,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -9377,7 +9377,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -9522,7 +9522,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -9675,7 +9675,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -9820,7 +9820,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -9973,7 +9973,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -10118,7 +10118,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -10271,7 +10271,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -10416,7 +10416,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -10569,7 +10569,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -10714,7 +10714,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -10867,7 +10867,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -11004,7 +11004,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
diff --git a/_includes/v1.2/v1-operations.html b/_includes/v1.2/v1-operations.html
index 1c7b4e93e91..eecf1a5c1e5 100755
--- a/_includes/v1.2/v1-operations.html
+++ b/_includes/v1.2/v1-operations.html
@@ -181,7 +181,7 @@
200 |
success |
-v1.ComponentStatusList |
+v1.ComponentStatusList |
@@ -289,7 +289,7 @@
200 |
success |
-v1.ComponentStatus |
+v1.ComponentStatus |
@@ -429,7 +429,7 @@
200 |
success |
-v1.ConfigMapList |
+v1.ConfigMapList |
@@ -569,7 +569,7 @@
200 |
success |
-v1.EndpointsList |
+v1.EndpointsList |
@@ -709,7 +709,7 @@
200 |
success |
-v1.EventList |
+v1.EventList |
@@ -849,7 +849,7 @@
200 |
success |
-v1.LimitRangeList |
+v1.LimitRangeList |
@@ -989,7 +989,7 @@
200 |
success |
-v1.NamespaceList |
+v1.NamespaceList |
@@ -1129,7 +1129,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -1211,7 +1211,7 @@
body |
|
true |
-v1.Namespace |
+v1.Namespace |
|
@@ -1237,7 +1237,7 @@
200 |
success |
-v1.Namespace |
+v1.Namespace |
@@ -1319,7 +1319,7 @@
body |
|
true |
-v1.Binding |
+v1.Binding |
|
@@ -1353,7 +1353,7 @@
200 |
success |
-v1.Binding |
+v1.Binding |
@@ -1501,7 +1501,7 @@
200 |
success |
-v1.ConfigMapList |
+v1.ConfigMapList |
@@ -1649,7 +1649,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -1731,7 +1731,7 @@
body |
|
true |
-v1.ConfigMap |
+v1.ConfigMap |
|
@@ -1765,7 +1765,7 @@
200 |
success |
-v1.ConfigMap |
+v1.ConfigMap |
@@ -1897,7 +1897,7 @@
200 |
success |
-v1.ConfigMap |
+v1.ConfigMap |
@@ -1979,7 +1979,7 @@
body |
|
true |
-v1.ConfigMap |
+v1.ConfigMap |
|
@@ -2021,7 +2021,7 @@
200 |
success |
-v1.ConfigMap |
+v1.ConfigMap |
@@ -2103,7 +2103,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -2145,7 +2145,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -2227,7 +2227,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -2269,7 +2269,7 @@
200 |
success |
-v1.ConfigMap |
+v1.ConfigMap |
@@ -2423,7 +2423,7 @@
200 |
success |
-v1.EndpointsList |
+v1.EndpointsList |
@@ -2571,7 +2571,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -2653,7 +2653,7 @@
body |
|
true |
-v1.Endpoints |
+v1.Endpoints |
|
@@ -2687,7 +2687,7 @@
200 |
success |
-v1.Endpoints |
+v1.Endpoints |
@@ -2819,7 +2819,7 @@
200 |
success |
-v1.Endpoints |
+v1.Endpoints |
@@ -2901,7 +2901,7 @@
body |
|
true |
-v1.Endpoints |
+v1.Endpoints |
|
@@ -2943,7 +2943,7 @@
200 |
success |
-v1.Endpoints |
+v1.Endpoints |
@@ -3025,7 +3025,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -3067,7 +3067,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -3149,7 +3149,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -3191,7 +3191,7 @@
200 |
success |
-v1.Endpoints |
+v1.Endpoints |
@@ -3345,7 +3345,7 @@
200 |
success |
-v1.EventList |
+v1.EventList |
@@ -3493,7 +3493,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -3575,7 +3575,7 @@
body |
|
true |
-v1.Event |
+v1.Event |
|
@@ -3609,7 +3609,7 @@
200 |
success |
-v1.Event |
+v1.Event |
@@ -3741,7 +3741,7 @@
200 |
success |
-v1.Event |
+v1.Event |
@@ -3823,7 +3823,7 @@
body |
|
true |
-v1.Event |
+v1.Event |
|
@@ -3865,7 +3865,7 @@
200 |
success |
-v1.Event |
+v1.Event |
@@ -3947,7 +3947,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -3989,7 +3989,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -4071,7 +4071,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -4113,7 +4113,7 @@
200 |
success |
-v1.Event |
+v1.Event |
@@ -4267,7 +4267,7 @@
200 |
success |
-v1.LimitRangeList |
+v1.LimitRangeList |
@@ -4415,7 +4415,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -4497,7 +4497,7 @@
body |
|
true |
-v1.LimitRange |
+v1.LimitRange |
|
@@ -4531,7 +4531,7 @@
200 |
success |
-v1.LimitRange |
+v1.LimitRange |
@@ -4663,7 +4663,7 @@
200 |
success |
-v1.LimitRange |
+v1.LimitRange |
@@ -4745,7 +4745,7 @@
body |
|
true |
-v1.LimitRange |
+v1.LimitRange |
|
@@ -4787,7 +4787,7 @@
200 |
success |
-v1.LimitRange |
+v1.LimitRange |
@@ -4869,7 +4869,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -4911,7 +4911,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -4993,7 +4993,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -5035,7 +5035,7 @@
200 |
success |
-v1.LimitRange |
+v1.LimitRange |
@@ -5189,7 +5189,7 @@
200 |
success |
-v1.PersistentVolumeClaimList |
+v1.PersistentVolumeClaimList |
@@ -5337,7 +5337,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -5419,7 +5419,7 @@
body |
|
true |
-v1.PersistentVolumeClaim |
+v1.PersistentVolumeClaim |
|
@@ -5453,7 +5453,7 @@
200 |
success |
-v1.PersistentVolumeClaim |
+v1.PersistentVolumeClaim |
@@ -5585,7 +5585,7 @@
200 |
success |
-v1.PersistentVolumeClaim |
+v1.PersistentVolumeClaim |
@@ -5667,7 +5667,7 @@
body |
|
true |
-v1.PersistentVolumeClaim |
+v1.PersistentVolumeClaim |
|
@@ -5709,7 +5709,7 @@
200 |
success |
-v1.PersistentVolumeClaim |
+v1.PersistentVolumeClaim |
@@ -5791,7 +5791,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -5833,7 +5833,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -5915,7 +5915,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -5957,7 +5957,7 @@
200 |
success |
-v1.PersistentVolumeClaim |
+v1.PersistentVolumeClaim |
@@ -6045,7 +6045,7 @@
body |
|
true |
-v1.PersistentVolumeClaim |
+v1.PersistentVolumeClaim |
|
@@ -6087,7 +6087,7 @@
200 |
success |
-v1.PersistentVolumeClaim |
+v1.PersistentVolumeClaim |
@@ -6235,7 +6235,7 @@
200 |
success |
-v1.PodList |
+v1.PodList |
@@ -6383,7 +6383,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -6465,7 +6465,7 @@
body |
|
true |
-v1.Pod |
+v1.Pod |
|
@@ -6499,7 +6499,7 @@
200 |
success |
-v1.Pod |
+v1.Pod |
@@ -6631,7 +6631,7 @@
200 |
success |
-v1.Pod |
+v1.Pod |
@@ -6713,7 +6713,7 @@
body |
|
true |
-v1.Pod |
+v1.Pod |
|
@@ -6755,7 +6755,7 @@
200 |
success |
-v1.Pod |
+v1.Pod |
@@ -6837,7 +6837,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -6879,7 +6879,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -6961,7 +6961,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -7003,7 +7003,7 @@
200 |
success |
-v1.Pod |
+v1.Pod |
@@ -7381,7 +7381,7 @@
body |
|
true |
-v1.Binding |
+v1.Binding |
|
@@ -7423,7 +7423,7 @@
200 |
success |
-v1.Binding |
+v1.Binding |
@@ -7909,7 +7909,7 @@
200 |
success |
-v1.Pod |
+v1.Pod |
@@ -9137,7 +9137,7 @@
body |
|
true |
-v1.Pod |
+v1.Pod |
|
@@ -9179,7 +9179,7 @@
200 |
success |
-v1.Pod |
+v1.Pod |
@@ -9327,7 +9327,7 @@
200 |
success |
-v1.PodTemplateList |
+v1.PodTemplateList |
@@ -9475,7 +9475,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -9557,7 +9557,7 @@
body |
|
true |
-v1.PodTemplate |
+v1.PodTemplate |
|
@@ -9591,7 +9591,7 @@
200 |
success |
-v1.PodTemplate |
+v1.PodTemplate |
@@ -9723,7 +9723,7 @@
200 |
success |
-v1.PodTemplate |
+v1.PodTemplate |
@@ -9805,7 +9805,7 @@
body |
|
true |
-v1.PodTemplate |
+v1.PodTemplate |
|
@@ -9847,7 +9847,7 @@
200 |
success |
-v1.PodTemplate |
+v1.PodTemplate |
@@ -9929,7 +9929,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -9971,7 +9971,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -10053,7 +10053,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -10095,7 +10095,7 @@
200 |
success |
-v1.PodTemplate |
+v1.PodTemplate |
@@ -10249,7 +10249,7 @@
200 |
success |
-v1.ReplicationControllerList |
+v1.ReplicationControllerList |
@@ -10397,7 +10397,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -10479,7 +10479,7 @@
body |
|
true |
-v1.ReplicationController |
+v1.ReplicationController |
|
@@ -10513,7 +10513,7 @@
200 |
success |
-v1.ReplicationController |
+v1.ReplicationController |
@@ -10645,7 +10645,7 @@
200 |
success |
-v1.ReplicationController |
+v1.ReplicationController |
@@ -10727,7 +10727,7 @@
body |
|
true |
-v1.ReplicationController |
+v1.ReplicationController |
|
@@ -10769,7 +10769,7 @@
200 |
success |
-v1.ReplicationController |
+v1.ReplicationController |
@@ -10851,7 +10851,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -10893,7 +10893,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -10975,7 +10975,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -11017,7 +11017,7 @@
200 |
success |
-v1.ReplicationController |
+v1.ReplicationController |
@@ -11139,7 +11139,7 @@
200 |
success |
-v1.Scale |
+v1.Scale |
@@ -11221,7 +11221,7 @@
body |
|
true |
-v1.Scale |
+v1.Scale |
|
@@ -11263,7 +11263,7 @@
200 |
success |
-v1.Scale |
+v1.Scale |
@@ -11345,7 +11345,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -11387,7 +11387,7 @@
200 |
success |
-v1.Scale |
+v1.Scale |
@@ -11475,7 +11475,7 @@
body |
|
true |
-v1.ReplicationController |
+v1.ReplicationController |
|
@@ -11517,7 +11517,7 @@
200 |
success |
-v1.ReplicationController |
+v1.ReplicationController |
@@ -11665,7 +11665,7 @@
200 |
success |
-v1.ResourceQuotaList |
+v1.ResourceQuotaList |
@@ -11813,7 +11813,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -11895,7 +11895,7 @@
body |
|
true |
-v1.ResourceQuota |
+v1.ResourceQuota |
|
@@ -11929,7 +11929,7 @@
200 |
success |
-v1.ResourceQuota |
+v1.ResourceQuota |
@@ -12061,7 +12061,7 @@
200 |
success |
-v1.ResourceQuota |
+v1.ResourceQuota |
@@ -12143,7 +12143,7 @@
body |
|
true |
-v1.ResourceQuota |
+v1.ResourceQuota |
|
@@ -12185,7 +12185,7 @@
200 |
success |
-v1.ResourceQuota |
+v1.ResourceQuota |
@@ -12267,7 +12267,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -12309,7 +12309,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -12391,7 +12391,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -12433,7 +12433,7 @@
200 |
success |
-v1.ResourceQuota |
+v1.ResourceQuota |
@@ -12521,7 +12521,7 @@
body |
|
true |
-v1.ResourceQuota |
+v1.ResourceQuota |
|
@@ -12563,7 +12563,7 @@
200 |
success |
-v1.ResourceQuota |
+v1.ResourceQuota |
@@ -12711,7 +12711,7 @@
200 |
success |
-v1.SecretList |
+v1.SecretList |
@@ -12859,7 +12859,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -12941,7 +12941,7 @@
body |
|
true |
-v1.Secret |
+v1.Secret |
|
@@ -12975,7 +12975,7 @@
200 |
success |
-v1.Secret |
+v1.Secret |
@@ -13107,7 +13107,7 @@
200 |
success |
-v1.Secret |
+v1.Secret |
@@ -13189,7 +13189,7 @@
body |
|
true |
-v1.Secret |
+v1.Secret |
|
@@ -13231,7 +13231,7 @@
200 |
success |
-v1.Secret |
+v1.Secret |
@@ -13313,7 +13313,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -13355,7 +13355,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -13437,7 +13437,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -13479,7 +13479,7 @@
200 |
success |
-v1.Secret |
+v1.Secret |
@@ -13633,7 +13633,7 @@
200 |
success |
-v1.ServiceAccountList |
+v1.ServiceAccountList |
@@ -13781,7 +13781,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -13863,7 +13863,7 @@
body |
|
true |
-v1.ServiceAccount |
+v1.ServiceAccount |
|
@@ -13897,7 +13897,7 @@
200 |
success |
-v1.ServiceAccount |
+v1.ServiceAccount |
@@ -14029,7 +14029,7 @@
200 |
success |
-v1.ServiceAccount |
+v1.ServiceAccount |
@@ -14111,7 +14111,7 @@
body |
|
true |
-v1.ServiceAccount |
+v1.ServiceAccount |
|
@@ -14153,7 +14153,7 @@
200 |
success |
-v1.ServiceAccount |
+v1.ServiceAccount |
@@ -14235,7 +14235,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -14277,7 +14277,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -14359,7 +14359,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -14401,7 +14401,7 @@
200 |
success |
-v1.ServiceAccount |
+v1.ServiceAccount |
@@ -14555,7 +14555,7 @@
200 |
success |
-v1.ServiceList |
+v1.ServiceList |
@@ -14637,7 +14637,7 @@
body |
|
true |
-v1.Service |
+v1.Service |
|
@@ -14671,7 +14671,7 @@
200 |
success |
-v1.Service |
+v1.Service |
@@ -14803,7 +14803,7 @@
200 |
success |
-v1.Service |
+v1.Service |
@@ -14885,7 +14885,7 @@
body |
|
true |
-v1.Service |
+v1.Service |
|
@@ -14927,7 +14927,7 @@
200 |
success |
-v1.Service |
+v1.Service |
@@ -15043,7 +15043,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -15125,7 +15125,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -15167,7 +15167,7 @@
200 |
success |
-v1.Service |
+v1.Service |
@@ -16191,7 +16191,7 @@
body |
|
true |
-v1.Service |
+v1.Service |
|
@@ -16233,7 +16233,7 @@
200 |
success |
-v1.Service |
+v1.Service |
@@ -16357,7 +16357,7 @@
200 |
success |
-v1.Namespace |
+v1.Namespace |
@@ -16439,7 +16439,7 @@
body |
|
true |
-v1.Namespace |
+v1.Namespace |
|
@@ -16473,7 +16473,7 @@
200 |
success |
-v1.Namespace |
+v1.Namespace |
@@ -16555,7 +16555,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -16589,7 +16589,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -16671,7 +16671,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -16705,7 +16705,7 @@
200 |
success |
-v1.Namespace |
+v1.Namespace |
@@ -16793,7 +16793,7 @@
body |
|
true |
-v1.Namespace |
+v1.Namespace |
|
@@ -16827,7 +16827,7 @@
200 |
success |
-v1.Namespace |
+v1.Namespace |
@@ -16909,7 +16909,7 @@
body |
|
true |
-v1.Namespace |
+v1.Namespace |
|
@@ -16943,7 +16943,7 @@
200 |
success |
-v1.Namespace |
+v1.Namespace |
@@ -17083,7 +17083,7 @@
200 |
success |
-v1.NodeList |
+v1.NodeList |
@@ -17223,7 +17223,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -17305,7 +17305,7 @@
body |
|
true |
-v1.Node |
+v1.Node |
|
@@ -17331,7 +17331,7 @@
200 |
success |
-v1.Node |
+v1.Node |
@@ -17455,7 +17455,7 @@
200 |
success |
-v1.Node |
+v1.Node |
@@ -17537,7 +17537,7 @@
body |
|
true |
-v1.Node |
+v1.Node |
|
@@ -17571,7 +17571,7 @@
200 |
success |
-v1.Node |
+v1.Node |
@@ -17653,7 +17653,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -17687,7 +17687,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -17769,7 +17769,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -17803,7 +17803,7 @@
200 |
success |
-v1.Node |
+v1.Node |
@@ -18763,7 +18763,7 @@
body |
|
true |
-v1.Node |
+v1.Node |
|
@@ -18797,7 +18797,7 @@
200 |
success |
-v1.Node |
+v1.Node |
@@ -18937,7 +18937,7 @@
200 |
success |
-v1.PersistentVolumeClaimList |
+v1.PersistentVolumeClaimList |
@@ -19077,7 +19077,7 @@
200 |
success |
-v1.PersistentVolumeList |
+v1.PersistentVolumeList |
@@ -19217,7 +19217,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -19299,7 +19299,7 @@
body |
|
true |
-v1.PersistentVolume |
+v1.PersistentVolume |
|
@@ -19325,7 +19325,7 @@
200 |
success |
-v1.PersistentVolume |
+v1.PersistentVolume |
@@ -19449,7 +19449,7 @@
200 |
success |
-v1.PersistentVolume |
+v1.PersistentVolume |
@@ -19531,7 +19531,7 @@
body |
|
true |
-v1.PersistentVolume |
+v1.PersistentVolume |
|
@@ -19565,7 +19565,7 @@
200 |
success |
-v1.PersistentVolume |
+v1.PersistentVolume |
@@ -19647,7 +19647,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -19681,7 +19681,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -19763,7 +19763,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -19797,7 +19797,7 @@
200 |
success |
-v1.PersistentVolume |
+v1.PersistentVolume |
@@ -19885,7 +19885,7 @@
body |
|
true |
-v1.PersistentVolume |
+v1.PersistentVolume |
|
@@ -19919,7 +19919,7 @@
200 |
success |
-v1.PersistentVolume |
+v1.PersistentVolume |
@@ -20059,7 +20059,7 @@
200 |
success |
-v1.PodList |
+v1.PodList |
@@ -20199,7 +20199,7 @@
200 |
success |
-v1.PodTemplateList |
+v1.PodTemplateList |
@@ -22891,7 +22891,7 @@
200 |
success |
-v1.ReplicationControllerList |
+v1.ReplicationControllerList |
@@ -23031,7 +23031,7 @@
200 |
success |
-v1.ResourceQuotaList |
+v1.ResourceQuotaList |
@@ -23171,7 +23171,7 @@
200 |
success |
-v1.SecretList |
+v1.SecretList |
@@ -23311,7 +23311,7 @@
200 |
success |
-v1.ServiceAccountList |
+v1.ServiceAccountList |
@@ -23451,7 +23451,7 @@
200 |
success |
-v1.ServiceList |
+v1.ServiceList |
@@ -23591,7 +23591,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -23728,7 +23728,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -23865,7 +23865,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -24002,7 +24002,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -24139,7 +24139,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -24284,7 +24284,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -24437,7 +24437,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -24582,7 +24582,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -24735,7 +24735,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -24880,7 +24880,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -25033,7 +25033,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -25178,7 +25178,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -25331,7 +25331,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -25476,7 +25476,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -25629,7 +25629,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -25774,7 +25774,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -25927,7 +25927,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -26072,7 +26072,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -26225,7 +26225,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -26370,7 +26370,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -26523,7 +26523,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -26668,7 +26668,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -26821,7 +26821,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -26966,7 +26966,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -27119,7 +27119,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -27264,7 +27264,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -27417,7 +27417,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -27562,7 +27562,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -27715,7 +27715,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -27860,7 +27860,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -27997,7 +27997,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -28142,7 +28142,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -28279,7 +28279,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -28416,7 +28416,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -28561,7 +28561,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -28698,7 +28698,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -28835,7 +28835,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -28972,7 +28972,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -29109,7 +29109,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -29246,7 +29246,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -29383,7 +29383,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -29520,7 +29520,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
diff --git a/_sass/_base.sass b/_sass/_base.sass
index fbe2cbfbc0f..0fbf8b32ceb 100644
--- a/_sass/_base.sass
+++ b/_sass/_base.sass
@@ -512,7 +512,7 @@ section
background-color: $light-grey
border-left: 3px solid $blue
padding: 7.5px 10px 7.5px 18px
- margin-left: -21px
+ margin-left: -3px
color: $blue
.open-toc
@@ -526,10 +526,13 @@ section
overflow-y: auto
.pi-accordion
- margin-left: -20px
+ & > .container:first-child > .item:first-child > .title:first-child
+ padding-left: 0
+ font-size: 1.5em
+ font-weight: 700
- .container
- padding-left: 20px
+ & > .container:first-child > .item.yah:first-child > .title:first-child
+ margin-left: -20px !important
.item
overflow: hidden
@@ -546,7 +549,6 @@ section
a.item > .title
color: black
- padding-left: 0
&:hover
color: $blue
@@ -689,11 +691,10 @@ dd
#docsContent
position: relative
float: right
- //width: calc(100% - 400px)
width: 100%
$toc-margin: 15px
- $header-clearance: 40px
+ $header-clearance: $header-height + 20px
* + h2, * + h3, * + h4, * + h5, * + h6
margin-top: 30px
@@ -705,12 +706,12 @@ dd
padding-bottom: 10px
border-bottom: 1px solid #cccccc
+ // Make sure anchor links aren't hidden by the header
&:before
- content: ''
display: block
- height: 100px
- margin-top: -100px
- background-color: red
+ content: " "
+ margin-top: -$header-clearance
+ height: $header-clearance
visibility: hidden
h1
@@ -845,9 +846,9 @@ dd
white-space: nowrap
text-indent: 50px
overflow: hidden
- background: $blue url(/images/pencil.png) no-repeat
- background-position: 1px 1px
- background-size: auto
+ background: $blue url(/images/icon-pencil.svg) no-repeat
+ background-position: 12px 10px
+ background-size: 29px 29px
#markdown-toc
margin-bottom: 20px
diff --git a/_sass/_desktop.sass b/_sass/_desktop.sass
index 33c57972a73..2574b96811e 100644
--- a/_sass/_desktop.sass
+++ b/_sass/_desktop.sass
@@ -66,6 +66,7 @@ $video-section-height: 550px
#encyclopedia
padding: 50px 50px 20px 20px
+ clear: both
#docsToc
position: relative
@@ -88,6 +89,11 @@ $video-section-height: 550px
main
max-width: $main-max-width
+ #home
+ section, header, footer
+ main
+ max-width: 1000px
+
#oceanNodes
main
position: relative
@@ -95,7 +101,7 @@ $video-section-height: 550px
&:nth-child(1)
max-width: 1000px
- padding-right: 500px
+ padding-right: 475px
h3, p
text-align: left
@@ -105,6 +111,9 @@ $video-section-height: 550px
max-width: 48%
transform: translateY(-50%)
+ img
+ max-width: 425px
+
//.content
// width: 50%
@@ -170,30 +179,6 @@ $video-section-height: 550px
div:last-child
float: right
- //.social
- // position: relative
- // margin: 20px 0
- //
- // a
- // float: left
- //
- // a + a
- // margin-left: 10px
- //
- // label
- // float: right
- // width: auto
- // display: inline-block
- // height: 50px
- // line-height: 50px
- // font-weight: 100
- // white-space: nowrap
- //
- // input
- // margin-left: 8px
- // max-width: none
-
-
#search, #wishField
background-color: transparent
padding: 10px
diff --git a/community.html b/community.html
index 955fe1165fc..7ce6c933c00 100644
--- a/community.html
+++ b/community.html
@@ -61,6 +61,7 @@ title: Community
+
diff --git a/css/sweetalert.css b/css/sweetalert.css
new file mode 100644
index 00000000000..fbccd7302f5
--- /dev/null
+++ b/css/sweetalert.css
@@ -0,0 +1,934 @@
+body.stop-scrolling {
+ height: 100%;
+ overflow: hidden; }
+
+.sweet-overlay {
+ background-color: black;
+ /* IE8 */
+ -ms-filter: "progid:DXImageTransform.Microsoft.Alpha(Opacity=40)";
+ /* IE8 */
+ background-color: rgba(0, 0, 0, 0.4);
+ position: fixed;
+ left: 0;
+ right: 0;
+ top: 0;
+ bottom: 0;
+ display: none;
+ z-index: 10000; }
+
+.sweet-alert {
+ background-color: white;
+ font-family: 'Open Sans', 'Helvetica Neue', Helvetica, Arial, sans-serif;
+ width: 478px;
+ padding: 17px;
+ border-radius: 5px;
+ text-align: left;
+ position: fixed;
+ left: 50%;
+ top: 50%;
+ margin-left: -256px;
+ margin-top: -200px;
+ overflow: hidden;
+ display: none;
+ z-index: 99999; }
+ @media all and (max-width: 540px) {
+ .sweet-alert {
+ width: auto;
+ margin-left: 0;
+ margin-right: 0;
+ left: 15px;
+ right: 15px; } }
+ .sweet-alert h2 {
+ color: #575757;
+ font-size: 30px;
+ text-align: center;
+ font-weight: 600;
+ text-transform: none;
+ position: relative;
+ margin: 25px 0;
+ padding: 0;
+ line-height: 40px;
+ display: block; }
+ .sweet-alert p {
+ color: #797979;
+ font-size: 16px;
+ text-align: left;
+ font-weight: 300;
+ position: relative;
+ text-align: inherit;
+ float: none;
+ margin: 0;
+ padding: 0;
+ padding-left: 10px !important;
+ font-family: courier,monospace;
+ line-height: normal; }
+ .sweet-alert fieldset {
+ border: none;
+ position: relative; }
+ .sweet-alert .sa-error-container {
+ background-color: #f1f1f1;
+ margin-left: -17px;
+ margin-right: -17px;
+ overflow: hidden;
+ padding: 0 10px;
+ max-height: 0;
+ webkit-transition: padding 0.15s, max-height 0.15s;
+ transition: padding 0.15s, max-height 0.15s; }
+ .sweet-alert .sa-error-container.show {
+ padding: 10px 0;
+ max-height: 100px;
+ webkit-transition: padding 0.2s, max-height 0.2s;
+ transition: padding 0.25s, max-height 0.25s; }
+ .sweet-alert .sa-error-container .icon {
+ display: inline-block;
+ width: 24px;
+ height: 24px;
+ border-radius: 50%;
+ background-color: #ea7d7d;
+ color: white;
+ line-height: 24px;
+ text-align: center;
+ margin-right: 3px; }
+ .sweet-alert .sa-error-container p {
+ display: inline-block; }
+ .sweet-alert .sa-input-error {
+ position: absolute;
+ top: 29px;
+ right: 26px;
+ width: 20px;
+ height: 20px;
+ opacity: 0;
+ -webkit-transform: scale(0.5);
+ transform: scale(0.5);
+ -webkit-transform-origin: 50% 50%;
+ transform-origin: 50% 50%;
+ -webkit-transition: all 0.1s;
+ transition: all 0.1s; }
+ .sweet-alert .sa-input-error::before, .sweet-alert .sa-input-error::after {
+ content: "";
+ width: 20px;
+ height: 6px;
+ background-color: #f06e57;
+ border-radius: 3px;
+ position: absolute;
+ top: 50%;
+ margin-top: -4px;
+ left: 50%;
+ margin-left: -9px; }
+ .sweet-alert .sa-input-error::before {
+ -webkit-transform: rotate(-45deg);
+ transform: rotate(-45deg); }
+ .sweet-alert .sa-input-error::after {
+ -webkit-transform: rotate(45deg);
+ transform: rotate(45deg); }
+ .sweet-alert .sa-input-error.show {
+ opacity: 1;
+ -webkit-transform: scale(1);
+ transform: scale(1); }
+ .sweet-alert input {
+ width: 100%;
+ box-sizing: border-box;
+ border-radius: 3px;
+ border: 1px solid #d7d7d7;
+ height: 43px;
+ margin-top: 10px;
+ margin-bottom: 17px;
+ font-size: 18px;
+ box-shadow: inset 0px 1px 1px rgba(0, 0, 0, 0.06);
+ padding: 0 12px;
+ display: none;
+ -webkit-transition: all 0.3s;
+ transition: all 0.3s; }
+ .sweet-alert input:focus {
+ outline: none;
+ box-shadow: 0px 0px 3px #c4e6f5;
+ border: 1px solid #b4dbed; }
+ .sweet-alert input:focus::-moz-placeholder {
+ transition: opacity 0.3s 0.03s ease;
+ opacity: 0.5; }
+ .sweet-alert input:focus:-ms-input-placeholder {
+ transition: opacity 0.3s 0.03s ease;
+ opacity: 0.5; }
+ .sweet-alert input:focus::-webkit-input-placeholder {
+ transition: opacity 0.3s 0.03s ease;
+ opacity: 0.5; }
+ .sweet-alert input::-moz-placeholder {
+ color: #bdbdbd; }
+ .sweet-alert input:-ms-input-placeholder {
+ color: #bdbdbd; }
+ .sweet-alert input::-webkit-input-placeholder {
+ color: #bdbdbd; }
+ .sweet-alert.show-input input {
+ display: block; }
+ .sweet-alert .sa-confirm-button-container {
+ display: inline-block;
+ position: relative; }
+ .sweet-alert .la-ball-fall {
+ position: absolute;
+ left: 50%;
+ top: 50%;
+ margin-left: -27px;
+ margin-top: 4px;
+ opacity: 0;
+ visibility: hidden; }
+ .sweet-alert button {
+ background-color: #8CD4F5;
+ color: white;
+ border: none;
+ box-shadow: none;
+ font-size: 17px;
+ font-weight: 500;
+ -webkit-border-radius: 4px;
+ border-radius: 5px;
+ padding: 10px 32px;
+ margin: 26px 5px 0 5px;
+ cursor: pointer; }
+ .sweet-alert button:focus {
+ outline: none;
+ box-shadow: 0 0 2px rgba(128, 179, 235, 0.5), inset 0 0 0 1px rgba(0, 0, 0, 0.05); }
+ .sweet-alert button:hover {
+ background-color: #7ecff4; }
+ .sweet-alert button:active {
+ background-color: #5dc2f1; }
+ .sweet-alert button.cancel {
+ background-color: #C1C1C1; }
+ .sweet-alert button.cancel:hover {
+ background-color: #b9b9b9; }
+ .sweet-alert button.cancel:active {
+ background-color: #a8a8a8; }
+ .sweet-alert button.cancel:focus {
+ box-shadow: rgba(197, 205, 211, 0.8) 0px 0px 2px, rgba(0, 0, 0, 0.0470588) 0px 0px 0px 1px inset !important; }
+ .sweet-alert button[disabled] {
+ opacity: .6;
+ cursor: default; }
+ .sweet-alert button.confirm[disabled] {
+ color: transparent; }
+ .sweet-alert button.confirm[disabled] ~ .la-ball-fall {
+ opacity: 1;
+ visibility: visible;
+ transition-delay: 0s; }
+ .sweet-alert button::-moz-focus-inner {
+ border: 0; }
+ .sweet-alert[data-has-cancel-button=false] button {
+ box-shadow: none !important; }
+ .sweet-alert[data-has-confirm-button=false][data-has-cancel-button=false] {
+ padding-bottom: 40px; }
+ .sweet-alert .sa-icon {
+ width: 80px;
+ height: 80px;
+ border: 4px solid gray;
+ -webkit-border-radius: 40px;
+ border-radius: 40px;
+ border-radius: 50%;
+ margin: 20px auto;
+ padding: 0;
+ position: relative;
+ box-sizing: content-box; }
+ .sweet-alert .sa-icon.sa-error {
+ border-color: #F27474; }
+ .sweet-alert .sa-icon.sa-error .sa-x-mark {
+ position: relative;
+ display: block; }
+ .sweet-alert .sa-icon.sa-error .sa-line {
+ position: absolute;
+ height: 5px;
+ width: 47px;
+ background-color: #F27474;
+ display: block;
+ top: 37px;
+ border-radius: 2px; }
+ .sweet-alert .sa-icon.sa-error .sa-line.sa-left {
+ -webkit-transform: rotate(45deg);
+ transform: rotate(45deg);
+ left: 17px; }
+ .sweet-alert .sa-icon.sa-error .sa-line.sa-right {
+ -webkit-transform: rotate(-45deg);
+ transform: rotate(-45deg);
+ right: 16px; }
+ .sweet-alert .sa-icon.sa-warning {
+ border-color: #F8BB86; }
+ .sweet-alert .sa-icon.sa-warning .sa-body {
+ position: absolute;
+ width: 5px;
+ height: 47px;
+ left: 50%;
+ top: 10px;
+ -webkit-border-radius: 2px;
+ border-radius: 2px;
+ margin-left: -2px;
+ background-color: #F8BB86; }
+ .sweet-alert .sa-icon.sa-warning .sa-dot {
+ position: absolute;
+ width: 7px;
+ height: 7px;
+ -webkit-border-radius: 50%;
+ border-radius: 50%;
+ margin-left: -3px;
+ left: 50%;
+ bottom: 10px;
+ background-color: #F8BB86; }
+ .sweet-alert .sa-icon.sa-info {
+ border-color: #C9DAE1; }
+ .sweet-alert .sa-icon.sa-info::before {
+ content: "";
+ position: absolute;
+ width: 5px;
+ height: 29px;
+ left: 50%;
+ bottom: 17px;
+ border-radius: 2px;
+ margin-left: -2px;
+ background-color: #C9DAE1; }
+ .sweet-alert .sa-icon.sa-info::after {
+ content: "";
+ position: absolute;
+ width: 7px;
+ height: 7px;
+ border-radius: 50%;
+ margin-left: -3px;
+ top: 19px;
+ background-color: #C9DAE1; }
+ .sweet-alert .sa-icon.sa-success {
+ border-color: #A5DC86; }
+ .sweet-alert .sa-icon.sa-success::before, .sweet-alert .sa-icon.sa-success::after {
+ content: '';
+ -webkit-border-radius: 40px;
+ border-radius: 40px;
+ border-radius: 50%;
+ position: absolute;
+ width: 60px;
+ height: 120px;
+ background: white;
+ -webkit-transform: rotate(45deg);
+ transform: rotate(45deg); }
+ .sweet-alert .sa-icon.sa-success::before {
+ -webkit-border-radius: 120px 0 0 120px;
+ border-radius: 120px 0 0 120px;
+ top: -7px;
+ left: -33px;
+ -webkit-transform: rotate(-45deg);
+ transform: rotate(-45deg);
+ -webkit-transform-origin: 60px 60px;
+ transform-origin: 60px 60px; }
+ .sweet-alert .sa-icon.sa-success::after {
+ -webkit-border-radius: 0 120px 120px 0;
+ border-radius: 0 120px 120px 0;
+ top: -11px;
+ left: 30px;
+ -webkit-transform: rotate(-45deg);
+ transform: rotate(-45deg);
+ -webkit-transform-origin: 0px 60px;
+ transform-origin: 0px 60px; }
+ .sweet-alert .sa-icon.sa-success .sa-placeholder {
+ width: 80px;
+ height: 80px;
+ border: 4px solid rgba(165, 220, 134, 0.2);
+ -webkit-border-radius: 40px;
+ border-radius: 40px;
+ border-radius: 50%;
+ box-sizing: content-box;
+ position: absolute;
+ left: -4px;
+ top: -4px;
+ z-index: 2; }
+ .sweet-alert .sa-icon.sa-success .sa-fix {
+ width: 5px;
+ height: 90px;
+ background-color: white;
+ position: absolute;
+ left: 28px;
+ top: 8px;
+ z-index: 1;
+ -webkit-transform: rotate(-45deg);
+ transform: rotate(-45deg); }
+ .sweet-alert .sa-icon.sa-success .sa-line {
+ height: 5px;
+ background-color: #A5DC86;
+ display: block;
+ border-radius: 2px;
+ position: absolute;
+ z-index: 2; }
+ .sweet-alert .sa-icon.sa-success .sa-line.sa-tip {
+ width: 25px;
+ left: 14px;
+ top: 46px;
+ -webkit-transform: rotate(45deg);
+ transform: rotate(45deg); }
+ .sweet-alert .sa-icon.sa-success .sa-line.sa-long {
+ width: 47px;
+ right: 8px;
+ top: 38px;
+ -webkit-transform: rotate(-45deg);
+ transform: rotate(-45deg); }
+ .sweet-alert .sa-icon.sa-custom {
+ background-size: contain;
+ border-radius: 0;
+ border: none;
+ background-position: center center;
+ background-repeat: no-repeat; }
+
+/*
+ * Animations
+ */
+@-webkit-keyframes showSweetAlert {
+ 0% {
+ transform: scale(0.7);
+ -webkit-transform: scale(0.7); }
+ 45% {
+ transform: scale(1.05);
+ -webkit-transform: scale(1.05); }
+ 80% {
+ transform: scale(0.95);
+ -webkit-transform: scale(0.95); }
+ 100% {
+ transform: scale(1);
+ -webkit-transform: scale(1); } }
+
+@keyframes showSweetAlert {
+ 0% {
+ transform: scale(0.7);
+ -webkit-transform: scale(0.7); }
+ 45% {
+ transform: scale(1.05);
+ -webkit-transform: scale(1.05); }
+ 80% {
+ transform: scale(0.95);
+ -webkit-transform: scale(0.95); }
+ 100% {
+ transform: scale(1);
+ -webkit-transform: scale(1); } }
+
+@-webkit-keyframes hideSweetAlert {
+ 0% {
+ transform: scale(1);
+ -webkit-transform: scale(1); }
+ 100% {
+ transform: scale(0.5);
+ -webkit-transform: scale(0.5); } }
+
+@keyframes hideSweetAlert {
+ 0% {
+ transform: scale(1);
+ -webkit-transform: scale(1); }
+ 100% {
+ transform: scale(0.5);
+ -webkit-transform: scale(0.5); } }
+
+@-webkit-keyframes slideFromTop {
+ 0% {
+ top: 0%; }
+ 100% {
+ top: 50%; } }
+
+@keyframes slideFromTop {
+ 0% {
+ top: 0%; }
+ 100% {
+ top: 50%; } }
+
+@-webkit-keyframes slideToTop {
+ 0% {
+ top: 50%; }
+ 100% {
+ top: 0%; } }
+
+@keyframes slideToTop {
+ 0% {
+ top: 50%; }
+ 100% {
+ top: 0%; } }
+
+@-webkit-keyframes slideFromBottom {
+ 0% {
+ top: 70%; }
+ 100% {
+ top: 50%; } }
+
+@keyframes slideFromBottom {
+ 0% {
+ top: 70%; }
+ 100% {
+ top: 50%; } }
+
+@-webkit-keyframes slideToBottom {
+ 0% {
+ top: 50%; }
+ 100% {
+ top: 70%; } }
+
+@keyframes slideToBottom {
+ 0% {
+ top: 50%; }
+ 100% {
+ top: 70%; } }
+
+.showSweetAlert[data-animation=pop] {
+ -webkit-animation: showSweetAlert 0.3s;
+ animation: showSweetAlert 0.3s; }
+
+.showSweetAlert[data-animation=none] {
+ -webkit-animation: none;
+ animation: none; }
+
+.showSweetAlert[data-animation=slide-from-top] {
+ -webkit-animation: slideFromTop 0.3s;
+ animation: slideFromTop 0.3s; }
+
+.showSweetAlert[data-animation=slide-from-bottom] {
+ -webkit-animation: slideFromBottom 0.3s;
+ animation: slideFromBottom 0.3s; }
+
+.hideSweetAlert[data-animation=pop] {
+ -webkit-animation: hideSweetAlert 0.2s;
+ animation: hideSweetAlert 0.2s; }
+
+.hideSweetAlert[data-animation=none] {
+ -webkit-animation: none;
+ animation: none; }
+
+.hideSweetAlert[data-animation=slide-from-top] {
+ -webkit-animation: slideToTop 0.4s;
+ animation: slideToTop 0.4s; }
+
+.hideSweetAlert[data-animation=slide-from-bottom] {
+ -webkit-animation: slideToBottom 0.3s;
+ animation: slideToBottom 0.3s; }
+
+@-webkit-keyframes animateSuccessTip {
+ 0% {
+ width: 0;
+ left: 1px;
+ top: 19px; }
+ 54% {
+ width: 0;
+ left: 1px;
+ top: 19px; }
+ 70% {
+ width: 50px;
+ left: -8px;
+ top: 37px; }
+ 84% {
+ width: 17px;
+ left: 21px;
+ top: 48px; }
+ 100% {
+ width: 25px;
+ left: 14px;
+ top: 45px; } }
+
+@keyframes animateSuccessTip {
+ 0% {
+ width: 0;
+ left: 1px;
+ top: 19px; }
+ 54% {
+ width: 0;
+ left: 1px;
+ top: 19px; }
+ 70% {
+ width: 50px;
+ left: -8px;
+ top: 37px; }
+ 84% {
+ width: 17px;
+ left: 21px;
+ top: 48px; }
+ 100% {
+ width: 25px;
+ left: 14px;
+ top: 45px; } }
+
+@-webkit-keyframes animateSuccessLong {
+ 0% {
+ width: 0;
+ right: 46px;
+ top: 54px; }
+ 65% {
+ width: 0;
+ right: 46px;
+ top: 54px; }
+ 84% {
+ width: 55px;
+ right: 0px;
+ top: 35px; }
+ 100% {
+ width: 47px;
+ right: 8px;
+ top: 38px; } }
+
+@keyframes animateSuccessLong {
+ 0% {
+ width: 0;
+ right: 46px;
+ top: 54px; }
+ 65% {
+ width: 0;
+ right: 46px;
+ top: 54px; }
+ 84% {
+ width: 55px;
+ right: 0px;
+ top: 35px; }
+ 100% {
+ width: 47px;
+ right: 8px;
+ top: 38px; } }
+
+@-webkit-keyframes rotatePlaceholder {
+ 0% {
+ transform: rotate(-45deg);
+ -webkit-transform: rotate(-45deg); }
+ 5% {
+ transform: rotate(-45deg);
+ -webkit-transform: rotate(-45deg); }
+ 12% {
+ transform: rotate(-405deg);
+ -webkit-transform: rotate(-405deg); }
+ 100% {
+ transform: rotate(-405deg);
+ -webkit-transform: rotate(-405deg); } }
+
+@keyframes rotatePlaceholder {
+ 0% {
+ transform: rotate(-45deg);
+ -webkit-transform: rotate(-45deg); }
+ 5% {
+ transform: rotate(-45deg);
+ -webkit-transform: rotate(-45deg); }
+ 12% {
+ transform: rotate(-405deg);
+ -webkit-transform: rotate(-405deg); }
+ 100% {
+ transform: rotate(-405deg);
+ -webkit-transform: rotate(-405deg); } }
+
+.animateSuccessTip {
+ -webkit-animation: animateSuccessTip 0.75s;
+ animation: animateSuccessTip 0.75s; }
+
+.animateSuccessLong {
+ -webkit-animation: animateSuccessLong 0.75s;
+ animation: animateSuccessLong 0.75s; }
+
+.sa-icon.sa-success.animate::after {
+ -webkit-animation: rotatePlaceholder 4.25s ease-in;
+ animation: rotatePlaceholder 4.25s ease-in; }
+
+@-webkit-keyframes animateErrorIcon {
+ 0% {
+ transform: rotateX(100deg);
+ -webkit-transform: rotateX(100deg);
+ opacity: 0; }
+ 100% {
+ transform: rotateX(0deg);
+ -webkit-transform: rotateX(0deg);
+ opacity: 1; } }
+
+@keyframes animateErrorIcon {
+ 0% {
+ transform: rotateX(100deg);
+ -webkit-transform: rotateX(100deg);
+ opacity: 0; }
+ 100% {
+ transform: rotateX(0deg);
+ -webkit-transform: rotateX(0deg);
+ opacity: 1; } }
+
+.animateErrorIcon {
+ -webkit-animation: animateErrorIcon 0.5s;
+ animation: animateErrorIcon 0.5s; }
+
+@-webkit-keyframes animateXMark {
+ 0% {
+ transform: scale(0.4);
+ -webkit-transform: scale(0.4);
+ margin-top: 26px;
+ opacity: 0; }
+ 50% {
+ transform: scale(0.4);
+ -webkit-transform: scale(0.4);
+ margin-top: 26px;
+ opacity: 0; }
+ 80% {
+ transform: scale(1.15);
+ -webkit-transform: scale(1.15);
+ margin-top: -6px; }
+ 100% {
+ transform: scale(1);
+ -webkit-transform: scale(1);
+ margin-top: 0;
+ opacity: 1; } }
+
+@keyframes animateXMark {
+ 0% {
+ transform: scale(0.4);
+ -webkit-transform: scale(0.4);
+ margin-top: 26px;
+ opacity: 0; }
+ 50% {
+ transform: scale(0.4);
+ -webkit-transform: scale(0.4);
+ margin-top: 26px;
+ opacity: 0; }
+ 80% {
+ transform: scale(1.15);
+ -webkit-transform: scale(1.15);
+ margin-top: -6px; }
+ 100% {
+ transform: scale(1);
+ -webkit-transform: scale(1);
+ margin-top: 0;
+ opacity: 1; } }
+
+.animateXMark {
+ -webkit-animation: animateXMark 0.5s;
+ animation: animateXMark 0.5s; }
+
+@-webkit-keyframes pulseWarning {
+ 0% {
+ border-color: #F8D486; }
+ 100% {
+ border-color: #F8BB86; } }
+
+@keyframes pulseWarning {
+ 0% {
+ border-color: #F8D486; }
+ 100% {
+ border-color: #F8BB86; } }
+
+.pulseWarning {
+ -webkit-animation: pulseWarning 0.75s infinite alternate;
+ animation: pulseWarning 0.75s infinite alternate; }
+
+@-webkit-keyframes pulseWarningIns {
+ 0% {
+ background-color: #F8D486; }
+ 100% {
+ background-color: #F8BB86; } }
+
+@keyframes pulseWarningIns {
+ 0% {
+ background-color: #F8D486; }
+ 100% {
+ background-color: #F8BB86; } }
+
+.pulseWarningIns {
+ -webkit-animation: pulseWarningIns 0.75s infinite alternate;
+ animation: pulseWarningIns 0.75s infinite alternate; }
+
+@-webkit-keyframes rotate-loading {
+ 0% {
+ transform: rotate(0deg); }
+ 100% {
+ transform: rotate(360deg); } }
+
+@keyframes rotate-loading {
+ 0% {
+ transform: rotate(0deg); }
+ 100% {
+ transform: rotate(360deg); } }
+
+/* Internet Explorer 9 has some special quirks that are fixed here */
+/* The icons are not animated. */
+/* This file is automatically merged into sweet-alert.min.js through Gulp */
+/* Error icon */
+.sweet-alert .sa-icon.sa-error .sa-line.sa-left {
+ -ms-transform: rotate(45deg) \9; }
+
+.sweet-alert .sa-icon.sa-error .sa-line.sa-right {
+ -ms-transform: rotate(-45deg) \9; }
+
+/* Success icon */
+.sweet-alert .sa-icon.sa-success {
+ border-color: transparent\9; }
+
+.sweet-alert .sa-icon.sa-success .sa-line.sa-tip {
+ -ms-transform: rotate(45deg) \9; }
+
+.sweet-alert .sa-icon.sa-success .sa-line.sa-long {
+ -ms-transform: rotate(-45deg) \9; }
+
+/*!
+ * Load Awesome v1.1.0 (http://github.danielcardoso.net/load-awesome/)
+ * Copyright 2015 Daniel Cardoso <@DanielCardoso>
+ * Licensed under MIT
+ */
+.la-ball-fall,
+.la-ball-fall > div {
+ position: relative;
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box; }
+
+.la-ball-fall {
+ display: block;
+ font-size: 0;
+ color: #fff; }
+
+.la-ball-fall.la-dark {
+ color: #333; }
+
+.la-ball-fall > div {
+ display: inline-block;
+ float: none;
+ background-color: currentColor;
+ border: 0 solid currentColor; }
+
+.la-ball-fall {
+ width: 54px;
+ height: 18px; }
+
+.la-ball-fall > div {
+ width: 10px;
+ height: 10px;
+ margin: 4px;
+ border-radius: 100%;
+ opacity: 0;
+ -webkit-animation: ball-fall 1s ease-in-out infinite;
+ -moz-animation: ball-fall 1s ease-in-out infinite;
+ -o-animation: ball-fall 1s ease-in-out infinite;
+ animation: ball-fall 1s ease-in-out infinite; }
+
+.la-ball-fall > div:nth-child(1) {
+ -webkit-animation-delay: -200ms;
+ -moz-animation-delay: -200ms;
+ -o-animation-delay: -200ms;
+ animation-delay: -200ms; }
+
+.la-ball-fall > div:nth-child(2) {
+ -webkit-animation-delay: -100ms;
+ -moz-animation-delay: -100ms;
+ -o-animation-delay: -100ms;
+ animation-delay: -100ms; }
+
+.la-ball-fall > div:nth-child(3) {
+ -webkit-animation-delay: 0ms;
+ -moz-animation-delay: 0ms;
+ -o-animation-delay: 0ms;
+ animation-delay: 0ms; }
+
+.la-ball-fall.la-sm {
+ width: 26px;
+ height: 8px; }
+
+.la-ball-fall.la-sm > div {
+ width: 4px;
+ height: 4px;
+ margin: 2px; }
+
+.la-ball-fall.la-2x {
+ width: 108px;
+ height: 36px; }
+
+.la-ball-fall.la-2x > div {
+ width: 20px;
+ height: 20px;
+ margin: 8px; }
+
+.la-ball-fall.la-3x {
+ width: 162px;
+ height: 54px; }
+
+.la-ball-fall.la-3x > div {
+ width: 30px;
+ height: 30px;
+ margin: 12px; }
+
+/*
+ * Animation
+ */
+@-webkit-keyframes ball-fall {
+ 0% {
+ opacity: 0;
+ -webkit-transform: translateY(-145%);
+ transform: translateY(-145%); }
+ 10% {
+ opacity: .5; }
+ 20% {
+ opacity: 1;
+ -webkit-transform: translateY(0);
+ transform: translateY(0); }
+ 80% {
+ opacity: 1;
+ -webkit-transform: translateY(0);
+ transform: translateY(0); }
+ 90% {
+ opacity: .5; }
+ 100% {
+ opacity: 0;
+ -webkit-transform: translateY(145%);
+ transform: translateY(145%); } }
+
+@-moz-keyframes ball-fall {
+ 0% {
+ opacity: 0;
+ -moz-transform: translateY(-145%);
+ transform: translateY(-145%); }
+ 10% {
+ opacity: .5; }
+ 20% {
+ opacity: 1;
+ -moz-transform: translateY(0);
+ transform: translateY(0); }
+ 80% {
+ opacity: 1;
+ -moz-transform: translateY(0);
+ transform: translateY(0); }
+ 90% {
+ opacity: .5; }
+ 100% {
+ opacity: 0;
+ -moz-transform: translateY(145%);
+ transform: translateY(145%); } }
+
+@-o-keyframes ball-fall {
+ 0% {
+ opacity: 0;
+ -o-transform: translateY(-145%);
+ transform: translateY(-145%); }
+ 10% {
+ opacity: .5; }
+ 20% {
+ opacity: 1;
+ -o-transform: translateY(0);
+ transform: translateY(0); }
+ 80% {
+ opacity: 1;
+ -o-transform: translateY(0);
+ transform: translateY(0); }
+ 90% {
+ opacity: .5; }
+ 100% {
+ opacity: 0;
+ -o-transform: translateY(145%);
+ transform: translateY(145%); } }
+
+@keyframes ball-fall {
+ 0% {
+ opacity: 0;
+ -webkit-transform: translateY(-145%);
+ -moz-transform: translateY(-145%);
+ -o-transform: translateY(-145%);
+ transform: translateY(-145%); }
+ 10% {
+ opacity: .5; }
+ 20% {
+ opacity: 1;
+ -webkit-transform: translateY(0);
+ -moz-transform: translateY(0);
+ -o-transform: translateY(0);
+ transform: translateY(0); }
+ 80% {
+ opacity: 1;
+ -webkit-transform: translateY(0);
+ -moz-transform: translateY(0);
+ -o-transform: translateY(0);
+ transform: translateY(0); }
+ 90% {
+ opacity: .5; }
+ 100% {
+ opacity: 0;
+ -webkit-transform: translateY(145%);
+ -moz-transform: translateY(145%);
+ -o-transform: translateY(145%);
+ transform: translateY(145%); } }
diff --git a/docs/admin/accessing-the-api.md b/docs/admin/accessing-the-api.md
index 649807c4a42..27ee393ac34 100644
--- a/docs/admin/accessing-the-api.md
+++ b/docs/admin/accessing-the-api.md
@@ -17,20 +17,23 @@ there is one of these running on a single kubernetes-master node.
By default the Kubernetes APIserver serves HTTP on 2 ports:
- 1. Localhost Port
- - serves HTTP
- - default is port 8080, change with `--insecure-port` flag.
- - defaults IP is localhost, change with `--insecure-bind-address` flag.
- - no authentication or authorization checks in HTTP
- - protected by need to have host access
- 2. Secure Port
- - default is port 6443, change with `--secure-port` flag.
- - default IP is first non-localhost network interface, change with `--bind-address` flag.
- - serves HTTPS. Set cert with `--tls-cert-file` and key with `--tls-private-key-file` flag.
- - uses token-file or client-certificate based [authentication](/docs/admin/authentication).
- - uses policy-based [authorization](/docs/admin/authorization).
- 3. Removed: ReadOnly Port
- - For security reasons, this had to be removed. Use the [service account](/docs/user-guide/service-accounts) feature instead.
+ 1. `Localhost Port`:
+
+ - serves HTTP
+ - default is port 8080, change with `--insecure-port` flag.
+ - default IP is localhost, change with `--insecure-bind-address` flag.
+ - no authentication or authorization checks in HTTP
+ - protected by need to have host access
+ 2. `Secure Port`:
+
+ - default is port 6443, change with `--secure-port` flag.
+ - default IP is first non-localhost network interface, change with `--bind-address` flag.
+ - serves HTTPS. Set cert with `--tls-cert-file` and key with `--tls-private-key-file` flag.
+ - uses token-file or client-certificate based [authentication](/docs/admin/authentication).
+ - uses policy-based [authorization](/docs/admin/authorization).
+ 3. Removed: `ReadOnly Port`
+
+ - For security reasons, this had to be removed. Use the [service account](/docs/user-guide/service-accounts) feature instead.
## Proxies and Firewall rules
@@ -39,35 +42,31 @@ on the same machine as the apiserver process. The proxy serves HTTPS protected
by Basic Auth on port 443, and proxies to the apiserver on localhost:8080. In
these configurations the secure port is typically set to 6443.
-A firewall rule is typically configured to allow external HTTPS access to port 443.
+A firewall rule is typically configured to allow external HTTPS access to port
+443.
-The above are defaults and reflect how Kubernetes is deployed to Google Compute Engine using
-kube-up.sh. Other cloud providers may vary.
+The above are defaults and reflect how Kubernetes is deployed to Google Compute
+Engine using `kube-up.sh`. Other cloud providers may vary.
## Use Cases vs IP:Ports
-There are three differently configured serving ports because there are a
-variety of uses cases:
+There are differently configured serving ports to serve a variety of use cases:
1. Clients outside of a Kubernetes cluster, such as human running `kubectl`
- on desktop machine. Currently, accesses the Localhost Port via a proxy (nginx)
- running on the `kubernetes-master` machine. The proxy can use cert-based authentication
- or token-based authentication.
+on a desktop machine. Currently, accesses the Localhost Port via a proxy (nginx)
+running on the `kubernetes-master` machine. The proxy can use cert-based
+authentication or token-based authentication.
2. Processes running in Containers on Kubernetes that need to read from
- the apiserver. Currently, these can use a [service account](/docs/user-guide/service-accounts).
+the apiserver. Currently, these can use a [service account](/docs/user-guide/service-accounts).
3. Scheduler and Controller-manager processes, which need to do read-write
- API operations, using service accounts to avoid the need to be co-located.
+API operations, using service accounts to avoid the need to be co-located.
4. Kubelets, which need to do read-write API operations and are necessarily
- on different machines than the apiserver. Kubelet uses the Secure Port
- to get their pods, to find the services that a pod can see, and to
- write events. Credentials are distributed to kubelets at cluster
- setup time. Kubelet and kube-proxy can use cert-based authentication or token-based
- authentication.
+on different machines than the apiserver. Kubelet uses the Secure Port
+to get their pods, to find the services that a pod can see, and to
+write events. Credentials are distributed to kubelets at cluster
+setup time. Kubelet and kube-proxy can use cert-based authentication or
+token-based authentication.
## Expected changes
- Policy will limit the actions kubelets can do via the authed port.
-
-
-
-
diff --git a/docs/admin/admission-controllers.md b/docs/admin/admission-controllers.md
index 3af012fea7a..59e998b83db 100644
--- a/docs/admin/admission-controllers.md
+++ b/docs/admin/admission-controllers.md
@@ -136,7 +136,13 @@ namespace. In order to enforce integrity of that process, we strongly recommend
Yes.
-For Kubernetes 1.0, we strongly recommend running the following set of admission control plug-ins (order matters):
+For Kubernetes >= 1.2.0, we strongly recommend running the following set of admission control plug-ins (order matters):
+
+```shell
+--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
+```
+
+For Kubernetes >= 1.0.0, we strongly recommend running the following set of admission control plug-ins (order matters):
```shell
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
diff --git a/docs/admin/authentication.md b/docs/admin/authentication.md
index e59ba6fee53..52b5bf925dc 100644
--- a/docs/admin/authentication.md
+++ b/docs/admin/authentication.md
@@ -25,6 +25,7 @@ When using token authentication from an http client the apiserver expects an `Au
header with a value of `Bearer SOMETOKEN`.
**OpenID Connect ID Token** is enabled by passing the following options to the apiserver:
+
- `--oidc-issuer-url` (required) tells the apiserver where to connect to the OpenID provider. Only HTTPS scheme will be accepted.
- `--oidc-client-id` (required) is used by apiserver to verify the audience of the token.
A valid [ID token](http://openid.net/specs/openid-connect-core-1_0.html#IDToken) MUST have this
@@ -59,38 +60,47 @@ with a value of `Basic BASE64ENCODED(USER:PASSWORD)`.
**Keystone authentication** is enabled by passing the `--experimental-keystone-url=`
option to the apiserver during startup. The plugin is implemented in
-`plugin/pkg/auth/authenticator/request/keystone/keystone.go`.
+`plugin/pkg/auth/authenticator/password/keystone/keystone.go`.
+
For details on how to use keystone to manage projects and users, refer to the
[Keystone documentation](http://docs.openstack.org/developer/keystone/). Please note that
this plugin is still experimental which means it is subject to changes.
+
Please refer to the [discussion](https://github.com/kubernetes/kubernetes/pull/11798#issuecomment-129655212)
-and the [blueprint](https://github.com/kubernetes/kubernetes/issues/11626) for more details
+and the [blueprint](https://github.com/kubernetes/kubernetes/issues/11626) for more details.
## Plugin Development
-We plan for the Kubernetes API server to issue tokens
-after the user has been (re)authenticated by a *bedrock* authentication
-provider external to Kubernetes. We plan to make it easy to develop modules
-that interface between Kubernetes and a bedrock authentication provider (e.g.
-github.com, google.com, enterprise directory, kerberos, etc.)
+We plan for the Kubernetes API server to issue tokens after the user has been
+(re)authenticated by a *bedrock* authentication provider external to Kubernetes.
+We also plan to make it easy to develop modules that interface between
+Kubernetes and a bedrock authentication provider (e.g. github.com, google.com,
+enterprise directory, kerberos, etc.)
## APPENDIX
### Creating Certificates
-When using client certificate authentication, you can generate certificates manually or
-using an existing deployment script.
+When using client certificate authentication, you can generate certificates
+using an existing deployment script or manually through `easyrsa` or `openssl`.
-**Deployment script** is implemented at
-`cluster/saltbase/salt/generate-cert/make-ca-cert.sh`.
-Execute this script with two parameters. First is the IP address of apiserver, the second is
-a list of subject alternate names in the form `IP: or DNS:`.
-The script will generate three files:ca.crt, server.crt and server.key.
-Finally, add these parameters
-`--client-ca-file=/srv/kubernetes/ca.crt`
-`--tls-cert-file=/srv/kubernetes/server.cert`
-`--tls-private-key-file=/srv/kubernetes/server.key`
-into apiserver start parameters.
+#### Using an Existing Deployment Script
+
+The **deployment script** is implemented at
+`cluster/saltbase/salt/generate-cert/make-ca-cert.sh`.
+
+Execute this script with two parameters. The first is the IP address
+of apiserver. The second is a list of subject alternate names in the form `IP: or DNS:`.
+
+The script will generate three files: `ca.crt`, `server.crt`, and `server.key`.
+
+Finally, add the following parameters into apiserver start parameters:
+
+- `--client-ca-file=/srv/kubernetes/ca.crt`
+- `--tls-cert-file=/srv/kubernetes/server.cert`
+- `--tls-private-key-file=/srv/kubernetes/server.key`
+
+#### easyrsa
**easyrsa** can be used to manually generate certificates for your cluster.
@@ -107,29 +117,34 @@ into apiserver start parameters.
(build-server-full [filename]: Generate a keypair and sign locally for a client or server)
./easyrsa --subject-alt-name="IP:${MASTER_IP}" build-server-full kubernetes-master nopass
-1. Copy `pki/ca.crt` `pki/issued/kubernetes-master.crt`
- `pki/private/kubernetes-master.key` to your directory.
-1. Remember fill the parameters
- `--client-ca-file=/yourdirectory/ca.crt`
- `--tls-cert-file=/yourdirectory/server.cert`
- `--tls-private-key-file=/yourdirectory/server.key`
- and add these into apiserver start parameters.
+1. Copy `pki/ca.crt`, `pki/issued/kubernetes-master.crt`, and `pki/private/kubernetes-master.key` to your directory.
+1. Fill in and add the following parameters into the apiserver start parameters:
+
+ --client-ca-file=/yourdirectory/ca.crt
+ --tls-cert-file=/yourdirectory/server.cert
+ --tls-private-key-file=/yourdirectory/server.key
+
+#### openssl
**openssl** can also be use to manually generate certificates for your cluster.
-1. Generate a ca.key with 2048bit
- `openssl genrsa -out ca.key 2048`
-1. According to the ca.key generate a ca.crt. (-days set the certificate effective time).
- `openssl req -x509 -new -nodes -key ca.key -subj "/CN=${MASTER_IP}" -days 10000 -out ca.crt`
+1. Generate a ca.key with 2048bit:
+
+ openssl genrsa -out ca.key 2048
+1. Based on the ca.key, generate a ca.crt (use -days to set the certificate effective time):
+
+ openssl req -x509 -new -nodes -key ca.key -subj "/CN=${MASTER_IP}" -days 10000 -out ca.crt
1. Generate a server.key with 2048bit
- `openssl genrsa -out server.key 2048`
-1. According to the server.key generate a server.csr.
- `openssl req -new -key server.key -subj "/CN=${MASTER_IP}" -out server.csr`
-1. According to the ca.key, ca.crt and server.csr generate the server.crt.
- `openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt
- -days 10000`
+
+ openssl genrsa -out server.key 2048
+1. Based on the server.key, generate a server.csr:
+
+ openssl req -new -key server.key -subj "/CN=${MASTER_IP}" -out server.csr
+1. Based on the ca.key, ca.crt and server.csr, generate the server.crt:
+
+ openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 10000
1. View the certificate.
- `openssl x509 -noout -text -in ./server.crt`
- Finally, do not forget fill the same parameters and add parameters into apiserver start parameters.
+ openssl x509 -noout -text -in ./server.crt
+Finally, do not forget to fill out and add the same parameters into the apiserver start parameters.
diff --git a/docs/admin/authorization.md b/docs/admin/authorization.md
index 2571fb94a89..3b521b5df14 100644
--- a/docs/admin/authorization.md
+++ b/docs/admin/authorization.md
@@ -13,31 +13,35 @@ policies. An API call must be allowed by some policy in order to proceed.
The following implementations are available, and are selected by flag:
- - `--authorization-mode=AlwaysDeny`
- - `--authorization-mode=AlwaysAllow`
- - `--authorization-mode=ABAC`
- - `--authorization-mode=Webhook`
-
-`AlwaysDeny` blocks all requests (used in tests).
-`AlwaysAllow` allows all requests; use if you don't need authorization.
-`ABAC` allows for user-configured authorization policy. ABAC stands for Attribute-Based Access Control.
-`Webhook` allows for authorization to be driven by a remote service using REST.
+ - `--authorization-mode=AlwaysDeny` blocks all requests (used in tests).
+ - `--authorization-mode=AlwaysAllow` allows all requests; use if you don't
+need authorization.
 - `--authorization-mode=ABAC` allows for user-configured authorization policy.
+ABAC stands for
+ Attribute-Based Access Control.
+ - `--authorization-mode=Webhook` allows for authorization to be driven by a
+remote service using REST.
## ABAC Mode
### Request Attributes
A request has the following attributes that can be considered for authorization:
+
- user (the user-string which a user was authenticated as).
- group (the list of group names the authenticated user is a member of).
- whether the request is for an API resource.
- the request path.
- - allows authorizing access to miscellaneous endpoints like `/api` or `/healthz` (see [kubectl](#kubectl)).
+ - allows authorizing access to miscellaneous endpoints like `/api` or
+`/healthz` (see [kubectl](#kubectl)).
- the request verb.
- - API verbs like `get`, `list`, `create`, `update`, `watch`, `delete`, and `deletecollection` are used for API requests
- - HTTP verbs like `get`, `post`, `put`, and `delete` are used for non-API requests
+ - API verbs like `get`, `list`, `create`, `update`, `watch`, `delete`, and
+`deletecollection` are used for API requests
+ - HTTP verbs like `get`, `post`, `put`, and `delete` are used for non-API
+requests
- what resource is being accessed (for API requests only)
- - the namespace of the object being accessed (for namespaced API requests only)
+ - the namespace of the object being accessed (for namespaced API requests
+only)
- the API group being accessed (for API requests only)
We anticipate adding more attributes to allow finer grained access control and
@@ -47,33 +51,33 @@ to assist in policy management.
For mode `ABAC`, also specify `--authorization-policy-file=SOME_FILENAME`.
-The file format is [one JSON object per line](http://jsonlines.org/). There should be no enclosing list or map, just
-one map per line.
+The file format is [one JSON object per line](http://jsonlines.org/). There
+should be no enclosing list or map, just one map per line.
+
+Each line is a "policy object". A policy object is a map with the following
+properties:
-Each line is a "policy object". A policy object is a map with the following properties:
- Versioning properties:
- `apiVersion`, type string; valid values are "abac.authorization.kubernetes.io/v1beta1". Allows versioning and conversion of the policy format.
- `kind`, type string: valid values are "Policy". Allows versioning and conversion of the policy format.
-
- `spec` property set to a map with the following properties:
- Subject-matching properties:
- `user`, type string; the user-string from `--token-auth-file`. If you specify `user`, it must match the username of the authenticated user. `*` matches all requests.
- `group`, type string; if you specify `group`, it must match one of the groups of the authenticated user. `*` matches all requests.
-
- `readonly`, type boolean, when true, means that the policy only applies to get, list, and watch operations.
-
- Resource-matching properties:
- `apiGroup`, type string; an API group, such as `extensions`. `*` matches all API groups.
- `namespace`, type string; a namespace string. `*` matches all resource requests.
- `resource`, type string; a resource, such as `pods`. `*` matches all resource requests.
-
- Non-resource-matching properties:
- `nonResourcePath`, type string; matches the non-resource request paths (like `/version` and `/apis`). `*` matches all non-resource requests. `/foo/*` matches `/foo/` and all of its subpaths.
-An unset property is the same as a property set to the zero value for its type (e.g. empty string, 0, false).
-However, unset should be preferred for readability.
+An unset property is the same as a property set to the zero value for its type
+(e.g. empty string, 0, false). However, unset should be preferred for
+readability.
-In the future, policies may be expressed in a JSON format, and managed via a REST interface.
+In the future, policies may be expressed in a JSON format, and managed via a
+REST interface.
### Authorization Algorithm
@@ -84,23 +88,32 @@ are set to the zero value of its type (e.g. empty string, 0, false).
A property set to "*" will match any value of the corresponding attribute.
-The tuple of attributes is checked for a match against every policy in the policy file.
-If at least one line matches the request attributes, then the request is authorized (but may fail later validation).
+The tuple of attributes is checked for a match against every policy in the
+policy file. If at least one line matches the request attributes, then the
+request is authorized (but may fail later validation).
-To permit any user to do something, write a policy with the user property set to "*".
-To permit a user to do anything, write a policy with the apiGroup, namespace, resource, and nonResourcePath properties set to "*".
+To permit any user to do something, write a policy with the user property set to
+"*".
+
+To permit a user to do anything, write a policy with the apiGroup, namespace,
+resource, and nonResourcePath properties set to "*".
### Kubectl
-Kubectl uses the `/api` and `/apis` endpoints of api-server to negotiate client/server versions. To validate objects sent to the API by create/update operations, kubectl queries certain swagger resources. For API version `v1` those would be `/swaggerapi/api/v1` & `/swaggerapi/experimental/v1`.
+Kubectl uses the `/api` and `/apis` endpoints of api-server to negotiate
+client/server versions. To validate objects sent to the API by create/update
+operations, kubectl queries certain swagger resources. For API version `v1`
+those would be `/swaggerapi/api/v1` & `/swaggerapi/experimental/v1`.
-When using ABAC authorization, those special resources have to be explicitly exposed via the `nonResourcePath` property in a policy (see [examples](#examples) below):
+When using ABAC authorization, those special resources have to be explicitly
+exposed via the `nonResourcePath` property in a policy (see [examples](#examples) below):
* `/api`, `/api/*`, `/apis`, and `/apis/*` for API version negotiation.
* `/version` for retrieving the server version via `kubectl version`.
* `/swaggerapi/*` for create/update operations.
-To inspect the HTTP calls involved in a specific kubectl operation you can turn up the verbosity:
+To inspect the HTTP calls involved in a specific kubectl operation you can turn
+up the verbosity:
kubectl --v=8 version
@@ -116,18 +129,22 @@ To inspect the HTTP calls involved in a specific kubectl operation you can turn
### A quick note on service accounts
-A service account automatically generates a user. The user's name is generated according to the naming convention:
+A service account automatically generates a user. The user's name is generated
+according to the naming convention:
```shell
system:serviceaccount::
```
-Creating a new namespace also causes a new service account to be created, of this form:*
+Creating a new namespace also causes a new service account to be created, of
+this form:
```shell
system:serviceaccount::default
```
-For example, if you wanted to grant the default service account in the kube-system full privilege to the API, you would add this line to your policy file:
+For example, if you wanted to grant the default service account in the
+kube-system full privilege to the API, you would add this line to your policy
+file:
```json
{"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","user":"system:serviceaccount:kube-system:default","namespace":"*","resource":"*","apiGroup":"*"}
@@ -137,13 +154,17 @@ The apiserver will need to be restarted to pickup the new policy lines.
## Webhook Mode
-When specified, mode `Webhook` causes Kubernetes to query an outside REST service when determining user privileges.
+When specified, mode `Webhook` causes Kubernetes to query an outside REST
+service when determining user privileges.
### Configuration File Format
-Mode `Webhook` requires a file for HTTP configuration, specify by the `--authorization-webhook-config-file=SOME_FILENAME` flag.
+Mode `Webhook` requires a file for HTTP configuration, specified by the
+`--authorization-webhook-config-file=SOME_FILENAME` flag.
-The configuration file uses the [kubeconfig](/docs/user-guide/kubeconfig-file/) file format. Within the file "users" refers to the API Server webhook and "clusters" refers to the remote service.
+The configuration file uses the [kubeconfig](/docs/user-guide/kubeconfig-file/)
+file format. Within the file "users" refers to the API Server webhook and
+"clusters" refers to the remote service.
A configuration example which uses HTTPS client auth:
@@ -173,9 +194,17 @@ contexts:
### Request Payloads
-When faced with an authorization decision, the API Server POSTs a JSON serialized api.authorization.v1beta1.SubjectAccessReview object describing the action. This object contains fields describing the user attempting to make the request, and either details about the resource being accessed or requests attributes.
+When faced with an authorization decision, the API Server POSTs a JSON
+serialized api.authorization.v1beta1.SubjectAccessReview object describing the
+action. This object contains fields describing the user attempting to make the
+request, and either details about the resource being accessed or requests
+attributes.
-Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/api/) as other Kubernetes API objects. Implementers should be aware of loser compatibility promises for beta objects and check the "apiVersion" field of the request to ensure correct deserialization. Additionally, the API Server must enable the `authorization.k8s.io/v1beta1` API extensions group (`--runtime-config=authorization.k8s.io/v1beta1=true`).
+Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/api/)
+as other Kubernetes API objects. Implementers should be aware of looser
+compatibility promises for beta objects and check the "apiVersion" field of the
+request to ensure correct deserialization. Additionally, the API Server must
+enable the `authorization.k8s.io/v1beta1` API extensions group (`--runtime-config=authorization.k8s.io/v1beta1=true`).
An example request body:
@@ -199,7 +228,9 @@ An example request body:
}
```
-The remote service is expected to fill the SubjectAccessReviewStatus field of the request and respond to either allow or disallow access. The response body's "spec" field is ignored and may be omitted. A permissive response would return:
+The remote service is expected to fill the SubjectAccessReviewStatus field of
+the request and respond to either allow or disallow access. The response body's
+"spec" field is ignored and may be omitted. A permissive response would return:
```json
{
@@ -244,9 +275,15 @@ Access to non-resource paths are sent as:
}
```
-Non-resource paths include: `/api`, `/apis`, `/metrics`, `/resetMetrics`, `/logs`, `/debug`, `/healthz`, `/swagger-ui/`, `/swaggerapi/`, `/ui`, and `/version.` Clients require access to `/api`, `/api/*/`, `/apis/`, `/apis/*`, `/apis/*/*`, and `/version` to discover what resources and versions are present on the server. Access to other non-resource paths can be disallowed without restricting access to the REST api.
+Non-resource paths include: `/api`, `/apis`, `/metrics`, `/resetMetrics`,
+`/logs`, `/debug`, `/healthz`, `/swagger-ui/`, `/swaggerapi/`, `/ui`, and
+`/version`. Clients require access to `/api`, `/api/*/`, `/apis/`, `/apis/*`,
+`/apis/*/*`, and `/version` to discover what resources and versions are present
+on the server. Access to other non-resource paths can be disallowed without
+restricting access to the REST api.
-For further documentation refer to the authorization.v1beta1 API objects and plugin/pkg/auth/authorizer/webhook/webhook.go.
+For further documentation refer to the authorization.v1beta1 API objects and
+plugin/pkg/auth/authorizer/webhook/webhook.go.
## Plugin Development
@@ -267,5 +304,5 @@ Authorization plugin code goes in `pkg/auth/authorizer/$MODULENAME`.
An authorization module can be completely implemented in go, or can call out
to a remote authorization service. Authorization modules can implement
their own caching to reduce the cost of repeated authorization calls with the
-same or similar arguments. Developers should then consider the interaction between
-caching and revocation of permissions.
\ No newline at end of file
+same or similar arguments. Developers should then consider the interaction
+between caching and revocation of permissions.
diff --git a/docs/admin/cluster-components.md b/docs/admin/cluster-components.md
index 136d344f604..2e7e663f42c 100644
--- a/docs/admin/cluster-components.md
+++ b/docs/admin/cluster-components.md
@@ -68,7 +68,7 @@ Addon objects are created in the "kube-system" namespace.
#### DNS
While the other addons are not strictly required, all Kubernetes
-clusters should have [cluster DNS](dns.md), as many examples rely on it.
+clusters should have [cluster DNS](/docs/admin/dns/), as many examples rely on it.
Cluster DNS is a DNS server, in addition to the other DNS server(s) in your
environment, which serves DNS records for Kubernetes services.
@@ -88,15 +88,15 @@ about containers in a central database, and provides a UI for browsing that data
#### Cluster-level Logging
-[Container Logging](/docs/user-guide/monitoring.md) saves container logs
+[Container Logging](/docs/user-guide/monitoring) saves container logs
to a central log store with search/browsing interface. There are two
implementations:
* [Cluster-level logging to Google Cloud Logging](
-docs/user-guide/logging/#cluster-level-logging-to-google-cloud-logging)
+/docs/user-guide/logging/#cluster-level-logging-to-google-cloud-logging)
* [Cluster-level Logging with Elasticsearch and Kibana](
-docs/user-guide/logging/#cluster-level-logging-with-elasticsearch-and-kibana)
+/docs/getting-started-guides/logging-elasticsearch/)
## Node components
@@ -138,4 +138,3 @@ running.
### fluentd
`fluentd` is a daemon which helps provide [cluster-level logging](#cluster-level-logging).
-
diff --git a/docs/admin/cluster-large.md b/docs/admin/cluster-large.md
index e6434cf2c3a..be05cb627f0 100644
--- a/docs/admin/cluster-large.md
+++ b/docs/admin/cluster-large.md
@@ -102,7 +102,8 @@ To avoid running into cluster addon resource issues, when creating a cluster wit
* [FluentD with ElasticSearch Plugin](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml)
* [FluentD with GCP Plugin](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml)
-Heapster's resource limits are set dynamically based on the initial size of your cluster (see [#16185](http://issue.k8s.io/16185) and [#21258](http://issue.k8s.io/21258)). If you find that Heapster is running
+Heapster's resource limits are set dynamically based on the initial size of your cluster (see [#16185](http://issue.k8s.io/16185)
+and [#22940](http://issue.k8s.io/22940)). If you find that Heapster is running
out of resources, you should adjust the formulas that compute heapster memory request (see those PRs for details).
For directions on how to detect if addon containers are hitting resource limits, see the [Troubleshooting section of Compute Resources](/docs/user-guide/compute-resources/#troubleshooting).
diff --git a/docs/admin/high-availability.md b/docs/admin/high-availability/index.md
similarity index 93%
rename from docs/admin/high-availability.md
rename to docs/admin/high-availability/index.md
index 16a596f2fe9..01ded6eb05a 100644
--- a/docs/admin/high-availability.md
+++ b/docs/admin/high-availability/index.md
@@ -28,9 +28,8 @@ The steps involved are as follows:
* [Setting up master-elected Kubernetes scheduler and controller-manager daemons](#master-elected-components)
Here's what the system should look like when it's finished:
-
-Ready? Let's get started.
+
## Initial set-up
@@ -55,11 +54,11 @@ choices. For example, on systemd-based systems (e.g. RHEL, CentOS), you can run
If you are extending from a standard Kubernetes installation, the `kubelet` binary should already be present on your system. You can run
`which kubelet` to determine if the binary is in fact installed. If it is not installed,
you should install the [kubelet binary](https://storage.googleapis.com/kubernetes-release/release/v0.19.3/bin/linux/amd64/kubelet), the
-[kubelet init file](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/kubelet/initd) and [high-availability/default-kubelet](/docs/admin/high-availability/default-kubelet)
+[kubelet init file](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/kubelet/initd) and [default-kubelet](/docs/admin/high-availability/default-kubelet)
scripts.
-If you are using monit, you should also install the monit daemon (`apt-get install monit`) and the [high-availability/monit-kubelet](/docs/admin/high-availability/monit-kubelet) and
-[high-availability/monit-docker](/docs/admin/high-availability/monit-docker) configs.
+If you are using monit, you should also install the monit daemon (`apt-get install monit`) and the [monit-kubelet](/docs/admin/high-availability/monit-kubelet) and
+[monit-docker](/docs/admin/high-availability/monit-docker) configs.
On systemd systems you `systemctl enable kubelet` and `systemctl enable docker`.
@@ -79,7 +78,7 @@ size of the cluster from three to five nodes. If that is still insufficient, yo
### Clustering etcd
The full details of clustering etcd are beyond the scope of this document, lots of details are given on the
-[etcd clustering page](https://github.com/coreos/etcd/blob/master/Documentation/clustering.md). This example walks through
+[etcd clustering page](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/clustering.md). This example walks through
a simple cluster set up, using etcd's built in discovery to build our cluster.
First, hit the etcd discovery service to create a new token:
@@ -94,7 +93,7 @@ The kubelet on each node actively monitors the contents of that directory, and i
server from the definition of the pod specified in `etcd.yaml`.
Note that in `etcd.yaml` you should substitute the token URL you got above for `${DISCOVERY_TOKEN}` on all three machines,
-and you should substitute a different name (e.g. `node-1`) for ${NODE_NAME} and the correct IP address
+and you should substitute a different name (e.g. `node-1`) for `${NODE_NAME}` and the correct IP address
for `${NODE_IP}` on each machine.
diff --git a/docs/admin/index.md b/docs/admin/index.md
index 95a1aec99a5..1fc64186ea6 100644
--- a/docs/admin/index.md
+++ b/docs/admin/index.md
@@ -61,7 +61,7 @@ project](/docs/admin/salt).
## Multi-tenant support
-* **Resource Quota** ([resource-quota.md](/docs/admin/resource-quota))
+* **Resource Quota** ([resourcequota/](/docs/admin/resourcequota/))
## Security
diff --git a/docs/admin/limitrange/index.md b/docs/admin/limitrange/index.md
index 9b39ede11db..7c89b3058b3 100644
--- a/docs/admin/limitrange/index.md
+++ b/docs/admin/limitrange/index.md
@@ -22,7 +22,7 @@ may be too small to be useful, but big enough for the waste to be costly over th
the cluster operator may want to set limits that a pod must consume at least 20% of the memory and cpu of their
average node size in order to provide for more uniform scheduling and to limit waste.
-This example demonstrates how limits can be applied to a Kubernetes namespace to control
+This example demonstrates how limits can be applied to a Kubernetes [namespace](/docs/admin/namespaces/walkthrough/) to control
min/max resource limits per pod. In addition, this example demonstrates how you can
apply default resource limits to pods in the absence of an end-user specified value.
@@ -41,12 +41,17 @@ This example will work in a custom namespace to demonstrate the concepts involve
Let's create a new namespace called limit-example:
```shell
-$ kubectl create -f docs/admin/limitrange/namespace.yaml
-namespace "limit-example" created
+$ kubectl create namespace limit-example
+namespace "limit-example" created
+```
+
+Note that `kubectl` commands will print the type and name of the resource created or mutated, which can then be used in subsequent commands:
+
+```shell
$ kubectl get namespaces
-NAME LABELS STATUS AGE
-default Active 5m
-limit-example Active 53s
+NAME STATUS AGE
+default Active 51s
+limit-example Active 45s
```
## Step 2: Apply a limit to the namespace
@@ -95,36 +100,45 @@ were previously created in a namespace.
If a resource (cpu or memory) is being restricted by a limit, the user will get an error at time
of creation explaining why.
-Let's first spin up a replication controller that creates a single container pod to demonstrate
+Let's first spin up a [Deployment](/docs/user-guide/deployments) that creates a single container Pod to demonstrate
how default values are applied to each pod.
```shell
$ kubectl run nginx --image=nginx --replicas=1 --namespace=limit-example
-replicationcontroller "nginx" created
-$ kubectl get pods --namespace=limit-example
-NAME READY STATUS RESTARTS AGE
-nginx-aq0mf 1/1 Running 0 35s
-$ kubectl get pods nginx-aq0mf --namespace=limit-example -o yaml | grep resources -C 8
+deployment "nginx" created
```
-```yaml
- resourceVersion: "127"
- selfLink: /api/v1/namespaces/limit-example/pods/nginx-aq0mf
- uid: 51be42a7-7156-11e5-9921-286ed488f785
-spec:
- containers:
- - image: nginx
- imagePullPolicy: IfNotPresent
- name: nginx
- resources:
- limits:
- cpu: 300m
- memory: 200Mi
- requests:
- cpu: 200m
- memory: 100Mi
- terminationMessagePath: /dev/termination-log
- volumeMounts:
+Note that `kubectl run` creates a Deployment named "nginx" on Kubernetes clusters >= v1.2. If you are running older versions, it creates replication controllers instead.
+If you want to obtain the old behavior, use `--generator=run/v1` to create replication controllers. See [`kubectl run`](/docs/user-guide/kubectl/kubectl_run/) for more details.
+The Deployment manages 1 replica of single container Pod. Let's take a look at the Pod it manages. First, find the name of the Pod:
+
+```shell
+$ kubectl get pods --namespace=limit-example
+NAME READY STATUS RESTARTS AGE
+nginx-2040093540-s8vzu 1/1 Running 0 11s
+```
+
+Let's print this Pod with yaml output format (using `-o yaml` flag), and then `grep` the `resources` field. Note that your pod name will be different.
+
+``` shell
+$ kubectl get pods nginx-2040093540-s8vzu --namespace=limit-example -o yaml | grep resources -C 8
+ resourceVersion: "57"
+ selfLink: /api/v1/namespaces/limit-example/pods/nginx-2040093540-ivimu
+ uid: 67b20741-f53b-11e5-b066-64510658e388
+spec:
+ containers:
+ - image: nginx
+ imagePullPolicy: Always
+ name: nginx
+ resources:
+ limits:
+ cpu: 300m
+ memory: 200Mi
+ requests:
+ cpu: 200m
+ memory: 100Mi
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
```
Note that our nginx container has picked up the namespace default cpu and memory resource *limits* and *requests*.
@@ -141,37 +155,39 @@ Let's create a pod that falls within the allowed limit boundaries.
```shell
$ kubectl create -f docs/admin/limitrange/valid-pod.yaml --namespace=limit-example
pod "valid-pod" created
-$ kubectl get pods valid-pod --namespace=limit-example -o yaml | grep -C 6 resources
```
-```yaml
- uid: 162a12aa-7157-11e5-9921-286ed488f785
-spec:
- containers:
- - image: gcr.io/google_containers/serve_hostname
- imagePullPolicy: IfNotPresent
- name: kubernetes-serve-hostname
- resources:
- limits:
- cpu: "1"
- memory: 512Mi
- requests:
- cpu: "1"
- memory: 512Mi
+Now look at the Pod's resources field:
+
+```shell
+$ kubectl get pods valid-pod --namespace=limit-example -o yaml | grep -C 6 resources
+ uid: 3b1bfd7a-f53c-11e5-b066-64510658e388
+spec:
+ containers:
+ - image: gcr.io/google_containers/serve_hostname
+ imagePullPolicy: Always
+ name: kubernetes-serve-hostname
+ resources:
+ limits:
+ cpu: "1"
+ memory: 512Mi
+ requests:
+ cpu: "1"
+ memory: 512Mi
```
Note that this pod specifies explicit resource *limits* and *requests* so it did not pick up the namespace
default values.
-Note: The *limits* for CPU resource are not enforced in the default Kubernetes setup on the physical node
+Note: The *limits* for CPU resource are enforced in the default Kubernetes setup on the physical node
that runs the container unless the administrator deploys the kubelet with the following flag:
```shell
$ kubelet --help
Usage of kubelet
....
- --cpu-cfs-quota[=false]: Enable CPU CFS quota enforcement for containers that specify CPU limits
-$ kubelet --cpu-cfs-quota=true ...
+ --cpu-cfs-quota[=true]: Enable CPU CFS quota enforcement for containers that specify CPU limits
+$ kubelet --cpu-cfs-quota=false ...
```
## Step 4: Cleanup
@@ -182,8 +198,8 @@ To remove the resources used by this example, you can just delete the limit-exam
$ kubectl delete namespace limit-example
namespace "limit-example" deleted
$ kubectl get namespaces
-NAME LABELS STATUS AGE
-default Active 20m
+NAME STATUS AGE
+default Active 12m
```
## Summary
@@ -191,4 +207,4 @@ default Active 20m
Cluster operators that want to restrict the amount of resources a single container or pod may consume
are able to define allowable ranges per Kubernetes namespace. In the absence of any explicit assignments,
the Kubernetes system is able to apply default resource *limits* and *requests* if desired in order to
-constrain the amount of resource a pod consumes on a node.
\ No newline at end of file
+constrain the amount of resource a pod consumes on a node.
diff --git a/docs/admin/multiple-schedulers.md b/docs/admin/multiple-schedulers.md
new file mode 100644
index 00000000000..a481831e504
--- /dev/null
+++ b/docs/admin/multiple-schedulers.md
@@ -0,0 +1,156 @@
+---
+---
+
+Kubernetes ships with a default scheduler that is described [here](/docs/admin/kube-scheduler/).
+If the default scheduler does not suit your needs you can implement your own scheduler.
+Not just that, you can even run multiple schedulers simultaneously alongside the default
+scheduler and instruct Kubernetes what scheduler to use for each of your pods. Let's
+learn how to run multiple schedulers in Kubernetes with an example.
+
+A detailed description of how to implement a scheduler is outside the scope of this
+document. Please refer to the kube-scheduler implementation in
+[plugin/pkg/scheduler](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/plugin/pkg/scheduler)
+in the Kubernetes source directory for a canonical example.
+
+### 1. Package the scheduler
+
+Package your scheduler binary into a container image. For the purposes of this example,
+let's just use the default scheduler (kube-scheduler) as our second scheduler as well.
+Clone the [Kubernetes source code from Github](https://github.com/kubernetes/kubernetes)
+and build the source.
+
+```shell
+git clone https://github.com/kubernetes/kubernetes.git
+cd kubernetes
+hack/build-go.sh
+```
+
+Create a container image containing the kube-scheduler binary. Here is the `Dockerfile`
+to build the image:
+
+```docker
+FROM busybox
+ADD _output/local/go/bin/kube-scheduler /usr/local/bin/kube-scheduler
+```
+
+Save the file as `Dockerfile`, build the image and push it to a registry. This example
+pushes the image to
+[Google Container Registry (GCR)](https://cloud.google.com/container-registry/).
+For more details, please read the GCR
+[documentation](https://cloud.google.com/container-registry/docs/).
+
+```shell
+docker build -t my-kube-scheduler:1.0 .
+gcloud docker push gcr.io/my-gcp-project/my-kube-scheduler:1.0
+```
+
+### 2. Define a Kubernetes Deployment for the scheduler
+
+Now that we have our scheduler in a container image, we can just create a pod
+config for it and run it in our Kubernetes cluster. But instead of creating a pod
+directly in the cluster, let's use a [Deployment](/docs/user-guide/deployments/)
+for this example. A [Deployment](/docs/user-guide/deployments/) manages a
+[Replica Set](/docs/user-guide/replicasets/) which in turn manages the pods,
+thereby making the scheduler resilient to failures. Here is the deployment
+config. Save it as `my-scheduler.yaml`:
+
+{% include code.html language="yaml" file="multiple-schedulers/my-scheduler.yaml" ghlink="/docs/admin/multiple-schedulers/my-scheduler.yaml" %}
+
+An important thing to note here is that the name of the scheduler specified as an
+argument to the scheduler command in the container spec should be unique. This is the name that is matched against the value of the optional `scheduler.alpha.kubernetes.io/name` annotation on pods, to determine whether this scheduler is responsible for scheduling a particular pod.
+
+Please see the
+[kube-scheduler documentation](/docs/admin/kube-scheduler/) for
+detailed description of other command line arguments.
+
+### 3. Run the second scheduler in the cluster
+
+In order to run your scheduler in a Kubernetes cluster, just create the deployment
+specified in the config above in a Kubernetes cluster:
+
+```shell
+kubectl create -f my-scheduler.yaml
+```
+
+Verify that the scheduler pod is running:
+
+```shell
+$ kubectl get pods --namespace=kube-system
+NAME READY STATUS RESTARTS AGE
+....
+my-scheduler-lnf4s-4744f 1/1 Running 0 2m
+...
+```
+
+You should see a "Running" my-scheduler pod, in addition to the default kube-scheduler
+pod in this list.
+
+### 4. Specify schedulers for pods
+
+Now that our second scheduler is running, let's create some pods, and direct them to be scheduled by either the default scheduler or the one we just deployed. In order to schedule a given pod using a specific scheduler, we specify the name of the
+scheduler as an annotation in that pod spec. Let's look at three examples.
+
+
+1. Pod spec without any scheduler annotation
+
+ {% include code.html language="yaml" file="multiple-schedulers/pod1.yaml" ghlink="/docs/admin/multiple-schedulers/pod1.yaml" %}
+
+ When no scheduler annotation is supplied, the pod is automatically scheduled using the
+ default-scheduler.
+
+ Save this file as `pod1.yaml` and submit it to the Kubernetes cluster.
+
+ ```shell
+ kubectl create -f pod1.yaml
+ ```
+2. Pod spec with `default-scheduler` annotation
+
+ {% include code.html language="yaml" file="multiple-schedulers/pod2.yaml" ghlink="/docs/admin/multiple-schedulers/pod2.yaml" %}
+
+ A scheduler is specified by supplying the scheduler name as a value to the annotation
+ with key `scheduler.alpha.kubernetes.io/name`. In this case, we supply the name of the
+ default scheduler which is `default-scheduler`.
+
+ Save this file as `pod2.yaml` and submit it to the Kubernetes cluster.
+
+ ```shell
+ kubectl create -f pod2.yaml
+ ```
+3. Pod spec with `my-scheduler` annotation
+
+ {% include code.html language="yaml" file="multiple-schedulers/pod3.yaml" ghlink="/docs/admin/multiple-schedulers/pod3.yaml" %}
+
+ In this case, we specify that this pod should be scheduled using the scheduler that we
+ deployed - `my-scheduler`. Note that the value of the annotation with key
+ `scheduler.alpha.kubernetes.io/name` should match the name supplied to the scheduler
+ command as an argument in the deployment config for the scheduler.
+
+ Save this file as `pod3.yaml` and submit it to the Kubernetes cluster.
+
+ ```shell
+ kubectl create -f pod3.yaml
+ ```
+
+ Verify that all three pods are running.
+
+ ```shell
+ kubectl get pods
+ ```
+
+### Verifying that the pods were scheduled using the desired schedulers
+
+In order to make it easier to work through these examples, we did not verify that the
+pods were actually scheduled using the desired schedulers. We can verify that by
+changing the order of pod and deployment config submissions above. If we submit all the
+pod configs to a Kubernetes cluster before submitting the scheduler deployment config,
+we see that the pod `annotation-second-scheduler` remains in "Pending" state forever
+while the other two pods get scheduled. Once we submit the scheduler deployment config
+and our new scheduler starts running, the `annotation-second-scheduler` pod gets
+scheduled as well.
+
+Alternatively, one could just look at the "Scheduled" entries in the event logs to
+verify that the pods were scheduled by the desired schedulers.
+
+```shell
+kubectl get events
+```
diff --git a/docs/admin/multiple-schedulers/my-scheduler.yaml b/docs/admin/multiple-schedulers/my-scheduler.yaml
new file mode 100644
index 00000000000..0a140deead6
--- /dev/null
+++ b/docs/admin/multiple-schedulers/my-scheduler.yaml
@@ -0,0 +1,40 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ component: scheduler
+ tier: control-plane
+ name: my-scheduler
+ namespace: kube-system
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ component: scheduler
+ tier: control-plane
+ version: second
+ spec:
+ containers:
+ - command: [/usr/local/bin/kube-scheduler, --address=0.0.0.0,
+ --scheduler-name=my-scheduler]
+ image: gcr.io/my-gcp-project/my-kube-scheduler:1.0
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 10251
+ initialDelaySeconds: 15
+ name: kube-second-scheduler
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: 10251
+ resources:
+ requests:
+ cpu: '0.1'
+ securityContext:
+ privileged: false
+ volumeMounts: []
+ hostNetwork: false
+ hostPID: false
+ volumes: []
\ No newline at end of file
diff --git a/docs/admin/multiple-schedulers/pod1.yaml b/docs/admin/multiple-schedulers/pod1.yaml
new file mode 100644
index 00000000000..733aa97d99b
--- /dev/null
+++ b/docs/admin/multiple-schedulers/pod1.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: no-annotation
+ labels:
+ name: multischeduler-example
+spec:
+ containers:
+ - name: pod-with-no-annotation-container
+ image: gcr.io/google_containers/pause:2.0
\ No newline at end of file
diff --git a/docs/admin/multiple-schedulers/pod2.yaml b/docs/admin/multiple-schedulers/pod2.yaml
new file mode 100644
index 00000000000..2f9fdf9875c
--- /dev/null
+++ b/docs/admin/multiple-schedulers/pod2.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: annotation-default-scheduler
+ annotations:
+ scheduler.alpha.kubernetes.io/name: default-scheduler
+ labels:
+ name: multischeduler-example
+spec:
+ containers:
+ - name: pod-with-default-annotation-container
+ image: gcr.io/google_containers/pause:2.0
\ No newline at end of file
diff --git a/docs/admin/multiple-schedulers/pod3.yaml b/docs/admin/multiple-schedulers/pod3.yaml
new file mode 100644
index 00000000000..52276df5f6d
--- /dev/null
+++ b/docs/admin/multiple-schedulers/pod3.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: annotation-second-scheduler
+ annotations:
+ scheduler.alpha.kubernetes.io/name: my-scheduler
+ labels:
+ name: multischeduler-example
+spec:
+ containers:
+ - name: pod-with-second-annotation-container
+ image: gcr.io/google_containers/pause:2.0
\ No newline at end of file
diff --git a/docs/admin/multiple-zones.md b/docs/admin/multiple-zones.md
new file mode 100644
index 00000000000..e420bda3041
--- /dev/null
+++ b/docs/admin/multiple-zones.md
@@ -0,0 +1,313 @@
+---
+---
+
+## Introduction
+
+Kubernetes 1.2 adds support for running a single cluster in multiple failure zones
+(GCE calls them simply "zones", AWS calls them "availability zones", here we'll refer to them as "zones").
+This is a lightweight version of a broader effort for federating multiple
+Kubernetes clusters together (sometimes referred to by the affectionate
+nickname ["Ubernetes"](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/federation.md)).
+Full federation will allow combining separate
+Kubernetes clusters running in different regions or clouds. However, many
+users simply want to run a more available Kubernetes cluster in multiple zones
+of their cloud provider, and this is what the multizone support in 1.2 allows
+(we nickname this "Ubernetes Lite").
+
+Multizone support is deliberately limited: a single Kubernetes cluster can run
+in multiple zones, but only within the same region (and cloud provider). Only
+GCE and AWS are currently supported automatically (though it is easy to
+add similar support for other clouds or even bare metal, by simply arranging
+for the appropriate labels to be added to nodes and volumes).
+
+
+* TOC
+{:toc}
+
+## Functionality
+
+When nodes are started, the kubelet automatically adds labels to them with
+zone information.
+
+Kubernetes will automatically spread the pods in a replication controller
+or service across nodes in a single-zone cluster (to reduce the impact of
+failures.) With multiple-zone clusters, this spreading behaviour is
+extended across zones (to reduce the impact of zone failures.) (This is
+achieved via `SelectorSpreadPriority`). This is a best-effort
+placement, and so if the zones in your cluster are heterogeneous
+(e.g. different numbers of nodes, different types of nodes, or
+different pod resource requirements), this might prevent perfectly
+even spreading of your pods across zones. If desired, you can use
+homogeneous zones (same number and types of nodes) to reduce the
+probability of unequal spreading.
+
+When persistent volumes are created, the `PersistentVolumeLabel`
+admission controller automatically adds zone labels to them. The scheduler (via the
+`VolumeZonePredicate` predicate) will then ensure that pods that claim a
+given volume are only placed into the same zone as that volume, as volumes
+cannot be attached across zones.
+
+## Limitations
+
+There are some important limitations of the multizone support:
+
+* We assume that the different zones are located close to each other in the
+network, so we don't perform any zone-aware routing. In particular, traffic
+that goes via services might cross zones (even if some pods backing that service
+exist in the same zone as the client), and this may incur additional latency and cost.
+
+* Volume zone-affinity will only work with a `PersistentVolume`, and will not
+work if you directly specify an EBS volume in the pod spec (for example).
+
+* Clusters cannot span clouds or regions (this functionality will require full
+federation support).
+
+* Although your nodes are in multiple zones, kube-up currently builds
+a single master node by default. While services are highly
+available and can tolerate the loss of a zone, the control plane is
+located in a single zone. Users that want a highly available control
+plane should follow the [high availability](/docs/admin/high-availability) instructions.
+
+
+## Walkthrough
+
+We're now going to walk through setting up and using a multi-zone
+cluster on both GCE & AWS. To do so, you bring up a full cluster
+(specifying `MULTIZONE=1`), and then you add nodes in additional zones
+by running `kube-up` again (specifying `KUBE_USE_EXISTING_MASTER=true`).
+
+### Bringing up your cluster
+
+Create the cluster as normal, but pass MULTIZONE to tell the cluster to manage multiple zones; creating nodes in us-central1-a.
+
+GCE:
+
+```shell
+curl -sS https://get.k8s.io | MULTIZONE=1 KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a NUM_NODES=3 bash
+```
+
+AWS:
+
+```shell
+curl -sS https://get.k8s.io | MULTIZONE=1 KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a NUM_NODES=3 bash
+```
+
+This step brings up a cluster as normal, still running in a single zone
+(but `MULTIZONE=1` has enabled multi-zone capabilities).
+
+### Nodes are labeled
+
+View the nodes; you can see that they are labeled with zone information.
+They are all in `us-central1-a` (GCE) or `us-west-2a` (AWS) so far. The
+labels are `failure-domain.beta.kubernetes.io/region` for the region,
+and `failure-domain.beta.kubernetes.io/zone` for the zone:
+
+```shell
+> kubectl get nodes --show-labels
+
+
+NAME STATUS AGE LABELS
+kubernetes-master Ready,SchedulingDisabled 6m beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master
+kubernetes-minion-87j9 Ready 6m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9
+kubernetes-minion-9vlv Ready 6m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
+kubernetes-minion-a12q Ready 6m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q
+```
+
+### Add more nodes in a second zone
+
+Let's add another set of nodes to the existing cluster, reusing the
+existing master, running in a different zone (us-central1-b or us-west-2b).
+We run kube-up again, but by specifying `KUBE_USE_EXISTING_MASTER=true`
+kube-up will not create a new master, but will reuse one that was previously
+created instead.
+
+GCE:
+
+```shell
+KUBE_USE_EXISTING_MASTER=true MULTIZONE=1 KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-b NUM_NODES=3 kubernetes/cluster/kube-up.sh
+```
+
+On AWS we also need to specify the network CIDR for the additional
+subnet, along with the master internal IP address:
+
+```shell
+KUBE_USE_EXISTING_MASTER=true MULTIZONE=1 KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2b NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.1.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh
+```
+
+
+View the nodes again; 3 more nodes should have launched and be tagged
+in us-central1-b:
+
+```shell
+> kubectl get nodes --show-labels
+
+NAME STATUS AGE LABELS
+kubernetes-master Ready,SchedulingDisabled 16m beta.kubernetes.io/instance-type=n1-standard-1,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master
+kubernetes-minion-281d Ready 2m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d
+kubernetes-minion-87j9 Ready 16m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9
+kubernetes-minion-9vlv Ready 16m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
+kubernetes-minion-a12q Ready 17m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q
+kubernetes-minion-pp2f Ready 2m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-pp2f
+kubernetes-minion-wf8i Ready 2m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-wf8i
+```
+
+### Volume affinity
+
+Create a volume (only PersistentVolumes are supported for zone
+affinity), using the new dynamic volume creation:
+
+```json
+kubectl create -f - < kubectl get pv --show-labels
+NAME CAPACITY ACCESSMODES STATUS CLAIM REASON AGE LABELS
+pv-gce-mj4gm 5Gi RWO Bound default/claim1 46s failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a
+```
+
+So now we will create a pod that uses the persistent volume claim.
+Because GCE PDs / AWS EBS volumes cannot be attached across zones,
+this means that this pod can only be created in the same zone as the volume:
+
+```yaml
+kubectl create -f - < kubectl describe pod mypod | grep Node
+Node: kubernetes-minion-9vlv/10.240.0.5
+> kubectl get node kubernetes-minion-9vlv --show-labels
+NAME STATUS AGE LABELS
+kubernetes-minion-9vlv Ready 22m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
+```
+
+### Pods are spread across zones
+
+Pods in a replication controller or service are automatically spread
+across zones. First, let's launch more nodes in a third zone:
+
+GCE:
+
+```shell
+KUBE_USE_EXISTING_MASTER=true MULTIZONE=1 KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-f NUM_NODES=3 kubernetes/cluster/kube-up.sh
+```
+
+AWS:
+
+```shell
+KUBE_USE_EXISTING_MASTER=true MULTIZONE=1 KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2c NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.2.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh
+```
+
+Verify that you now have nodes in 3 zones:
+
+```shell
+kubectl get nodes --show-labels
+```
+
+Create the guestbook-go example, which includes an RC of size 3, running a simple web app:
+
+```shell
+find kubernetes/examples/guestbook-go/ -name '*.json' | xargs -I {} kubectl create -f {}
+```
+
+The pods should be spread across all 3 zones:
+
+```shell
+> kubectl describe pod -l app=guestbook | grep Node
+Node: kubernetes-minion-9vlv/10.240.0.5
+Node: kubernetes-minion-281d/10.240.0.8
+Node: kubernetes-minion-olsh/10.240.0.11
+
+ > kubectl get node kubernetes-minion-9vlv kubernetes-minion-281d kubernetes-minion-olsh --show-labels
+NAME STATUS AGE LABELS
+kubernetes-minion-9vlv Ready 34m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv
+kubernetes-minion-281d Ready 20m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d
+kubernetes-minion-olsh Ready 3m beta.kubernetes.io/instance-type=n1-standard-2,failure-domain.beta.kubernetes.io/region=us-central1,failure-domain.beta.kubernetes.io/zone=us-central1-f,kubernetes.io/hostname=kubernetes-minion-olsh
+```
+
+
+Load-balancers span all zones in a cluster; the guestbook-go example
+includes an example load-balanced service:
+
+```shell
+> kubectl describe service guestbook | grep LoadBalancer.Ingress
+LoadBalancer Ingress: 130.211.126.21
+
+> ip=130.211.126.21
+
+> curl -s http://${ip}:3000/env | grep HOSTNAME
+ "HOSTNAME": "guestbook-44sep",
+
+> (for i in `seq 20`; do curl -s http://${ip}:3000/env | grep HOSTNAME; done) | sort | uniq
+ "HOSTNAME": "guestbook-44sep",
+ "HOSTNAME": "guestbook-hum5n",
+ "HOSTNAME": "guestbook-ppm40",
+```
+
+The load balancer correctly targets all the pods, even though they are in multiple zones.
+
+### Shutting down the cluster
+
+When you're done, clean up:
+
+GCE:
+
+```shell
+KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-f kubernetes/cluster/kube-down.sh
+KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-b kubernetes/cluster/kube-down.sh
+KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a kubernetes/cluster/kube-down.sh
+```
+
+AWS:
+
+```shell
+KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2c kubernetes/cluster/kube-down.sh
+KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b kubernetes/cluster/kube-down.sh
+KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh
+```
diff --git a/docs/admin/namespaces.md b/docs/admin/namespaces.md
deleted file mode 100644
index 6a5265eadd9..00000000000
--- a/docs/admin/namespaces.md
+++ /dev/null
@@ -1,145 +0,0 @@
----
----
-
-A Namespace is a mechanism to partition resources created by users into
-a logically named group.
-
-## Motivation
-
-A single cluster should be able to satisfy the needs of multiple users or groups of users (henceforth a 'user community').
-
-Each user community wants to be able to work in isolation from other communities.
-
-Each user community has its own:
-
-1. resources (pods, services, replication controllers, etc.)
-2. policies (who can or cannot perform actions in their community)
-3. constraints (this community is allowed this much quota, etc.)
-
-A cluster operator may create a Namespace for each unique user community.
-
-The Namespace provides a unique scope for:
-
-1. named resources (to avoid basic naming collisions)
-2. delegated management authority to trusted users
-3. ability to limit community resource consumption
-
-## Use cases
-
-1. As a cluster operator, I want to support multiple user communities on a single cluster.
-2. As a cluster operator, I want to delegate authority to partitions of the cluster to trusted users
- in those communities.
-3. As a cluster operator, I want to limit the amount of resources each community can consume in order
- to limit the impact to other communities using the cluster.
-4. As a cluster user, I want to interact with resources that are pertinent to my user community in
- isolation of what other user communities are doing on the cluster.
-
-
-## Usage
-
-Look [here](/docs/admin/namespaces/) for an in depth example of namespaces.
-
-### Viewing namespaces
-
-You can list the current namespaces in a cluster using:
-
-```shell
-$ kubectl get namespaces
-NAME LABELS STATUS
-default Active
-kube-system Active
-```
-
-Kubernetes starts with two initial namespaces:
- * `default` The default namespace for objects with no other namespace
- * `kube-system` The namespace for objects created by the Kubernetes system
-
-You can also get the summary of a specific namespace using:
-
-```shell
-$ kubectl get namespaces <name>
-```
-
-Or you can get detailed information with:
-
-```shell
-$ kubectl describe namespaces
-Name: default
-Labels: <none>
-Status: Active
-
-No resource quota.
-
-Resource Limits
- Type Resource Min Max Default
- ---- -------- --- --- ---
- Container cpu - - 100m
-```
-
-Note that these details show both resource quota (if present) as well as resource limit ranges.
-
-Resource quota tracks aggregate usage of resources in the *Namespace* and allows cluster operators
-to define *Hard* resource usage limits that a *Namespace* may consume.
-
-A limit range defines min/max constraints on the amount of resources a single entity can consume in
-a *Namespace*.
-
-See [Admission control: Limit Range](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_limit_range.md)
-
-A namespace can be in one of two phases:
- * `Active` the namespace is in use
- * `Terminating` the namespace is being deleted, and can not be used for new objects
-
-See the [design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md#phases) for more details.
-
-### Creating a new namespace
-
-To create a new namespace, first create a new YAML file called `my-namespace.yaml` with the contents:
-
-```yaml
-apiVersion: v1
-kind: Namespace
-metadata:
- name: <insert-namespace-name-here>
-```
-
-Note that the name of your namespace must be a DNS compatible label.
-
-More information on the `finalizers` field can be found in the namespace [design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md#finalizers).
-
-Then run:
-
-```shell
-$ kubectl create -f ./my-namespace.yaml
-```
-
-### Working in namespaces
-
-See [Setting the namespace for a request](/docs/user-guide/namespaces/#setting-the-namespace-for-a-request)
-and [Setting the namespace preference](/docs/user-guide/namespaces/#setting-the-namespace-preference).
-
-### Deleting a namespace
-
-You can delete a namespace with
-
-```shell
-$ kubectl delete namespaces <insert-some-namespace-name>
-```
-
-**WARNING, this deletes _everything_ under the namespace!**
-
-This delete is asynchronous, so for a time you will see the namespace in the `Terminating` state.
-
-## Namespaces and DNS
-
-When you create a [Service](/docs/user-guide/services), it creates a corresponding [DNS entry](/docs/admin/dns).
-This entry is of the form `<service-name>.<namespace-name>.svc.cluster.local`, which means
-that if a container just uses `<service-name>` it will resolve to the service which
-is local to a namespace. This is useful for using the same configuration across
-multiple namespaces such as Development, Staging and Production. If you want to reach
-across namespaces, you need to use the fully qualified domain name (FQDN).
-
-## Design
-
-Details of the design of namespaces in Kubernetes, including a [detailed example](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md#example-openshift-origin-managing-a-kubernetes-namespace)
-can be found in the [namespaces design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md)
\ No newline at end of file
diff --git a/docs/admin/namespaces/index.md b/docs/admin/namespaces/index.md
index b81886830e8..2b9f14e8353 100644
--- a/docs/admin/namespaces/index.md
+++ b/docs/admin/namespaces/index.md
@@ -1,234 +1,140 @@
---
---
-Kubernetes _namespaces_ help different projects, teams, or customers to share a Kubernetes cluster.
+A Namespace is a mechanism to partition resources created by users into
+a logically named group.
-It does this by providing the following:
+## Motivation
-1. A scope for [Names](/docs/user-guide/identifiers).
-2. A mechanism to attach authorization and policy to a subsection of the cluster.
+A single cluster should be able to satisfy the needs of multiple users or groups of users (henceforth a 'user community').
-Use of multiple namespaces is optional.
+Each user community wants to be able to work in isolation from other communities.
-This example demonstrates how to use Kubernetes namespaces to subdivide your cluster.
+Each user community has its own:
-### Step Zero: Prerequisites
+1. resources (pods, services, replication controllers, etc.)
+2. policies (who can or cannot perform actions in their community)
+3. constraints (this community is allowed this much quota, etc.)
-This example assumes the following:
+A cluster operator may create a Namespace for each unique user community.
-1. You have an [existing Kubernetes cluster](/docs/getting-started-guides/).
-2. You have a basic understanding of Kubernetes _[pods](/docs/user-guide/pods)_, _[services](/docs/user-guide/services)_, and _[replication controllers](/docs/user-guide/replication-controller)_.
+The Namespace provides a unique scope for:
-### Step One: Understand the default namespace
+1. named resources (to avoid basic naming collisions)
+2. delegated management authority to trusted users
+3. ability to limit community resource consumption
-By default, a Kubernetes cluster will instantiate a default namespace when provisioning the cluster to hold the default set of pods,
-services, and replication controllers used by the cluster.
+## Use cases
-Assuming you have a fresh cluster, you can introspect the available namespace's by doing the following:
+1. As a cluster operator, I want to support multiple user communities on a single cluster.
+2. As a cluster operator, I want to delegate authority to partitions of the cluster to trusted users
+ in those communities.
+3. As a cluster operator, I want to limit the amount of resources each community can consume in order
+ to limit the impact to other communities using the cluster.
+4. As a cluster user, I want to interact with resources that are pertinent to my user community in
+ isolation of what other user communities are doing on the cluster.
+
+## Viewing namespaces
+
+You can list the current namespaces in a cluster using:
```shell
$ kubectl get namespaces
-NAME LABELS
-default
+NAME LABELS STATUS
+default Active
+kube-system Active
```
-### Step Two: Create new namespaces
+Kubernetes starts with two initial namespaces:
+ * `default` The default namespace for objects with no other namespace
+ * `kube-system` The namespace for objects created by the Kubernetes system
-For this exercise, we will create two additional Kubernetes namespaces to hold our content.
-
-Let's imagine a scenario where an organization is using a shared Kubernetes cluster for development and production use cases.
-
-The development team would like to maintain a space in the cluster where they can get a view on the list of pods, services, and replication controllers
-they use to build and run their application. In this space, Kubernetes resources come and go, and the restrictions on who can or cannot modify resources
-are relaxed to enable agile development.
-
-The operations team would like to maintain a space in the cluster where they can enforce strict procedures on who can or cannot manipulate the set of
-pods, services, and replication controllers that run the production site.
-
-One pattern this organization could follow is to partition the Kubernetes cluster into two namespaces: development and production.
-
-Let's create two new namespaces to hold our work.
-
-Use the file [`namespace-dev.json`](/docs/admin/namespaces/namespace-dev.json) which describes a development namespace:
-
-{% include code.html language="json" file="namespace-dev.json" ghlink="/docs/admin/namespaces/namespace-dev.json" %}
-
-Create the development namespace using kubectl.
+You can also get the summary of a specific namespace using:
```shell
-$ kubectl create -f docs/admin/namespaces/namespace-dev.json
+$ kubectl get namespaces <name>
```
-And then lets create the production namespace using kubectl.
+Or you can get detailed information with:
```shell
-$ kubectl create -f docs/admin/namespaces/namespace-prod.json
+$ kubectl describe namespaces <name>
+Name: default
+Labels:
+Status: Active
+
+No resource quota.
+
+Resource Limits
+ Type Resource Min Max Default
+ ---- -------- --- --- ---
+ Container cpu - - 100m
```
-To be sure things are right, let's list all of the namespaces in our cluster.
+Note that these details show both resource quota (if present) as well as resource limit ranges.
-```shell
-$ kubectl get namespaces
-NAME LABELS STATUS
-default Active
-development name=development Active
-production name=production Active
-```
+Resource quota tracks aggregate usage of resources in the *Namespace* and allows cluster operators
+to define *Hard* resource usage limits that a *Namespace* may consume.
-### Step Three: Create pods in each namespace
+A limit range defines min/max constraints on the amount of resources a single entity can consume in
+a *Namespace*.
-A Kubernetes namespace provides the scope for pods, services, and replication controllers in the cluster.
+See [Admission control: Limit Range](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_limit_range.md)
-Users interacting with one namespace do not see the content in another namespace.
+A namespace can be in one of two phases:
+ * `Active` the namespace is in use
+ * `Terminating` the namespace is being deleted, and can not be used for new objects
-To demonstrate this, let's spin up a simple replication controller and pod in the development namespace.
+See the [design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md#phases) for more details.
-We first check what is the current context:
+## Creating a new namespace
+
+To create a new namespace, first create a new YAML file called `my-namespace.yaml` with the contents:
```yaml
apiVersion: v1
-clusters:
-- cluster:
- certificate-authority-data: REDACTED
- server: https://130.211.122.180
- name: lithe-cocoa-92103_kubernetes
-contexts:
-- context:
- cluster: lithe-cocoa-92103_kubernetes
- user: lithe-cocoa-92103_kubernetes
- name: lithe-cocoa-92103_kubernetes
-current-context: lithe-cocoa-92103_kubernetes
-kind: Config
-preferences: {}
-users:
-- name: lithe-cocoa-92103_kubernetes
- user:
- client-certificate-data: REDACTED
- client-key-data: REDACTED
- token: 65rZW78y8HbwXXtSXuUw9DbP4FLjHi4b
-- name: lithe-cocoa-92103_kubernetes-basic-auth
- user:
- password: h5M0FtUUIflBSdI7
- username: admin
+kind: Namespace
+metadata:
+ name: <insert-namespace-name-here>
```
-The next step is to define a context for the kubectl client to work in each namespace. The value of "cluster" and "user" fields are copied from the current context.
+Note that the name of your namespace must be a DNS compatible label.
+
+More information on the `finalizers` field can be found in the namespace [design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md#finalizers).
+
+Then run:
```shell
-$ kubectl config set-context dev --namespace=development --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes
-$ kubectl config set-context prod --namespace=production --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes
+$ kubectl create -f ./my-namespace.yaml
```
-The above commands provided two request contexts you can alternate against depending on what namespace you
-wish to work against.
+## Working in namespaces
-Let's switch to operate in the development namespace.
+See [Setting the namespace for a request](/docs/user-guide/namespaces/#setting-the-namespace-for-a-request)
+and [Setting the namespace preference](/docs/user-guide/namespaces/#setting-the-namespace-preference).
+
+## Deleting a namespace
+
+You can delete a namespace with
```shell
-$ kubectl config use-context dev
+$ kubectl delete namespaces <insert-some-namespace-name>
```
-You can verify your current context by doing the following:
+**WARNING, this deletes _everything_ under the namespace!**
-```shell
-$ kubectl config view
-```
+This delete is asynchronous, so for a time you will see the namespace in the `Terminating` state.
-```yaml
-apiVersion: v1
-clusters:
-- cluster:
- certificate-authority-data: REDACTED
- server: https://130.211.122.180
- name: lithe-cocoa-92103_kubernetes
-contexts:
-- context:
- cluster: lithe-cocoa-92103_kubernetes
- namespace: development
- user: lithe-cocoa-92103_kubernetes
- name: dev
-- context:
- cluster: lithe-cocoa-92103_kubernetes
- user: lithe-cocoa-92103_kubernetes
- name: lithe-cocoa-92103_kubernetes
-- context:
- cluster: lithe-cocoa-92103_kubernetes
- namespace: production
- user: lithe-cocoa-92103_kubernetes
- name: prod
-current-context: dev
-kind: Config
-preferences: {}
-users:
-- name: lithe-cocoa-92103_kubernetes
- user:
- client-certificate-data: REDACTED
- client-key-data: REDACTED
- token: 65rZW78y8HbwXXtSXuUw9DbP4FLjHi4b
-- name: lithe-cocoa-92103_kubernetes-basic-auth
- user:
- password: h5M0FtUUIflBSdI7
- username: admin
-```
+## Namespaces and DNS
-At this point, all requests we make to the Kubernetes cluster from the command line are scoped to the development namespace.
+When you create a [Service](/docs/user-guide/services), it creates a corresponding [DNS entry](/docs/admin/dns).
+This entry is of the form `<service-name>.<namespace-name>.svc.cluster.local`, which means
+that if a container just uses `<service-name>` it will resolve to the service which
+is local to a namespace. This is useful for using the same configuration across
+multiple namespaces such as Development, Staging and Production. If you want to reach
+across namespaces, you need to use the fully qualified domain name (FQDN).
-Let's create some content.
+## Design
-```shell
-$ kubectl run snowflake --image=kubernetes/serve_hostname --replicas=2
-```
-
-We have just created a replication controller whose replica size is 2 that is running the pod called snowflake with a basic container that just serves the hostname.
-
-```shell
-$ kubectl get rc
-CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
-snowflake snowflake kubernetes/serve_hostname run=snowflake 2
-
-$ kubectl get pods
-NAME READY STATUS RESTARTS AGE
-snowflake-8w0qn 1/1 Running 0 22s
-snowflake-jrpzb 1/1 Running 0 22s
-```
-
-And this is great, developers are able to do what they want, and they do not have to worry about affecting content in the production namespace.
-
-Let's switch to the production namespace and show how resources in one namespace are hidden from the other.
-
-```shell
-$ kubectl config use-context prod
-```
-
-The production namespace should be empty.
-
-```shell
-$ kubectl get rc
-CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
-
-$ kubectl get pods
-NAME READY STATUS RESTARTS AGE
-```
-
-Production likes to run cattle, so let's create some cattle pods.
-
-```shell
-$ kubectl run cattle --image=kubernetes/serve_hostname --replicas=5
-
-$ kubectl get rc
-CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
-cattle cattle kubernetes/serve_hostname run=cattle 5
-
-$ kubectl get pods
-NAME READY STATUS RESTARTS AGE
-cattle-97rva 1/1 Running 0 12s
-cattle-i9ojn 1/1 Running 0 12s
-cattle-qj3yv 1/1 Running 0 12s
-cattle-yc7vn 1/1 Running 0 12s
-cattle-zz7ea 1/1 Running 0 12s
-```
-
-At this point, it should be clear that the resources users create in one namespace are hidden from the other namespace.
-
-As the policy support in Kubernetes evolves, we will extend this scenario to show how you can provide different
-authorization rules for each namespace.
+Details of the design of namespaces in Kubernetes, including a [detailed example](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md#example-openshift-origin-managing-a-kubernetes-namespace)
+can be found in the [namespaces design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md)
diff --git a/docs/admin/namespaces/walkthrough.md b/docs/admin/namespaces/walkthrough.md
new file mode 100644
index 00000000000..e3b87c7f65e
--- /dev/null
+++ b/docs/admin/namespaces/walkthrough.md
@@ -0,0 +1,200 @@
+---
+---
+
+Kubernetes _namespaces_ help different projects, teams, or customers to share a Kubernetes cluster.
+
+It does this by providing the following:
+
+1. A scope for [Names](/docs/user-guide/identifiers/).
+2. A mechanism to attach authorization and policy to a subsection of the cluster.
+
+Use of multiple namespaces is optional.
+
+This example demonstrates how to use Kubernetes namespaces to subdivide your cluster.
+
+### Step Zero: Prerequisites
+
+This example assumes the following:
+
+1. You have an [existing Kubernetes cluster](/docs/getting-started-guides/).
+2. You have a basic understanding of Kubernetes _[Pods](/docs/user-guide/pods/)_, _[Services](/docs/user-guide/services/)_, and _[Deployments](/docs/user-guide/deployments/)_.
+
+### Step One: Understand the default namespace
+
+By default, a Kubernetes cluster will instantiate a default namespace when provisioning the cluster to hold the default set of Pods,
+Services, and Deployments used by the cluster.
+
+Assuming you have a fresh cluster, you can introspect the available namespaces by doing the following:
+
+```shell
+$ kubectl get namespaces
+NAME STATUS AGE
+default Active 13m
+```
+
+### Step Two: Create new namespaces
+
+For this exercise, we will create two additional Kubernetes namespaces to hold our content.
+
+Let's imagine a scenario where an organization is using a shared Kubernetes cluster for development and production use cases.
+
+The development team would like to maintain a space in the cluster where they can get a view on the list of Pods, Services, and Deployments
+they use to build and run their application. In this space, Kubernetes resources come and go, and the restrictions on who can or cannot modify resources
+are relaxed to enable agile development.
+
+The operations team would like to maintain a space in the cluster where they can enforce strict procedures on who can or cannot manipulate the set of
+Pods, Services, and Deployments that run the production site.
+
+One pattern this organization could follow is to partition the Kubernetes cluster into two namespaces: development and production.
+
+Let's create two new namespaces to hold our work.
+
+Use the file [`namespace-dev.json`](/docs/admin/namespaces/namespace-dev.json) which describes a development namespace:
+
+{% include code.html language="json" file="namespace-dev.json" ghlink="/docs/admin/namespaces/namespace-dev.json" %}
+
+Create the development namespace using kubectl.
+
+```shell
+$ kubectl create -f docs/admin/namespaces/namespace-dev.json
+```
+
+And then let's create the production namespace using kubectl.
+
+```shell
+$ kubectl create -f docs/admin/namespaces/namespace-prod.json
+```
+
+To be sure things are right, let's list all of the namespaces in our cluster.
+
+```shell
+$ kubectl get namespaces --show-labels
+NAME STATUS AGE LABELS
+default Active 32m
+development Active 29s name=development
+production Active 23s name=production
+```
+
+### Step Three: Create pods in each namespace
+
+A Kubernetes namespace provides the scope for Pods, Services, and Deployments in the cluster.
+
+Users interacting with one namespace do not see the content in another namespace.
+
+To demonstrate this, let's spin up a simple Deployment and Pods in the development namespace.
+
+We first check what is the current context:
+
+```shell
+$ kubectl config view
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: REDACTED
+ server: https://130.211.122.180
+ name: lithe-cocoa-92103_kubernetes
+contexts:
+- context:
+ cluster: lithe-cocoa-92103_kubernetes
+ user: lithe-cocoa-92103_kubernetes
+ name: lithe-cocoa-92103_kubernetes
+current-context: lithe-cocoa-92103_kubernetes
+kind: Config
+preferences: {}
+users:
+- name: lithe-cocoa-92103_kubernetes
+ user:
+ client-certificate-data: REDACTED
+ client-key-data: REDACTED
+ token: 65rZW78y8HbwXXtSXuUw9DbP4FLjHi4b
+- name: lithe-cocoa-92103_kubernetes-basic-auth
+ user:
+ password: h5M0FtUUIflBSdI7
+ username: admin
+
+$ kubectl config current-context
+lithe-cocoa-92103_kubernetes
+```
+
+The next step is to define a context for the kubectl client to work in each namespace. The value of "cluster" and "user" fields are copied from the current context.
+
+```shell
+$ kubectl config set-context dev --namespace=development --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes
+$ kubectl config set-context prod --namespace=production --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes
+```
+
+The above commands provided two request contexts you can alternate against depending on what namespace you
+wish to work against.
+
+Let's switch to operate in the development namespace.
+
+```shell
+$ kubectl config use-context dev
+```
+
+You can verify your current context by doing the following:
+
+```shell
+$ kubectl config current-context
+dev
+```
+
+At this point, all requests we make to the Kubernetes cluster from the command line are scoped to the development namespace.
+
+Let's create some content.
+
+```shell
+$ kubectl run snowflake --image=kubernetes/serve_hostname --replicas=2
+```
+We have just created a deployment whose replica size is 2 that is running the pod called snowflake with a basic container that just serves the hostname.
+Note that `kubectl run` creates deployments only on Kubernetes clusters >= v1.2. If you are running older versions, it creates replication controllers instead.
+If you want to obtain the old behavior, use `--generator=run/v1` to create replication controllers. See [`kubectl run`](/docs/user-guide/kubectl/kubectl_run/) for more details.
+
+```shell
+$ kubectl get deployment
+NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
+snowflake 2 2 2 2 2m
+
+$ kubectl get pods -l run=snowflake
+NAME READY STATUS RESTARTS AGE
+snowflake-3968820950-9dgr8 1/1 Running 0 2m
+snowflake-3968820950-vgc4n 1/1 Running 0 2m
+```
+
+And this is great, developers are able to do what they want, and they do not have to worry about affecting content in the production namespace.
+
+Let's switch to the production namespace and show how resources in one namespace are hidden from the other.
+
+```shell
+$ kubectl config use-context prod
+```
+
+The production namespace should be empty, and the following commands should return nothing.
+
+```shell
+$ kubectl get deployment
+$ kubectl get pods
+```
+
+Production likes to run cattle, so let's create some cattle pods.
+
+```shell
+$ kubectl run cattle --image=kubernetes/serve_hostname --replicas=5
+
+$ kubectl get deployment
+NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
+cattle 5 5 5 5 10s
+
+$ kubectl get pods -l run=cattle
+NAME READY STATUS RESTARTS AGE
+cattle-2263376956-41xy6 1/1 Running 0 34s
+cattle-2263376956-kw466 1/1 Running 0 34s
+cattle-2263376956-n4v97 1/1 Running 0 34s
+cattle-2263376956-p5p3i 1/1 Running 0 34s
+cattle-2263376956-sxpth 1/1 Running 0 34s
+```
+
+At this point, it should be clear that the resources users create in one namespace are hidden from the other namespace.
+
+As the policy support in Kubernetes evolves, we will extend this scenario to show how you can provide different
+authorization rules for each namespace.
diff --git a/docs/admin/networking.md b/docs/admin/networking.md
index 74b428f111e..3557d7da498 100644
--- a/docs/admin/networking.md
+++ b/docs/admin/networking.md
@@ -154,6 +154,15 @@ Follow the "With Linux Bridge devices" section of [this very nice
tutorial](http://blog.oddbit.com/2014/08/11/four-ways-to-connect-a-docker/) from
Lars Kellogg-Stedman.
+### Weave Net from Weaveworks
+
+[Weave Net](https://www.weave.works/documentation/net-1-5-0-introducing-weave/) is a
+resilient and simple to use network for Kubernetes and its hosted applications.
+Weave Net runs as a [CNI plug-in](https://www.weave.works/documentation/net-1-5-0-cni-plugin/)
+or stand-alone. In either version, it doesn’t require any configuration or extra code
+to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes.
+
+
### Flannel
[Flannel](https://github.com/coreos/flannel#flannel) is a very simple overlay
@@ -167,10 +176,6 @@ people have reported success with Flannel and Kubernetes.
complicated way to build an overlay network. This is endorsed by several of the
"Big Shops" for networking.
-### Weave
-
-[Weave](https://github.com/zettio/weave) is yet another way to build an overlay
-network, primarily aiming at Docker integration.
### Calico
@@ -179,7 +184,11 @@ IPs.
### Romana
-[Romana](https://romana.io) is an open source software defined networking (SDN) solution that lets you deploy Kubernetes without an overlay network.
+[Romana](http://romana.io) is an open source software defined networking (SDN) solution that lets you deploy Kubernetes without an overlay network.
+
+### Contiv
+
+[Contiv](https://github.com/contiv/netplugin) provides configurable networking (native l3 using BGP, overlay using vxlan, classic l2, or Cisco-SDN/ACI) for various use cases. [Contiv](http://contiv.io) is all open sourced.
## Other reading
diff --git a/docs/admin/resource-quota.md b/docs/admin/resource-quota.md
deleted file mode 100755
index beafc082793..00000000000
--- a/docs/admin/resource-quota.md
+++ /dev/null
@@ -1,154 +0,0 @@
----
----
-
-When several users or teams share a cluster with a fixed number of nodes,
-there is a concern that one team could use more than its fair share of resources.
-
-Resource quotas are a tool for administrators to address this concern. Resource quotas
-work like this:
-
-- Different teams work in different namespaces. Currently this is voluntary, but
- support for making this mandatory via ACLs is planned.
-- The administrator creates a Resource Quota for each namespace.
-- Users put compute resource requests on their pods. The sum of all resource requests across
- all pods in the same namespace must not exceed any hard resource limit in any Resource Quota
- document for the namespace. Note that we used to verify Resource Quota by taking the sum of
- resource limits of the pods, but this was altered to use resource requests. Backwards compatibility
- for those pods previously created is preserved because pods that only specify a resource limit have
- their resource requests defaulted to match their defined limits. The user is only charged for the
- resources they request in the Resource Quota versus their limits because the request is the minimum
- amount of resource guaranteed by the cluster during scheduling. For more information on over commit,
- see [compute-resources](/docs/user-guide/compute-resources).
-- If creating a pod would cause the namespace to exceed any of the limits specified in the
- the Resource Quota for that namespace, then the request will fail with HTTP status
- code `403 FORBIDDEN`.
-- If quota is enabled in a namespace and the user does not specify *requests* on the pod for each
- of the resources for which quota is enabled, then the POST of the pod will fail with HTTP
- status code `403 FORBIDDEN`. Hint: Use the LimitRange admission controller to force default
- values of *limits* (then resource *requests* would be equal to *limits* by default, see
- [admission controller](/docs/admin/admission-controllers)) before the quota is checked to avoid this problem.
-
-Examples of policies that could be created using namespaces and quotas are:
-
-- In a cluster with a capacity of 32 GiB RAM, and 16 cores, let team A use 20 Gib and 10 cores,
- let B use 10GiB and 4 cores, and hold 2GiB and 2 cores in reserve for future allocation.
-- Limit the "testing" namespace to using 1 core and 1GiB RAM. Let the "production" namespace
- use any amount.
-
-In the case where the total capacity of the cluster is less than the sum of the quotas of the namespaces,
-there may be contention for resources. This is handled on a first-come-first-served basis.
-
-Neither contention nor changes to quota will affect already-running pods.
-
-## Enabling Resource Quota
-
-Resource Quota support is enabled by default for many Kubernetes distributions. It is
-enabled when the apiserver `--admission-control=` flag has `ResourceQuota` as
-one of its arguments.
-
-Resource Quota is enforced in a particular namespace when there is a
-`ResourceQuota` object in that namespace. There should be at most one
-`ResourceQuota` object in a namespace.
-
-## Compute Resource Quota
-
-The total sum of [compute resources](/docs/user-guide/compute-resources) requested by pods
-in a namespace can be limited. The following compute resource types are supported:
-
-| ResourceName | Description |
-| ------------ | ----------- |
-| cpu | Total cpu requests of containers |
-| memory | Total memory requests of containers
-
-For example, `cpu` quota sums up the `resources.requests.cpu` fields of every
-container of every pod in the namespace, and enforces a maximum on that sum.
-
-## Object Count Quota
-
-The number of objects of a given type can be restricted. The following types
-are supported:
-
-| ResourceName | Description |
-| ------------ | ----------- |
-| pods | Total number of pods |
-| services | Total number of services |
-| replicationcontrollers | Total number of replication controllers |
-| resourcequotas | Total number of [resource quotas](/docs/admin/admission-controllers/#resourcequota) |
-| secrets | Total number of secrets |
-| persistentvolumeclaims | Total number of [persistent volume claims](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) |
-
-For example, `pods` quota counts and enforces a maximum on the number of `pods`
-created in a single namespace.
-
-You might want to set a pods quota on a namespace
-to avoid the case where a user creates many small pods and exhausts the cluster's
-supply of Pod IPs.
-
-## Viewing and Setting Quotas
-
-Kubectl supports creating, updating, and viewing quotas:
-
-```shell
-$ kubectl namespace myspace
-$ cat < quota.json
-{
- "apiVersion": "v1",
- "kind": "ResourceQuota",
- "metadata": {
- "name": "quota"
- },
- "spec": {
- "hard": {
- "memory": "1Gi",
- "cpu": "20",
- "pods": "10",
- "services": "5",
- "replicationcontrollers":"20",
- "resourcequotas":"1"
- }
- }
-}
-EOF
-$ kubectl create -f ./quota.json
-$ kubectl get quota
-NAME
-quota
-$ kubectl describe quota quota
-Name: quota
-Resource Used Hard
--------- ---- ----
-cpu 0m 20
-memory 0 1Gi
-pods 5 10
-replicationcontrollers 5 20
-resourcequotas 1 1
-services 3 5
-```
-
-## Quota and Cluster Capacity
-
-Resource Quota objects are independent of the Cluster Capacity. They are
-expressed in absolute units. So, if you add nodes to your cluster, this does *not*
-automatically give each namespace the ability to consume more resources.
-
-Sometimes more complex policies may be desired, such as:
-
- - proportionally divide total cluster resources among several teams.
- - allow each tenant to grow resource usage as needed, but have a generous
- limit to prevent accidental resource exhaustion.
- - detect demand from one namespace, add nodes, and increase quota.
-
-Such policies could be implemented using ResourceQuota as a building-block, by
-writing a 'controller' which watches the quota usage and adjusts the quota
-hard limits of each namespace according to other signals.
-
-Note that resource quota divides up aggregate cluster resources, but it creates no
-restrictions around nodes: pods from several namespaces may run on the same node.
-
-## Example
-
-See a [detailed example for how to use resource quota](/docs/admin/resourcequota/).
-
-## Read More
-
-See [ResourceQuota design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_resource_quota.md) for more information.
\ No newline at end of file
diff --git a/docs/admin/resourcequota/index.md b/docs/admin/resourcequota/index.md
index dab2e6ca644..40559f058e6 100644
--- a/docs/admin/resourcequota/index.md
+++ b/docs/admin/resourcequota/index.md
@@ -1,156 +1,154 @@
---
---
-This example demonstrates how [resource quota](/docs/admin/admission-controllers/#resourcequota) and
-[limitsranger](/docs/admin/admission-controllers/#limitranger) can be applied to a Kubernetes namespace.
+When several users or teams share a cluster with a fixed number of nodes,
+there is a concern that one team could use more than its fair share of resources.
+
+Resource quotas are a tool for administrators to address this concern. Resource quotas
+work like this:
+
+- Different teams work in different namespaces. Currently this is voluntary, but
+ support for making this mandatory via ACLs is planned.
+- The administrator creates a Resource Quota for each namespace.
+- Users put compute resource requests on their pods. The sum of all resource requests across
+ all pods in the same namespace must not exceed any hard resource limit in any Resource Quota
+ document for the namespace. Note that we used to verify Resource Quota by taking the sum of
+ resource limits of the pods, but this was altered to use resource requests. Backwards compatibility
+ for those pods previously created is preserved because pods that only specify a resource limit have
+ their resource requests defaulted to match their defined limits. The user is only charged for the
+ resources they request in the Resource Quota versus their limits because the request is the minimum
+ amount of resource guaranteed by the cluster during scheduling. For more information on over commit,
+ see [compute-resources](/docs/user-guide/compute-resources).
+- If creating a pod would cause the namespace to exceed any of the limits specified in the
+  Resource Quota for that namespace, then the request will fail with HTTP status
+ code `403 FORBIDDEN`.
+- If quota is enabled in a namespace and the user does not specify *requests* on the pod for each
+ of the resources for which quota is enabled, then the POST of the pod will fail with HTTP
+ status code `403 FORBIDDEN`. Hint: Use the LimitRange admission controller to force default
+ values of *limits* (then resource *requests* would be equal to *limits* by default, see
+ [admission controller](/docs/admin/admission-controllers)) before the quota is checked to avoid this problem.
+
+Examples of policies that could be created using namespaces and quotas are:
+
+- In a cluster with a capacity of 32 GiB RAM, and 16 cores, let team A use 20 GiB and 10 cores,
+  let B use 10 GiB and 4 cores, and hold 2 GiB and 2 cores in reserve for future allocation.
+- Limit the "testing" namespace to using 1 core and 1GiB RAM. Let the "production" namespace
+ use any amount.
+
+In the case where the total capacity of the cluster is less than the sum of the quotas of the namespaces,
+there may be contention for resources. This is handled on a first-come-first-served basis.
+
+Neither contention nor changes to quota will affect already-running pods.
+
+## Enabling Resource Quota
+
+Resource Quota support is enabled by default for many Kubernetes distributions. It is
+enabled when the apiserver `--admission-control=` flag has `ResourceQuota` as
+one of its arguments.
+
+Resource Quota is enforced in a particular namespace when there is a
+`ResourceQuota` object in that namespace. There should be at most one
+`ResourceQuota` object in a namespace.
+
+## Compute Resource Quota
+
+The total sum of [compute resources](/docs/user-guide/compute-resources) requested by pods
+in a namespace can be limited. The following compute resource types are supported:
+
+| ResourceName | Description |
+| ------------ | ----------- |
+| cpu | Total cpu requests of containers |
+| memory | Total memory requests of containers |
+
+For example, `cpu` quota sums up the `resources.requests.cpu` fields of every
+container of every pod in the namespace, and enforces a maximum on that sum.
+
+## Object Count Quota
+
+The number of objects of a given type can be restricted. The following types
+are supported:
+
+| ResourceName | Description |
+| ------------ | ----------- |
+| pods | Total number of pods |
+| services | Total number of services |
+| replicationcontrollers | Total number of replication controllers |
+| resourcequotas | Total number of [resource quotas](/docs/admin/admission-controllers/#resourcequota) |
+| secrets | Total number of secrets |
+| persistentvolumeclaims | Total number of [persistent volume claims](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) |
+
+For example, `pods` quota counts and enforces a maximum on the number of `pods`
+created in a single namespace.
+
+You might want to set a pods quota on a namespace
+to avoid the case where a user creates many small pods and exhausts the cluster's
+supply of Pod IPs.
+
+## Viewing and Setting Quotas
+
+Kubectl supports creating, updating, and viewing quotas:
+
+```shell
+$ kubectl namespace myspace
+$ cat <<EOF > quota.json
+{
+ "apiVersion": "v1",
+ "kind": "ResourceQuota",
+ "metadata": {
+ "name": "quota"
+ },
+ "spec": {
+ "hard": {
+ "memory": "1Gi",
+ "cpu": "20",
+ "pods": "10",
+ "services": "5",
+ "replicationcontrollers":"20",
+ "resourcequotas":"1"
+ }
+ }
+}
+EOF
+$ kubectl create -f ./quota.json
+$ kubectl get quota
+NAME
+quota
+$ kubectl describe quota quota
+Name: quota
+Resource Used Hard
+-------- ---- ----
+cpu 0m 20
+memory 0 1Gi
+pods 5 10
+replicationcontrollers 5 20
+resourcequotas 1 1
+services 3 5
+```
+
+## Quota and Cluster Capacity
+
+Resource Quota objects are independent of the Cluster Capacity. They are
+expressed in absolute units. So, if you add nodes to your cluster, this does *not*
+automatically give each namespace the ability to consume more resources.
+
+Sometimes more complex policies may be desired, such as:
+
+ - proportionally divide total cluster resources among several teams.
+ - allow each tenant to grow resource usage as needed, but have a generous
+ limit to prevent accidental resource exhaustion.
+ - detect demand from one namespace, add nodes, and increase quota.
+
+Such policies could be implemented using ResourceQuota as a building-block, by
+writing a 'controller' which watches the quota usage and adjusts the quota
+hard limits of each namespace according to other signals.
+
+Note that resource quota divides up aggregate cluster resources, but it creates no
+restrictions around nodes: pods from several namespaces may run on the same node.
+
+## Example
+
+See a [detailed example for how to use resource quota](/docs/admin/resourcequota/walkthrough/).
+
+## Read More
+
See [ResourceQuota design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_resource_quota.md) for more information.
-
-This example assumes you have a functional Kubernetes setup.
-
-## Step 1: Create a namespace
-
-This example will work in a custom namespace to demonstrate the concepts involved.
-
-Let's create a new namespace called quota-example:
-
-```shell
-$ kubectl create -f docs/admin/resourcequota/namespace.yaml
-namespace "quota-example" created
-$ kubectl get namespaces
-NAME LABELS STATUS AGE
-default Active 2m
-quota-example Active 39s
-```
-
-## Step 2: Apply a quota to the namespace
-
-By default, a pod will run with unbounded CPU and memory requests/limits. This means that any pod in the
-system will be able to consume as much CPU and memory on the node that executes the pod.
-
-Users may want to restrict how much of the cluster resources a given namespace may consume
-across all of its pods in order to manage cluster usage. To do this, a user applies a quota to
-a namespace. A quota lets the user set hard limits on the total amount of node resources (cpu, memory)
-and API resources (pods, services, etc.) that a namespace may consume. In term of resources, Kubernetes
-checks the total resource *requests*, not resource *limits* of all containers/pods in the namespace.
-
-Let's create a simple quota in our namespace:
-
-```shell
-$ kubectl create -f docs/admin/resourcequota/quota.yaml --namespace=quota-example
-resourcequota "quota" created
-```
-
-Once your quota is applied to a namespace, the system will restrict any creation of content
-in the namespace until the quota usage has been calculated. This should happen quickly.
-
-You can describe your current quota usage to see what resources are being consumed in your
-namespace.
-
-```shell
-$ kubectl describe quota quota --namespace=quota-example
-Name: quota
-Namespace: quota-example
-Resource Used Hard
--------- ---- ----
-cpu 0 20
-memory 0 1Gi
-persistentvolumeclaims 0 10
-pods 0 10
-replicationcontrollers 0 20
-resourcequotas 1 1
-secrets 1 10
-services 0 5
-```
-
-## Step 3: Applying default resource requests and limits
-
-Pod authors rarely specify resource requests and limits for their pods.
-
-Since we applied a quota to our project, let's see what happens when an end-user creates a pod that has unbounded
-cpu and memory by creating an nginx container.
-
-To demonstrate, lets create a replication controller that runs nginx:
-
-```shell
-$ kubectl run nginx --image=nginx --replicas=1 --namespace=quota-example
-replicationcontroller "nginx" created
-```
-
-Now let's look at the pods that were created.
-
-```shell
-$ kubectl get pods --namespace=quota-example
-NAME READY STATUS RESTARTS AGE
-```
-
-What happened? I have no pods! Let's describe the replication controller to get a view of what is happening.
-
-```shell
-kubectl describe rc nginx --namespace=quota-example
-Name: nginx
-Namespace: quota-example
-Image(s): nginx
-Selector: run=nginx
-Labels: run=nginx
-Replicas: 0 current / 1 desired
-Pods Status: 0 Running / 0 Waiting / 0 Succeeded / 0 Failed
-No volumes.
-Events:
- FirstSeen LastSeen Count From SubobjectPath Reason Message
- 42s 11s 3 {replication-controller } FailedCreate Error creating: Pod "nginx-" is forbidden: Must make a non-zero request for memory since it is tracked by quota.
-```
-
-The Kubernetes API server is rejecting the replication controllers requests to create a pod because our pods
-do not specify any memory usage *request*.
-
-So let's set some default values for the amount of cpu and memory a pod can consume:
-
-```shell
-$ kubectl create -f docs/admin/resourcequota/limits.yaml --namespace=quota-example
-limitrange "limits" created
-$ kubectl describe limits limits --namespace=quota-example
-Name: limits
-Namespace: quota-example
-Type Resource Min Max Request Limit Limit/Request
----- -------- --- --- ------- ----- -------------
-Container memory - - 256Mi 512Mi -
-Container cpu - - 100m 200m -
-```
-
-Now any time a pod is created in this namespace, if it has not specified any resource request/limit, the default
-amount of cpu and memory per container will be applied, and the request will be used as part of admission control.
-
-Now that we have applied default resource *request* for our namespace, our replication controller should be able to
-create its pods.
-
-```shell
-$ kubectl get pods --namespace=quota-example
-NAME READY STATUS RESTARTS AGE
-nginx-fca65 1/1 Running 0 1m
-```
-
-And if we print out our quota usage in the namespace:
-
-```shell
-$ kubectl describe quota quota --namespace=quota-example
-Name: quota
-Namespace: quota-example
-Resource Used Hard
--------- ---- ----
-cpu 100m 20
-memory 256Mi 1Gi
-persistentvolumeclaims 0 10
-pods 1 10
-replicationcontrollers 1 20
-resourcequotas 1 1
-secrets 1 10
-services 0 5
-```
-
-You can now see the pod that was created is consuming explicit amounts of resources (specified by resource *request*), and the usage is being tracked by the Kubernetes system properly.
-
-## Summary
-
-Actions that consume node resources for cpu and memory can be subject to hard quota limits defined by the namespace quota. The resource consumption is measured by resource *request* in pod specification.
-
-Any action that consumes those resources can be tweaked, or can pick up namespace level defaults to meet your end goal.
\ No newline at end of file
diff --git a/docs/admin/resourcequota/walkthrough.md b/docs/admin/resourcequota/walkthrough.md
new file mode 100644
index 00000000000..eeacbddabff
--- /dev/null
+++ b/docs/admin/resourcequota/walkthrough.md
@@ -0,0 +1,164 @@
+---
+---
+
+This example demonstrates how [resource quota](/docs/admin/admission-controllers/#resourcequota) and
+[limit ranger](/docs/admin/admission-controllers/#limitranger) can be applied to a Kubernetes namespace.
+See [ResourceQuota design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_resource_quota.md) for more information.
+
+This example assumes you have a functional Kubernetes setup.
+
+## Step 1: Create a namespace
+
+This example will work in a custom namespace to demonstrate the concepts involved.
+
+Let's create a new namespace called quota-example:
+
+```shell
+$ kubectl create namespace quota-example
+namespace "quota-example" created
+```
+
+Note that `kubectl` commands will print the type and name of the resource created or mutated, which can then be used in subsequent commands:
+
+```shell
+$ kubectl get namespaces
+NAME STATUS AGE
+default Active 50m
+quota-example Active 2s
+```
+
+## Step 2: Apply a quota to the namespace
+
+By default, a pod will run with unbounded CPU and memory requests/limits. This means that any pod in the
+system will be able to consume as much CPU and memory on the node that executes the pod.
+
+Users may want to restrict how much of the cluster resources a given namespace may consume
+across all of its pods in order to manage cluster usage. To do this, a user applies a quota to
+a namespace. A quota lets the user set hard limits on the total amount of node resources (cpu, memory)
+and API resources (pods, services, etc.) that a namespace may consume. In term of resources, Kubernetes
+checks the total resource *requests*, not resource *limits* of all containers/pods in the namespace.
+
+Let's create a simple quota in our namespace:
+
+```shell
+$ kubectl create -f docs/admin/resourcequota/quota.yaml --namespace=quota-example
+resourcequota "quota" created
+```
+
+Once your quota is applied to a namespace, the system will restrict any creation of content
+in the namespace until the quota usage has been calculated. This should happen quickly.
+
+You can describe your current quota usage to see what resources are being consumed in your
+namespace.
+
+```shell
+$ kubectl describe quota quota --namespace=quota-example
+Name: quota
+Namespace: quota-example
+Resource Used Hard
+-------- ---- ----
+cpu 0 20
+memory 0 1Gi
+persistentvolumeclaims 0 10
+pods 0 10
+replicationcontrollers 0 20
+resourcequotas 1 1
+secrets 1 10
+services 0 5
+```
+
+## Step 3: Applying default resource requests and limits
+
+Pod authors rarely specify resource requests and limits for their pods.
+
+Since we applied a quota to our project, let's see what happens when an end-user creates a pod that has unbounded
+cpu and memory by creating an nginx container.
+
+To demonstrate, let's create a Deployment that runs nginx:
+
+```shell
+$ kubectl run nginx --image=nginx --replicas=1 --namespace=quota-example
+deployment "nginx" created
+```
+
+This creates a Deployment "nginx" with its underlying resource, a ReplicaSet, which handles the creation and deletion of Pod replicas. Now let's look at the pods that were created.
+
+```shell
+$ kubectl get pods --namespace=quota-example
+NAME READY STATUS RESTARTS AGE
+```
+
+What happened? I have no pods! Let's describe the ReplicaSet managed by the nginx Deployment to get a view of what is happening.
+Note that `kubectl describe rs` works only on Kubernetes clusters >= v1.2. If you are running older versions, use `kubectl describe rc` instead.
+If you want to obtain the old behavior, use `--generator=run/v1` to create replication controllers. See [`kubectl run`](/docs/user-guide/kubectl/kubectl_run/) for more details.
+
+```shell
+$ kubectl describe rs -l run=nginx --namespace=quota-example
+Name: nginx-2040093540
+Namespace: quota-example
+Image(s): nginx
+Selector: pod-template-hash=2040093540,run=nginx
+Labels: pod-template-hash=2040093540,run=nginx
+Replicas: 0 current / 1 desired
+Pods Status: 0 Running / 0 Waiting / 0 Succeeded / 0 Failed
+No volumes.
+Events:
+ FirstSeen LastSeen Count From SubobjectPath Type Reason Message
+ --------- -------- ----- ---- ------------- -------- ------ -------
+ 48s 26s 4 {replicaset-controller } Warning FailedCreate Error creating: pods "nginx-2040093540-" is forbidden: Failed quota: quota: must specify cpu,memory
+```
+
+The Kubernetes API server is rejecting the ReplicaSet requests to create a pod because our pods
+do not specify any memory usage *request*.
+
+So let's set some default values for the amount of cpu and memory a pod can consume:
+
+```shell
+$ kubectl create -f docs/admin/resourcequota/limits.yaml --namespace=quota-example
+limitrange "limits" created
+$ kubectl describe limits limits --namespace=quota-example
+Name: limits
+Namespace: quota-example
+Type Resource Min Max Default Request Default Limit Max Limit/Request Ratio
+---- -------- --- --- --------------- ------------- -----------------------
+Container cpu - - 100m 200m -
+Container memory - - 256Mi 512Mi -
+```
+
+Now any time a pod is created in this namespace, if it has not specified any resource request/limit, the default
+amount of cpu and memory per container will be applied, and the request will be used as part of admission control.
+
+Now that we have applied default resource *request* for our namespace, our Deployment should be able to
+create its pods.
+
+```shell
+$ kubectl get pods --namespace=quota-example
+NAME READY STATUS RESTARTS AGE
+nginx-2040093540-miohp 1/1 Running 0 5s
+```
+
+And if we print out our quota usage in the namespace:
+
+```shell
+$ kubectl describe quota quota --namespace=quota-example
+Name: quota
+Namespace: quota-example
+Resource Used Hard
+-------- ---- ----
+cpu 100m 20
+memory 256Mi 1Gi
+persistentvolumeclaims 0 10
+pods 1 10
+replicationcontrollers 1 20
+resourcequotas 1 1
+secrets 1 10
+services 0 5
+```
+
+You can now see the pod that was created is consuming explicit amounts of resources (specified by resource *request*), and the usage is being tracked by the Kubernetes system properly.
+
+## Summary
+
+Actions that consume node resources for cpu and memory can be subject to hard quota limits defined by the namespace quota. The resource consumption is measured by resource *request* in pod specification.
+
+Any action that consumes those resources can be tweaked, or can pick up namespace level defaults to meet your end goal.
diff --git a/docs/api-reference/autoscaling/v1/operations.html b/docs/api-reference/autoscaling/v1/operations.html
index a5b2eafe9aa..a0db63a9471 100755
--- a/docs/api-reference/autoscaling/v1/operations.html
+++ b/docs/api-reference/autoscaling/v1/operations.html
@@ -184,7 +184,7 @@
200 |
success |
-v1.HorizontalPodAutoscalerList |
+v1.HorizontalPodAutoscalerList |
@@ -332,7 +332,7 @@
200 |
success |
-v1.HorizontalPodAutoscalerList |
+v1.HorizontalPodAutoscalerList |
@@ -480,7 +480,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -562,7 +562,7 @@
body |
|
true |
-v1.HorizontalPodAutoscaler |
+v1.HorizontalPodAutoscaler |
|
@@ -596,7 +596,7 @@
200 |
success |
-v1.HorizontalPodAutoscaler |
+v1.HorizontalPodAutoscaler |
@@ -728,7 +728,7 @@
200 |
success |
-v1.HorizontalPodAutoscaler |
+v1.HorizontalPodAutoscaler |
@@ -810,7 +810,7 @@
body |
|
true |
-v1.HorizontalPodAutoscaler |
+v1.HorizontalPodAutoscaler |
|
@@ -852,7 +852,7 @@
200 |
success |
-v1.HorizontalPodAutoscaler |
+v1.HorizontalPodAutoscaler |
@@ -934,7 +934,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -976,7 +976,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -1058,7 +1058,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -1100,7 +1100,7 @@
200 |
success |
-v1.HorizontalPodAutoscaler |
+v1.HorizontalPodAutoscaler |
@@ -1188,7 +1188,7 @@
body |
|
true |
-v1.HorizontalPodAutoscaler |
+v1.HorizontalPodAutoscaler |
|
@@ -1230,7 +1230,7 @@
200 |
success |
-v1.HorizontalPodAutoscaler |
+v1.HorizontalPodAutoscaler |
@@ -1370,7 +1370,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -1515,7 +1515,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -1668,7 +1668,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
diff --git a/docs/api-reference/batch/v1/operations.html b/docs/api-reference/batch/v1/operations.html
index d24af0b0320..ae7d44159b9 100755
--- a/docs/api-reference/batch/v1/operations.html
+++ b/docs/api-reference/batch/v1/operations.html
@@ -184,7 +184,7 @@
200 |
success |
-v1.JobList |
+v1.JobList |
@@ -332,7 +332,7 @@
200 |
success |
-v1.JobList |
+v1.JobList |
@@ -480,7 +480,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -562,7 +562,7 @@
body |
|
true |
-v1.Job |
+v1.Job |
|
@@ -596,7 +596,7 @@
200 |
success |
-v1.Job |
+v1.Job |
@@ -728,7 +728,7 @@
200 |
success |
-v1.Job |
+v1.Job |
@@ -810,7 +810,7 @@
body |
|
true |
-v1.Job |
+v1.Job |
|
@@ -852,7 +852,7 @@
200 |
success |
-v1.Job |
+v1.Job |
@@ -934,7 +934,7 @@
body |
|
true |
-v1.DeleteOptions |
+v1.DeleteOptions |
|
@@ -976,7 +976,7 @@
200 |
success |
-unversioned.Status |
+unversioned.Status |
@@ -1058,7 +1058,7 @@
body |
|
true |
-unversioned.Patch |
+unversioned.Patch |
|
@@ -1100,7 +1100,7 @@
200 |
success |
-v1.Job |
+v1.Job |
@@ -1188,7 +1188,7 @@
body |
|
true |
-v1.Job |
+v1.Job |
|
@@ -1230,7 +1230,7 @@
200 |
success |
-v1.Job |
+v1.Job |
@@ -1370,7 +1370,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -1515,7 +1515,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -1668,7 +1668,7 @@
200 |
success |
-json.WatchEvent |
+json.WatchEvent |
@@ -1714,4 +1714,4 @@ Last updated 2016-03-14 21:55:47 UTC
-