diff --git a/_config.yml b/_config.yml
index cb06611999..fff0133fe9 100644
--- a/_config.yml
+++ b/_config.yml
@@ -32,11 +32,14 @@ defaults:
 permalink: pretty
 
 gems:
-- jekyll-redirect-from
 - jekyll-feed
 - jekyll-sitemap
 - jekyll-seo-tag
 - jekyll-include-cache
+# disabled gems
+# - jekyll-redirect-from
+
+include: [_redirects]
 
 # SEO
 logo: /images/favicon.png
diff --git a/_redirects b/_redirects
new file mode 100644
index 0000000000..70e111ece7
--- /dev/null
+++ b/_redirects
@@ -0,0 +1,242 @@
+#
+# set server-side redirects in this file
+# see https://www.netlify.com/docs/redirects/
+#
+
+/docs/admin/addons /docs/concepts/cluster-administration/addons 301
+/docs/admin/apparmor/ /docs/tutorials/clusters/apparmor 301
+/docs/admin/audit /docs/tasks/debug-application-cluster/audit 301
+/docs/admin/cluster-components /docs/concepts/overview/components 301
+/docs/admin/cluster-management /docs/tasks/administer-cluster/cluster-management 301
+/docs/admin/cluster-troubleshooting /docs/tasks/debug-application-cluster/debug-cluster 301
+/docs/admin/daemons /docs/concepts/workloads/controllers/daemonset 301
+/docs/admin/disruptions /docs/concepts/workloads/pods/disruptions 301
+/docs/admin/dns /docs/concepts/services-networking/dns-pod-service 301
+/docs/admin/etcd /docs/tasks/administer-cluster/configure-upgrade-etcd 301
+/docs/admin/etcd_upgrade /docs/tasks/administer-cluster/configure-upgrade-etcd 301
+/docs/admin/federation/kubefed /docs/tasks/federation/set-up-cluster-federation-kubefed 301
+/docs/admin/garbage-collection /docs/concepts/cluster-administration/kubelet-garbage-collection 301
+/docs/admin/ha-master-gce /docs/tasks/administer-cluster/highly-available-master 301
+/docs/admin/ /docs/concepts/cluster-administration/cluster-administration-overview 301
+/docs/admin/kubeadm-upgrade-1-7 /docs/tasks/administer-cluster/kubeadm-upgrade-1-7 301
+/docs/admin/limitrange/ /docs/tasks/administer-cluster/cpu-memory-limit 301
+/docs/admin/master-node-communication /docs/concepts/architecture/master-node-communication 301
+/docs/admin/multi-cluster /docs/concepts/cluster-administration/federation 301
+/docs/admin/multiple-schedulers /docs/tasks/administer-cluster/configure-multiple-schedulers 301
+/docs/admin/namespaces/ /docs/tasks/administer-cluster/namespaces 301
+/docs/admin/namespaces/walkthrough /docs/tasks/administer-cluster/namespaces-walkthrough 301
+/docs/admin/network-plugins /docs/concepts/cluster-administration/network-plugins 301
+/docs/admin/networking /docs/concepts/cluster-administration/networking 301
+/docs/admin/node /docs/concepts/architecture/nodes 301
+/docs/admin/node-allocatable /docs/tasks/administer-cluster/reserve-compute-resources 301
+/docs/admin/node-problem /docs/tasks/debug-application-cluster/monitor-node-health 301
+/docs/admin/out-of-resource /docs/tasks/administer-cluster/out-of-resource 301
+/docs/admin/rescheduler /docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods 301
+/docs/admin/resourcequota/ /docs/concepts/policy/resource-quotas 301
+/docs/admin/resourcequota/limitstorageconsumption /docs/tasks/administer-cluster/limit-storage-consumption 301
+/docs/admin/resourcequota/walkthrough /docs/tasks/administer-cluster/apply-resource-quota-limit 301
+/docs/admin/static-pods /docs/tasks/administer-cluster/static-pod 301
+/docs/admin/sysctls /docs/concepts/cluster-administration/sysctl-cluster 301
+/docs/admin/upgrade-1-6 /docs/tasks/administer-cluster/upgrade-1-6 301
+
+/docs/api /docs/concepts/overview/kubernetes-api 301
+
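+# each rule above and below is "<from-path> <to-path-or-URL> <status>"; Netlify
+# applies the first rule that matches, top to bottom, so more specific paths
+# (such as /docs/admin/addons) must come before their parents (such as
+# /docs/admin/). targets may be absolute URLs as well as site paths, as in the
+# github.com and medium.com entries near the end of this file.
+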
+/docs/concepts/abstractions/controllers/garbage-collection /docs/concepts/workloads/controllers/garbage-collection 301
+/docs/concepts/abstractions/controllers/petsets /docs/concepts/workloads/controllers/petset 301
+/docs/concepts/abstractions/controllers/statefulsets /docs/concepts/workloads/controllers/statefulset 301
+/docs/concepts/abstractions/init-containers /docs/concepts/workloads/pods/init-containers 301
+/docs/concepts/abstractions/overview /docs/concepts/overview/working-with-objects/kubernetes-objects 301
+/docs/concepts/abstractions/pod /docs/concepts/workloads/pods/pod-overview 301
+
+/docs/concepts/cluster-administration/access-cluster /docs/tasks/access-application-cluster/access-cluster 301
+/docs/concepts/cluster-administration/audit /docs/tasks/debug-application-cluster/audit 301
+/docs/concepts/cluster-administration/authenticate-across-clusters-kubeconfig /docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig 301
+/docs/concepts/cluster-administration/cluster-management /docs/tasks/administer-cluster/cluster-management 301
+/docs/concepts/cluster-administration/configure-etcd /docs/tasks/administer-cluster/configure-upgrade-etcd 301
+/docs/concepts/cluster-administration/etcd-upgrade /docs/tasks/administer-cluster/configure-upgrade-etcd 301
+/docs/concepts/cluster-administration/federation-service-discovery /docs/tasks/federation/federation-service-discovery 301
+/docs/concepts/cluster-administration/guaranteed-scheduling-critical-addon-pods /docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods 301
+/docs/concepts/cluster-administration/master-node-communication /docs/concepts/architecture/master-node-communication 301
+/docs/concepts/cluster-administration/multiple-clusters /docs/concepts/cluster-administration/federation 301
+/docs/concepts/cluster-administration/out-of-resource /docs/tasks/administer-cluster/out-of-resource 301
+/docs/concepts/cluster-administration/resource-usage-monitoring /docs/tasks/debug-application-cluster/resource-usage-monitoring 301
+/docs/concepts/cluster-administration/static-pod /docs/tasks/administer-cluster/static-pod 301
+
+/docs/concepts/clusters/logging /docs/concepts/cluster-administration/logging 301
+/docs/concepts/configuration/container-command-arg /docs/tasks/inject-data-application/define-command-argument-container 301
+/docs/concepts/ecosystem/thirdpartyresource /docs/tasks/access-kubernetes-api/extend-api-third-party-resource 301
+/docs/concepts/jobs/cron-jobs /docs/concepts/workloads/controllers/cron-jobs 301
+/docs/concepts/jobs/run-to-completion-finite-workloads /docs/concepts/workloads/controllers/jobs-run-to-completion 301
+/docs/concepts/nodes/node /docs/concepts/architecture/nodes 301
+/docs/concepts/storage/etcd-store-api-object /docs/tasks/administer-cluster/configure-upgrade-etcd 301
+
+/docs/concepts/tools/kubectl/object-management-overview /docs/tutorials/object-management-kubectl/object-management 301
+/docs/concepts/tools/kubectl/object-management-using-declarative-config /docs/tutorials/object-management-kubectl/declarative-object-management-configuration 301
+/docs/concepts/tools/kubectl/object-management-using-imperative-commands /docs/tutorials/object-management-kubectl/imperative-object-management-command 301
+/docs/concepts/tools/kubectl/object-management-using-imperative-config /docs/tutorials/object-management-kubectl/imperative-object-management-configuration 301
+
+/docs/getting-started-guides/ /docs/setup/pick-right-solution 301
+/docs/getting-started-guides/kubeadm /docs/setup/independent/create-cluster-kubeadm 301
+/docs/getting-started-guides/network-policy/calico /docs/tasks/administer-cluster/calico-network-policy 301
+/docs/getting-started-guides/network-policy/romana /docs/tasks/administer-cluster/romana-network-policy 301
+/docs/getting-started-guides/network-policy/walkthrough /docs/tasks/administer-cluster/declare-network-policy 301
+/docs/getting-started-guides/network-policy/weave /docs/tasks/administer-cluster/weave-network-policy 301
+/docs/getting-started-guides/running-cloud-controller /docs/tasks/administer-cluster/running-cloud-controller 301
+/docs/getting-started-guides/ubuntu/calico /docs/getting-started-guides/ubuntu/ 301
+
+/docs/hellonode /docs/tutorials/stateless-application/hello-minikube 301
+/docs/ /docs/home/ 301
+/docs/samples /docs/tutorials/ 301
+/docs/tasks/administer-cluster/assign-pods-nodes /docs/tasks/configure-pod-container/assign-pods-nodes 301
+/docs/tasks/administer-cluster/overview /docs/concepts/cluster-administration/cluster-administration-overview 301
+
+/docs/tasks/configure-pod-container/apply-resource-quota-limit /docs/tasks/administer-cluster/apply-resource-quota-limit 301
+/docs/tasks/configure-pod-container/calico-network-policy /docs/tasks/administer-cluster/calico-network-policy 301
+/docs/tasks/configure-pod-container/communicate-containers-same-pod /docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume 301
+/docs/tasks/configure-pod-container/declare-network-policy /docs/tasks/administer-cluster/declare-network-policy 301
+/docs/tasks/configure-pod-container/define-environment-variable-container /docs/tasks/inject-data-application/define-environment-variable-container 301
+/docs/tasks/configure-pod-container/distribute-credentials-secure /docs/tasks/inject-data-application/distribute-credentials-secure 301
+/docs/tasks/configure-pod-container/downward-api-volume-expose-pod-information /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information 301
+/docs/tasks/configure-pod-container/environment-variable-expose-pod-information /docs/tasks/inject-data-application/environment-variable-expose-pod-information 301
+/docs/tasks/configure-pod-container/limit-range /docs/tasks/administer-cluster/cpu-memory-limit 301
+/docs/tasks/configure-pod-container/romana-network-policy /docs/tasks/administer-cluster/romana-network-policy 301
+/docs/tasks/configure-pod-container/weave-network-policy /docs/tasks/administer-cluster/weave-network-policy 301
+
+/docs/tasks/kubectl/get-shell-running-container /docs/tasks/debug-application-cluster/get-shell-running-container 301
+/docs/tasks/kubectl/install /docs/tasks/tools/install-kubectl 301
+/docs/tasks/kubectl/list-all-running-container-images /docs/tasks/access-application-cluster/list-all-running-container-images 301
+
+/docs/tasks/manage-stateful-set/debugging-a-statefulset /docs/tasks/debug-application-cluster/debug-stateful-set 301
+/docs/tasks/manage-stateful-set/deleting-a-statefulset /docs/tasks/run-application/delete-stateful-set 301
+/docs/tasks/manage-stateful-set/scale-stateful-set /docs/tasks/run-application/scale-stateful-set 301
+/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set /docs/tasks/run-application/upgrade-pet-set-to-stateful-set 301
+
+/docs/tasks/run-application/podpreset /docs/tasks/inject-data-application/podpreset 301
+/docs/tasks/troubleshoot/debug-init-containers /docs/tasks/debug-application-cluster/debug-init-containers 301
+/docs/tasks/web-ui-dashboard /docs/tasks/access-application-cluster/web-ui-dashboard 301
+/docs/templatedemos /docs/home/contribute/page-templates 301
+/docs/tools/kompose/ /docs/tools/kompose/user-guide 301
+
+/docs/tutorials/clusters/multiple-schedulers /docs/tasks/administer-cluster/configure-multiple-schedulers 301
+/docs/tutorials/connecting-apps/connecting-frontend-backend /docs/tasks/access-application-cluster/connecting-frontend-backend 301
+/docs/tutorials/federation/set-up-cluster-federation-kubefed /docs/tasks/federation/set-up-cluster-federation-kubefed 301
+/docs/tutorials/federation/set-up-coredns-provider-federation /docs/tasks/federation/set-up-coredns-provider-federation 301
+/docs/tutorials/federation/set-up-placement-policies-federation /docs/tasks/federation/set-up-placement-policies-federation 301
+/docs/tutorials/getting-started/create-cluster /docs/tutorials/kubernetes-basics/cluster-intro 301
+/docs/tutorials/stateful-application/run-replicated-stateful-application /docs/tasks/run-application/run-replicated-stateful-application 301
+/docs/tutorials/stateful-application/run-stateful-application /docs/tasks/run-application/run-single-instance-stateful-application 301
+/docs/tutorials/stateless-application/expose-external-ip-address-service /docs/tasks/access-application-cluster/service-access-application-cluster 301
+/docs/tutorials/stateless-application/run-stateless-ap-replication-controller /docs/tasks/run-application/run-stateless-application-deployment 301
+/docs/tutorials/stateless-application/run-stateless-application-deployment /docs/tasks/run-application/run-stateless-application-deployment 301
+
+/docs/user-guide/accessing-the-cluster /docs/tasks/access-application-cluster/access-cluster 301
+/docs/user-guide/add-entries-to-pod-etc-hosts-with-host-aliases/ /docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases 301
+/docs/user-guide/annotations /docs/concepts/overview/working-with-objects/annotations 301
+/docs/user-guide/application-troubleshooting /docs/tasks/debug-application-cluster/debug-application 301
+/docs/user-guide/compute-resources /docs/concepts/configuration/manage-compute-resources-container 301
+/docs/user-guide/config-best-practices /docs/concepts/configuration/overview 301
+/docs/user-guide/configmap/ /docs/tasks/configure-pod-container/configmap 301
+/docs/user-guide/configuring-containers /docs/tasks/ 301
+/docs/user-guide/connecting-applications /docs/concepts/services-networking/connect-applications-service 301
+/docs/user-guide/connecting-to-applications-port-forward /docs/tasks/access-application-cluster/port-forward-access-application-cluster 301
+/docs/user-guide/connecting-to-applications-proxy /docs/tasks/access-kubernetes-api/http-proxy-access-api 301
+/docs/user-guide/container-environment /docs/concepts/containers/container-lifecycle-hooks 301
+/docs/user-guide/cron-jobs /docs/concepts/workloads/controllers/cron-jobs 301
+/docs/user-guide/debugging-pods-and-replication-controllers /docs/tasks/debug-application-cluster/debug-pod-replication-controller 301
+/docs/user-guide/debugging-services /docs/tasks/debug-application-cluster/debug-service 301
+/docs/user-guide/deploying-applications /docs/tasks/run-application/run-stateless-application-deployment 301
+/docs/user-guide/deployments /docs/concepts/workloads/controllers/deployment 301
+/docs/user-guide/downward-api/ /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information 301
+/docs/user-guide/downward-api/volume/ /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information 301
+/docs/user-guide/environment-guide/ /docs/tasks/inject-data-application/environment-variable-expose-pod-information 301
+/docs/user-guide/federation/cluster /docs/tasks/administer-federation/cluster 301
+/docs/user-guide/federation/configmap /docs/tasks/administer-federation/configmap 301
+/docs/user-guide/federation/daemonsets /docs/tasks/administer-federation/daemonset 301
+/docs/user-guide/federation/deployment /docs/tasks/administer-federation/deployment 301
+/docs/user-guide/federation/events /docs/tasks/administer-federation/events 301
+/docs/user-guide/federation/federated-ingress /docs/tasks/administer-federation/ingress 301
+/docs/user-guide/federation/federated-services /docs/tasks/federation/federation-service-discovery 301
+/docs/user-guide/federation/ /docs/concepts/cluster-administration/federation 301
+/docs/user-guide/federation/namespaces /docs/tasks/administer-federation/namespaces 301
+/docs/user-guide/federation/replicasets /docs/tasks/administer-federation/replicaset 301
+/docs/user-guide/federation/secrets /docs/tasks/administer-federation/secret 301
+/docs/user-guide/garbage-collection /docs/concepts/workloads/controllers/garbage-collection 301
+/docs/user-guide/getting-into-containers /docs/tasks/debug-application-cluster/get-shell-running-container 301
+/docs/user-guide/gpus /docs/tasks/manage-gpus/scheduling-gpus 301
+/docs/user-guide/horizontal-pod-autoscaling/ /docs/tasks/run-application/horizontal-pod-autoscale 301
+/docs/user-guide/horizontal-pod-autoscaling/walkthrough /docs/tasks/run-application/horizontal-pod-autoscale-walkthrough 301
+/docs/user-guide/identifiers /docs/concepts/overview/working-with-objects/names 301
+/docs/user-guide/images /docs/concepts/containers/images 301
+/docs/user-guide/ /docs/home/ 301
+/docs/user-guide/ingress /docs/concepts/services-networking/ingress 301
+/docs/user-guide/introspection-and-debugging /docs/tasks/debug-application-cluster/debug-application-introspection 301
+/docs/user-guide/jobs /docs/concepts/workloads/controllers/jobs-run-to-completion 301
+/docs/user-guide/jobs/expansions/ /docs/tasks/job/parallel-processing-expansion 301
+/docs/user-guide/jobs/work-queue-1/ /docs/tasks/job/coarse-parallel-processing-work-queue/ 301
+/docs/user-guide/jobs/work-queue-2/ /docs/tasks/job/fine-parallel-processing-work-queue/ 301
+/docs/user-guide/kubeconfig-file /docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig 301
+/docs/user-guide/labels /docs/concepts/overview/working-with-objects/labels 301
+/docs/user-guide/liveness /docs/tasks/configure-pod-container/configure-liveness-readiness-probes 301
+/docs/user-guide/load-balancer /docs/tasks/access-application-cluster/create-external-load-balancer 301
+/docs/user-guide/logging/elasticsearch /docs/tasks/debug-application-cluster/logging-elasticsearch-kibana 301
+/docs/user-guide/logging/overview /docs/concepts/cluster-administration/logging 301
+/docs/user-guide/logging/stackdriver /docs/tasks/debug-application-cluster/logging-stackdriver 301
+/docs/user-guide/managing-deployments /docs/concepts/cluster-administration/manage-deployment 301
+/docs/user-guide/monitoring /docs/tasks/debug-application-cluster/resource-usage-monitoring 301
+/docs/user-guide/namespaces /docs/concepts/overview/working-with-objects/namespaces 301
+/docs/user-guide/networkpolicies /docs/concepts/services-networking/network-policies 301
+/docs/user-guide/node-selection/ /docs/concepts/configuration/assign-pod-node 301
+/docs/user-guide/persistent-volumes/ /docs/concepts/storage/persistent-volumes 301
+/docs/user-guide/persistent-volumes/walkthrough /docs/tasks/configure-pod-container/configure-persistent-volume-storage 301
+/docs/user-guide/petset /docs/concepts/workloads/controllers/petset 301
+/docs/user-guide/petset/bootstrapping/ /docs/concepts/workloads/controllers/petset 301
+/docs/user-guide/pod-preset/ /docs/tasks/inject-data-application/podpreset 301
+/docs/user-guide/pod-security-policy/ /docs/concepts/policy/pod-security-policy 301
+/docs/user-guide/pod-states /docs/concepts/workloads/pods/pod-lifecycle 301
+/docs/user-guide/pod-templates /docs/concepts/workloads/pods/pod-overview 301
+/docs/user-guide/pods/ /docs/concepts/workloads/pods/pod 301
+/docs/user-guide/pods/init-container /docs/concepts/workloads/pods/init-containers 301
+/docs/user-guide/pods/multi-container /docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume 301
+/docs/user-guide/pods/single-container /docs/tasks/run-application/run-stateless-application-deployment 301
+/docs/user-guide/prereqs /docs/tasks/tools/install-kubectl 301
+/docs/user-guide/production-pods /docs/tasks/ 301
+/docs/user-guide/projected-volume/ /docs/tasks/configure-pod-container/configure-projected-volume-storage 301
+/docs/user-guide/quick-start /docs/tasks/access-application-cluster/service-access-application-cluster 301
+/docs/user-guide/replicasets /docs/concepts/workloads/controllers/replicaset 301
+/docs/user-guide/replication-controller/ /docs/concepts/workloads/controllers/replicationcontroller 301
+/docs/user-guide/rolling-updates /docs/tasks/run-application/rolling-update-replication-controller 301
+/docs/user-guide/secrets/ /docs/concepts/configuration/secret 301
+/docs/user-guide/secrets/walkthrough /docs/tasks/inject-data-application/distribute-credentials-secure 301
+/docs/user-guide/service-accounts /docs/tasks/configure-pod-container/configure-service-account 301
+/docs/user-guide/services-firewalls /docs/tasks/access-application-cluster/configure-cloud-provider-firewall 301
+/docs/user-guide/services/ /docs/concepts/services-networking/service 301
+/docs/user-guide/services/operations /docs/tasks/access-application-cluster/connecting-frontend-backend 301
+/docs/user-guide/sharing-clusters /docs/tasks/administer-cluster/share-configuration 301
+/docs/user-guide/simple-nginx /docs/tasks/run-application/run-stateless-application-deployment 301
+/docs/user-guide/thirdpartyresources /docs/tasks/access-kubernetes-api/extend-api-third-party-resource 301
+/docs/user-guide/ui /docs/tasks/access-application-cluster/web-ui-dashboard 301
+/docs/user-guide/update-demo/ /docs/tasks/run-application/rolling-update-replication-controller 301
+/docs/user-guide/volumes /docs/concepts/storage/volumes 301
+/docs/user-guide/working-with-resources /docs/tutorials/object-management-kubectl/object-management 301
+
+/docs/whatisk8s /docs/concepts/overview/what-is-kubernetes 301
+
+#
+# redirects from /js/redirects.js
+#
+/resource-quota /docs/concepts/policy/resource-quotas 301
+/horizontal-pod-autoscaler /docs/tasks/run-application/horizontal-pod-autoscale 301
+/docs/roadmap https://github.com/kubernetes/kubernetes/milestones/ 301
+/api-ref https://github.com/kubernetes/kubernetes/milestones/ 301
+/kubernetes/third_party/swagger-ui /docs/reference 301
+/docs/user-guide/overview /docs/concepts/overview/what-is-kubernetes 301
+/docs/troubleshooting /docs/tasks/debug-application-cluster/troubleshooting 301
+/docs/concepts/services-networking/networkpolicies /docs/concepts/services-networking/network-policies 301
+/docs/getting-started-guides/meanstack https://medium.com/google-cloud/running-a-mean-stack-on-google-cloud-platform-with-kubernetes-149ca81c2b5d 301
+/docs/samples /docs/tutorials 301
+/v1.1 / 301
+/v1.0 / 301
+
+#
+# Redirect users with chinese language preference to /cn
+#
+#/ /cn 302 Language=zh
diff --git a/docs/concepts/architecture/master-node-communication.md b/docs/concepts/architecture/master-node-communication.md
index 1e35922b1a..48b668990d 100644
--- a/docs/concepts/architecture/master-node-communication.md
+++ b/docs/concepts/architecture/master-node-communication.md
@@ -4,11 +4,6 @@ assignees:
 - roberthbailey
 - liggitt
 title: Master-Node communication
-redirect_from:
-- "/docs/admin/master-node-communication/"
-- "/docs/admin/master-node-communication.html"
-- "/docs/concepts/cluster-administration/master-node-communication/"
-- "/docs/concepts/cluster-administration/master-node-communication.html"
 ---
 
 * TOC
@@ -30,18 +25,18 @@ services).
 
 In a typical deployment, the apiserver is configured to listen for remote
 connections on a secure HTTPS port (443) with one or more forms of client
 [authentication](/docs/admin/authentication/) enabled. One or more forms of
 [authorization](/docs/admin/authorization/) should be enabled, especially
-if [anonymous requests](/docs/admin/authentication/#anonymous-requests) or 
-[service account tokens](/docs/admin/authentication/#service-account-tokens) 
+if [anonymous requests](/docs/admin/authentication/#anonymous-requests) or
+[service account tokens](/docs/admin/authentication/#service-account-tokens)
 are allowed.
 
 Nodes should be provisioned with the public root certificate for the cluster
 such that they can connect securely to the apiserver along with valid client
 credentials. For example, on a default GCE deployment, the client credentials
-provided to the kubelet are in the form of a client certificate. See 
-[kubelet TLS bootstrapping](/docs/admin/kubelet-tls-bootstrapping/) for 
-automated provisioning of kubelet client certificates. 
+provided to the kubelet are in the form of a client certificate. See
+[kubelet TLS bootstrapping](/docs/admin/kubelet-tls-bootstrapping/) for
+automated provisioning of kubelet client certificates.
 
-Pods that wish to connect to the apiserver can do so securely by leveraging a 
+Pods that wish to connect to the apiserver can do so securely by leveraging a
 service account so that Kubernetes will automatically inject the public root
 certificate and a valid bearer token into the pod when it is instantiated.
 The `kubernetes` service (in all namespaces) is configured with a virtual IP
@@ -71,23 +66,23 @@ or service through the apiserver's proxy functionality.
 
 The connections from the apiserver to the kubelet are used for fetching logs
 for pods, attaching (through kubectl) to running pods, and using the kubelet's
-port-forwarding functionality. These connections terminate at the kubelet's 
+port-forwarding functionality. These connections terminate at the kubelet's
 HTTPS endpoint.
 
 By default, the apiserver does not verify the kubelet's serving certificate,
-which makes the connection subject to man-in-the-middle attacks, and 
+which makes the connection subject to man-in-the-middle attacks, and
 **unsafe** to run over untrusted and/or public networks.
 
-To verify this connection, use the `--kubelet-certificate-authority` flag to 
-provide the apiserver with a root certificates bundle to use to verify the 
+To verify this connection, use the `--kubelet-certificate-authority` flag to
+provide the apiserver with a root certificates bundle to use to verify the
 kubelet's serving certificate.
 
 If that is not possible, use [SSH tunneling](/docs/admin/master-node-communication/#ssh-tunnels)
-between the apiserver and kubelet if required to avoid connecting over an 
+between the apiserver and kubelet if required to avoid connecting over an
 untrusted or public network.
 
 Finally, [Kubelet authentication and/or authorization](/docs/admin/kubelet-authentication-authorization/)
-should be enabled to secure the kubelet API. 
+should be enabled to secure the kubelet API.
 
 ### apiserver -> nodes, pods, and services
diff --git a/docs/concepts/architecture/nodes.md b/docs/concepts/architecture/nodes.md
index 7afc36a1ee..dcf78ca4c7 100644
--- a/docs/concepts/architecture/nodes.md
+++ b/docs/concepts/architecture/nodes.md
@@ -3,11 +3,6 @@ assignees:
 - caesarxuchao
 - dchen1107
 title: Nodes
-redirect_from:
-- "/docs/admin/node/"
-- "/docs/admin/node.html"
-- "/docs/concepts/nodes/node/"
-- "/docs/concepts/nodes/node.html"
 ---
 
 * TOC
@@ -68,7 +63,7 @@ The node condition is represented as a JSON object. For example, the following r
   ]
 ```
 
-If the Status of the Ready condition is "Unknown" or "False" for longer than the `pod-eviction-timeout`, an argument passed to the [kube-controller-manager](/docs/admin/kube-controller-manager/), all of the Pods on the node are scheduled for deletion by the Node Controller. The default eviction timeout duration is **five minutes**. In some cases when the node is unreachable, the apiserver is unable to communicate with the kubelet on it. The decision to delete the pods cannot be communicated to the kubelet until it re-establishes communication with the apiserver. In the meantime, the pods which are scheduled for deletion may continue to run on the partitioned node. 
+If the Status of the Ready condition is "Unknown" or "False" for longer than the `pod-eviction-timeout`, an argument passed to the [kube-controller-manager](/docs/admin/kube-controller-manager/), all of the Pods on the node are scheduled for deletion by the Node Controller. The default eviction timeout duration is **five minutes**. In some cases when the node is unreachable, the apiserver is unable to communicate with the kubelet on it. The decision to delete the pods cannot be communicated to the kubelet until it re-establishes communication with the apiserver. In the meantime, the pods which are scheduled for deletion may continue to run on the partitioned node.
 
 In versions of Kubernetes prior to 1.5, the node controller would [force delete](/docs/concepts/workloads/pods/pod/#force-deletion-of-pods) these unreachable pods from the apiserver. However, in 1.5 and higher, the node controller does not force delete pods until it is confirmed that they have stopped running in the cluster. One can see these pods which may be running on an unreachable node as being in the "Terminating" or "Unknown" states. In cases where Kubernetes cannot deduce from the underlying infrastructure if a node has permanently left a cluster, the cluster administrator may need to delete the node object by hand. Deleting the node object from Kubernetes causes all the Pod objects running on it to be deleted from the apiserver, freeing up their names.
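For a quick look at the Ready condition discussed in the hunk above, `kubectl` can dump a node's conditions directly; a minimal sketch (node name illustrative):

```shell
# List nodes with their aggregate Ready status.
kubectl get nodes

# Print the full Conditions block (type, status, transition times) for one node.
kubectl describe node my-node-1
```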
diff --git a/docs/concepts/cluster-administration/addons.md b/docs/concepts/cluster-administration/addons.md
index b8c492679c..53b94997dc 100644
--- a/docs/concepts/cluster-administration/addons.md
+++ b/docs/concepts/cluster-administration/addons.md
@@ -1,8 +1,5 @@
 ---
 title: Installing Addons
-redirect_from:
-- "/docs/admin/addons/"
-- "/docs/admin/addons.html"
 ---
 
 ## Overview
diff --git a/docs/concepts/cluster-administration/cluster-administration-overview.md b/docs/concepts/cluster-administration/cluster-administration-overview.md
index fa6aee797d..e47b15c117 100644
--- a/docs/concepts/cluster-administration/cluster-administration-overview.md
+++ b/docs/concepts/cluster-administration/cluster-administration-overview.md
@@ -3,11 +3,6 @@ assignees:
 - davidopp
 - lavalamp
 title: Cluster Administration Overview
-redirect_from:
-- "/docs/admin/"
-- "/docs/admin/index.html"
-- "/docs/tasks/administer-cluster/overview/"
-- "/docs/tasks/administer-cluster/overview.html"
 ---
 
 {% capture overview %}
diff --git a/docs/concepts/cluster-administration/federation.md b/docs/concepts/cluster-administration/federation.md
index 5a698c7fb7..c62be6f39f 100644
--- a/docs/concepts/cluster-administration/federation.md
+++ b/docs/concepts/cluster-administration/federation.md
@@ -1,12 +1,5 @@
 ---
 title: Federation
-redirect_from:
-- "/docs/user-guide/federation/"
-- "/docs/user-guide/federation/index.html"
-- "/docs/concepts/cluster-administration/multiple-clusters/"
-- "/docs/concepts/cluster-administration/multiple-clusters.html"
-- "/docs/admin/multi-cluster/"
-- "/docs/admin/multi-cluster.html"
 ---
 
 {% capture overview %}
@@ -48,7 +41,7 @@ why you might want multiple clusters are:
 * [Hybrid cloud](###hybrid-cloud-capabilities): You can have multiple clusters
 on different cloud providers or on-premises data centers.
 
-### Caveats 
+### Caveats
 
 While there are a lot of attractive use cases for federation, there are also
 some caveats:
diff --git a/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/docs/concepts/cluster-administration/kubelet-garbage-collection.md
index bc9ea79282..e4b00f765b 100644
--- a/docs/concepts/cluster-administration/kubelet-garbage-collection.md
+++ b/docs/concepts/cluster-administration/kubelet-garbage-collection.md
@@ -2,9 +2,6 @@ assignees:
 - mikedanese
 title: Configuring kubelet Garbage Collection
-redirect_from:
-- "/docs/admin/garbage-collection/"
-- "/docs/admin/garbage-collection.html"
 ---
 
 * TOC
diff --git a/docs/concepts/cluster-administration/logging.md b/docs/concepts/cluster-administration/logging.md
index 13773c90fd..d74e1eafc6 100644
--- a/docs/concepts/cluster-administration/logging.md
+++ b/docs/concepts/cluster-administration/logging.md
@@ -3,12 +3,6 @@ assignees:
 - crassirostris
 - piosz
 title: Logging Architecture
-redirect_from:
-- "/docs/concepts/clusters/logging/"
-- "/docs/concepts/clusters/logging.html"
-redirect_from:
-- "/docs/user-guide/logging/overview/"
-- "/docs/user-guide/logging/overview.html"
 ---
 
 Application and systems logs can help you understand what is happening inside your cluster. The logs are particularly useful for debugging problems and monitoring cluster activity. Most modern applications have some kind of logging mechanism; as such, most container engines are likewise designed to support some kind of logging. The easiest and most embraced logging method for containerized applications is to write to the standard output and standard error streams.
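Since the logging page's recommended pattern is writing to stdout/stderr, a minimal sketch of reading those captured streams back (pod and container names illustrative):

```shell
# Print the stdout/stderr captured for a single-container pod.
kubectl logs my-pod

# Follow one container's stream in a multi-container pod.
kubectl logs -f my-pod -c my-container
```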
diff --git a/docs/concepts/cluster-administration/manage-deployment.md b/docs/concepts/cluster-administration/manage-deployment.md
index 904c0b61e6..c59278786c 100644
--- a/docs/concepts/cluster-administration/manage-deployment.md
+++ b/docs/concepts/cluster-administration/manage-deployment.md
@@ -4,9 +4,6 @@ assignees:
 - janetkuo
 - mikedanese
 title: Managing Resources
-redirect_from:
-- "/docs/user-guide/managing-deployments/"
-- "/docs/user-guide/managing-deployments.html"
 ---
 
 You've deployed your application and exposed it via a service. Now what? Kubernetes provides a number of tools to help you manage your application deployment, including scaling and updating. Among the features we'll discuss in more depth are [configuration files](/docs/user-guide/configuring-containers/#configuration-in-kubernetes) and [labels](/docs/user-guide/deploying-applications/#labels).
diff --git a/docs/concepts/cluster-administration/network-plugins.md b/docs/concepts/cluster-administration/network-plugins.md
index d36a6c7f42..c0eee99423 100644
--- a/docs/concepts/cluster-administration/network-plugins.md
+++ b/docs/concepts/cluster-administration/network-plugins.md
@@ -4,9 +4,6 @@ assignees:
 - freehan
 - thockin
 title: Network Plugins
-redirect_from:
-- "/docs/admin/network-plugins/"
-- "/docs/admin/network-plugins.html"
 ---
 
 * TOC
diff --git a/docs/concepts/cluster-administration/networking.md b/docs/concepts/cluster-administration/networking.md
index c65f3ca158..fccf11262a 100644
--- a/docs/concepts/cluster-administration/networking.md
+++ b/docs/concepts/cluster-administration/networking.md
@@ -2,9 +2,6 @@ assignees:
 - thockin
 title: Cluster Networking
-redirect_from:
-- "/docs/admin/networking/"
-- "/docs/admin/networking.html"
 ---
 
 Kubernetes approaches networking somewhat differently than Docker does by
@@ -85,7 +82,7 @@ talk to other VMs in your project. This is the same basic model.
 
 Until now this document has talked about containers. In reality, Kubernetes
 applies IP addresses at the `Pod` scope - containers within a `Pod` share their
 network namespaces - including their IP address. This means that containers
-within a `Pod` can all reach each other's ports on `localhost`. This does imply 
+within a `Pod` can all reach each other's ports on `localhost`. This does imply
 that containers within a `Pod` must coordinate port usage, but this is no
 different than processes in a VM. We call this the "IP-per-pod" model. This
 is implemented in Docker as a "pod container" which holds the network namespace
@@ -217,9 +214,9 @@ Calico can also be run in policy enforcement mode in conjunction with other netw
 
 ### Weave Net from Weaveworks
 
-[Weave Net](https://www.weave.works/products/weave-net/) is a 
-resilient and simple to use network for Kubernetes and its hosted applications. 
-Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/) 
+[Weave Net](https://www.weave.works/products/weave-net/) is a
+resilient and simple to use network for Kubernetes and its hosted applications.
+Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/)
 or stand-alone. In either version, it doesn't require any configuration or
 extra code to run, and in both cases, the network provides one IP address per
 pod - as is standard for Kubernetes.
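As a sketch of the "IP-per-pod" model described above, here are two containers sharing one network namespace, so one can reach the other on `localhost` (names and images illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: shared-net-demo
spec:
  containers:
  - name: web
    image: nginx
    ports:
    - containerPort: 80
  - name: poller
    image: busybox
    # Same pod network namespace: the nginx container answers on localhost:80.
    command: ["sh", "-c", "while true; do wget -qO- http://localhost:80 > /dev/null; sleep 10; done"]
```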
diff --git a/docs/concepts/cluster-administration/proxies.md b/docs/concepts/cluster-administration/proxies.md
index 2f7dbe400f..41e29d6cef 100644
--- a/docs/concepts/cluster-administration/proxies.md
+++ b/docs/concepts/cluster-administration/proxies.md
@@ -1,8 +1,5 @@
 ---
 title: Proxies in Kubernetes
-redirect_from:
-- "/docs/user-guide/accessing-the-cluster/"
-- "/docs/user-guide/accessing-the-cluster.html"
 ---
 
 {% capture overview %}
diff --git a/docs/concepts/cluster-administration/sysctl-cluster.md b/docs/concepts/cluster-administration/sysctl-cluster.md
index f37f520354..dad60e3f73 100644
--- a/docs/concepts/cluster-administration/sysctl-cluster.md
+++ b/docs/concepts/cluster-administration/sysctl-cluster.md
@@ -2,9 +2,6 @@ assignees:
 - sttts
 title: Using Sysctls in a Kubernetes Cluster
-redirect_from:
-- "/docs/admin/sysctls/"
-- "/docs/admin/sysctls.html"
 ---
 
 * TOC
diff --git a/docs/concepts/configuration/assign-pod-node.md b/docs/concepts/configuration/assign-pod-node.md
index 799edcce2d..eef8ca1908 100644
--- a/docs/concepts/configuration/assign-pod-node.md
+++ b/docs/concepts/configuration/assign-pod-node.md
@@ -4,9 +4,6 @@ assignees:
 - kevin-wangzefeng
 - bsalamat
 title: Assigning Pods to Nodes
-redirect_from:
-- "/docs/user-guide/node-selection/"
-- "/docs/user-guide/node-selection/index.html"
 ---
 
 You can constrain a [pod](/docs/concepts/workloads/pods/pod/) to only be able to run on particular [nodes](/docs/concepts/nodes/node/) or to prefer to
@@ -205,7 +202,7 @@ If omitted, it defaults to the namespace of the pod where the affinity/anti-affi
 If defined but empty, it means "all namespaces."
 
 All `matchExpressions` associated with `requiredDuringSchedulingIgnoredDuringExecution` affinity and anti-affinity
-must be satisfied for the pod to schedule onto a node. 
+must be satisfied for the pod to schedule onto a node.
 
 For more information on inter-pod affinity/anti-affinity, see the design doc
 [here](https://git.k8s.io/community/contributors/design-proposals/podaffinity.md).
@@ -236,7 +233,7 @@ taint created by the `kubectl taint` line above, and thus a pod with either tole
 to schedule onto `node1`:
 
 ```yaml
-tolerations: 
+tolerations:
 - key: "key"
   operator: "Equal"
   value: "value"
@@ -244,7 +241,7 @@ tolerations:
 ```
 
 ```yaml
-tolerations: 
+tolerations:
 - key: "key"
   operator: "Exists"
   effect: "NoSchedule"
@@ -304,7 +301,7 @@ kubectl taint nodes node1 key2=value2:NoSchedule
 
 And a pod has two tolerations:
 
 ```yaml
-tolerations: 
+tolerations:
 - key: "key1"
   operator: "Equal"
   value: "value1"
@@ -327,7 +324,7 @@ an optional `tolerationSeconds` field that dictates how long the pod will stay b
 to the node after the taint is added. For example,
 
 ```yaml
-tolerations: 
+tolerations:
 - key: "key1"
   operator: "Equal"
   value: "value1"
@@ -345,7 +342,7 @@ Taints and tolerations are a flexible way to steer pods away from nodes or
 evict pods that shouldn't be running. A few of the use cases are
 
 * **dedicated nodes**: If you want to dedicate a set of nodes for exclusive use by
-a particular set of users, you can add a taint to those nodes (say, 
+a particular set of users, you can add a taint to those nodes (say,
 `kubectl taint nodes nodename dedicated=groupName:NoSchedule`) and then add a corresponding
 toleration to their pods (this would be done most easily by writing a custom
 [admission controller](/docs/admin/admission-controllers/)).
@@ -410,7 +407,7 @@ that the partition will recover and thus the pod eviction can be avoided.
 The toleration the pod would use in that case would look like
 
 ```yaml
-tolerations: 
+tolerations:
 - key: "node.alpha.kubernetes.io/unreachable"
   operator: "Exists"
   effect: "NoExecute"
diff --git a/docs/concepts/configuration/manage-compute-resources-container.md b/docs/concepts/configuration/manage-compute-resources-container.md
index da0b5175bf..b9077f63f4 100644
--- a/docs/concepts/configuration/manage-compute-resources-container.md
+++ b/docs/concepts/configuration/manage-compute-resources-container.md
@@ -1,8 +1,5 @@
 ---
 title: Managing Compute Resources for Containers
-redirect_from:
-- "/docs/user-guide/compute-resources/"
-- "/docs/user-guide/compute-resources.html"
 ---
 
 {% capture overview %}
diff --git a/docs/concepts/configuration/overview.md b/docs/concepts/configuration/overview.md
index a31d3c2156..e2e4b0f2e5 100644
--- a/docs/concepts/configuration/overview.md
+++ b/docs/concepts/configuration/overview.md
@@ -2,9 +2,6 @@ assignees:
 - mikedanese
 title: Configuration Best Practices
-redirect_from:
-- "/docs/user-guide/config-best-practices/"
-- "/docs/user-guide/config-best-practices.html"
 ---
 
 {% capture overview %}
diff --git a/docs/concepts/configuration/secret.md b/docs/concepts/configuration/secret.md
index bcfc3df241..d64a918e2b 100644
--- a/docs/concepts/configuration/secret.md
+++ b/docs/concepts/configuration/secret.md
@@ -2,9 +2,6 @@ assignees:
 - mikedanese
 title: Secrets
-redirect_from:
-- "/docs/user-guide/secrets/index/"
-- "/docs/user-guide/secrets/index.html"
 ---
 
 Objects of type `secret` are intended to hold sensitive information, such as
diff --git a/docs/concepts/containers/container-lifecycle-hooks.md b/docs/concepts/containers/container-lifecycle-hooks.md
index c53749d9d1..63ca4df1c4 100644
--- a/docs/concepts/containers/container-lifecycle-hooks.md
+++ b/docs/concepts/containers/container-lifecycle-hooks.md
@@ -3,15 +3,12 @@ assignees:
 - mikedanese
 - thockin
 title: Container Lifecycle Hooks
-redirect_from:
-- "/docs/user-guide/container-environment/"
-- "/docs/user-guide/container-environment.html"
 ---
 
 {% capture overview %}
 
 This page describes how kubelet managed Containers can use the Container lifecycle hook framework
-to run code triggered by events during their management lifecycle. 
+to run code triggered by events during their management lifecycle.
 
 {% endcapture %}
@@ -34,14 +31,14 @@ There are two hooks that are exposed to Containers:
 
 This hook executes immediately after a container is created.
 However, there is no guarantee that the hook will execute before the container ENTRYPOINT.
-No parameters are passed to the handler. 
+No parameters are passed to the handler.
 
 `PreStop`
 
 This hook is called immediately before a container is terminated.
 It is blocking, meaning it is synchronous,
-so it must complete before the call to delete the container can be sent. 
-No parameters are passed to the handler. 
+so it must complete before the call to delete the container can be sent.
+No parameters are passed to the handler.
 
 A more detailed description of the termination behavior can be found in
 [Termination of Pods](/docs/concepts/workloads/pods/pod/#termination-of-pods).
@@ -58,13 +55,13 @@ Resources consumed by the command are counted against the Container.
 
 ### Hook handler execution
 
 When a Container lifecycle management hook is called,
-the Kubernetes management system executes the handler in the Container registered for that hook.  
+the Kubernetes management system executes the handler in the Container registered for that hook. 
 Hook handler calls are synchronous within the context of the Pod containing the Container.
 This means that for a `PostStart` hook,
 the Container ENTRYPOINT and hook fire asynchronously.
 However, if the hook takes too long to run or hangs,
-the Container cannot reach a `running` state. 
+the Container cannot reach a `running` state.
 
 The behavior is similar for a `PreStop` hook.
 If the hook hangs during execution,
@@ -87,16 +84,16 @@ Generally, only single deliveries are made.
 If, for example, an HTTP hook receiver is down and is unable to take traffic,
 there is no attempt to resend.
 In some rare cases, however, double delivery may occur.
-For instance, if a kubelet restarts in the middle of sending a hook, 
+For instance, if a kubelet restarts in the middle of sending a hook,
 the hook might be resent after the kubelet comes back up.
 
 ### Debugging Hook handlers
 
 The logs for a Hook handler are not exposed in Pod events.
 If a handler fails for some reason, it broadcasts an event.
-For `PostStart`, this is the `FailedPostStartHook` event, 
-and for `PreStop`, this is the `FailedPreStopHook` event. 
-You can see these events by running `kubectl describe pod <pod_name>`. 
+For `PostStart`, this is the `FailedPostStartHook` event,
+and for `PreStop`, this is the `FailedPreStopHook` event.
+You can see these events by running `kubectl describe pod <pod_name>`.
 Here is some example output of events from running this command:
 
 ```
@@ -111,7 +108,7 @@ Events:
   38s   38s   1   {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main}   Normal    Killing   Killing container with docker id 5c6a256a2567: PostStart handler: Error executing in Docker Container: 1
   37s   37s   1   {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main}   Normal    Killing   Killing container with docker id 8df9fdfd7054: PostStart handler: Error executing in Docker Container: 1
   38s   37s   2   {kubelet gke-test-cluster-default-pool-a07e5d30-siqd}                         Warning   FailedSync   Error syncing pod, skipping: failed to "StartContainer" for "main" with RunContainerError: "PostStart handler: Error executing in Docker Container: 1"
-  1m    22s   2   {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main}   Warning   FailedPostStartHook 
+  1m    22s   2   {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main}   Warning   FailedPostStartHook
 ```
 
 {% endcapture %}
diff --git a/docs/concepts/containers/images.md b/docs/concepts/containers/images.md
index 9c9221ada8..9960185f56 100644
--- a/docs/concepts/containers/images.md
+++ b/docs/concepts/containers/images.md
@@ -3,9 +3,6 @@ assignees:
 - erictune
 - thockin
 title: Images
-redirect_from:
-- "/docs/user-guide/images/"
-- "/docs/user-guide/images.html"
 ---
 
 {% capture overview %}
@@ -83,7 +80,7 @@ images in the ECR registry.
 
 The kubelet will fetch and periodically refresh ECR credentials. It needs the
 following permissions to do this:
 
-- `ecr:GetAuthorizationToken` 
+- `ecr:GetAuthorizationToken`
 - `ecr:BatchCheckLayerAvailability`
 - `ecr:GetDownloadUrlForLayer`
 - `ecr:GetRepositoryPolicy`
diff --git a/docs/concepts/overview/components.md b/docs/concepts/overview/components.md
index acf252e893..3e6dba96c3 100644
--- a/docs/concepts/overview/components.md
+++ b/docs/concepts/overview/components.md
@@ -2,10 +2,8 @@ assignees:
 - lavalamp
 title: Kubernetes Components
-redirect_from:
-- "/docs/admin/cluster-components/"
-- "/docs/admin/cluster-components.html"
 ---
+
 {% capture overview %}
 This document outlines the various binary components needed to
 deliver a functioning Kubernetes cluster.
@@ -15,7 +13,7 @@ deliver a functioning Kubernetes cluster.
 ## Master Components
 
 Master components provide the cluster's control plane. Master components make global decisions about the
-cluster (for example, scheduling), and detecting and responding to cluster events (starting up a new pod when a replication controller's 'replicas' field is unsatisfied). 
+cluster (for example, scheduling), and detecting and responding to cluster events (starting up a new pod when a replication controller's 'replicas' field is unsatisfied).
 
 Master components can be run on any node in the cluster. However,
 for simplicity, set up scripts typically start all master components on
@@ -28,7 +26,7 @@ Kubernetes control plane. It is designed to scale horizontally -- that is, it sc
 
 ### etcd
 
-[etcd](/docs/admin/etcd) is used as Kubernetes' backing store. All cluster data is stored here. Always have a backup plan for etcd's data for your Kubernetes cluster. 
+[etcd](/docs/admin/etcd) is used as Kubernetes' backing store. All cluster data is stored here. Always have a backup plan for etcd's data for your Kubernetes cluster.
 
 ### kube-controller-manager
 
@@ -41,12 +39,12 @@ These controllers include:
   controller object in the system.
 * Endpoints Controller: Populates the Endpoints object (that is, joins Services & Pods).
 * Service Account & Token Controllers: Create default accounts and API access tokens for new namespaces.
- 
+
 ### cloud-controller-manager
 
-cloud-controller-manager runs controllers that interact with the underlying cloud providers. The cloud-controller-manager binary is an alpha feature introduced in Kubernetes release 1.6. 
+cloud-controller-manager runs controllers that interact with the underlying cloud providers. The cloud-controller-manager binary is an alpha feature introduced in Kubernetes release 1.6.
 
-cloud-controller-manager runs cloud-provider-specific controller loops only. You must disable these controller loops in the kube-controller-manager. You can disable the controller loops by setting the `--cloud-provider` flag to `external` when starting the kube-controller-manager. 
+cloud-controller-manager runs cloud-provider-specific controller loops only. You must disable these controller loops in the kube-controller-manager. You can disable the controller loops by setting the `--cloud-provider` flag to `external` when starting the kube-controller-manager.
 
 cloud-controller-manager allows cloud vendors code and the Kubernetes core to evolve independent of each other. In prior releases, the core Kubernetes code was dependent upon cloud-provider-specific code for functionality. In future releases, code specific to cloud vendors should be maintained by the cloud vendor themselves, and linked to cloud-controller-manager while running Kubernetes.
@@ -55,7 +53,7 @@ The following controllers have cloud provider dependencies:
 * Node Controller: For checking the cloud provider to determine if a node has been deleted in the cloud after it stops responding
 * Route Controller: For setting up routes in the underlying cloud infrastructure
 * Service Controller: For creating, updating and deleting cloud provider load balancers
- * Volume Controller: For creating, attaching, and mounting volumes, and interacting with the cloud provider to orchestrate volumes 
+ * Volume Controller: For creating, attaching, and mounting volumes, and interacting with the cloud provider to orchestrate volumes
 
 ### kube-scheduler
diff --git a/docs/concepts/overview/kubernetes-api.md b/docs/concepts/overview/kubernetes-api.md
index 3de65c5bef..bd227ecc74 100644
--- a/docs/concepts/overview/kubernetes-api.md
+++ b/docs/concepts/overview/kubernetes-api.md
@@ -2,9 +2,6 @@ assignees:
 - chenopis
 title: The Kubernetes API
-redirect_from:
-- "/docs/api/"
-- "/docs/api.html"
 ---
 
 Overall API conventions are described in the [API conventions doc](https://git.k8s.io/community/contributors/devel/api-conventions.md).
diff --git a/docs/concepts/overview/what-is-kubernetes.md b/docs/concepts/overview/what-is-kubernetes.md
index 370af24b38..b2246d22ef 100644
--- a/docs/concepts/overview/what-is-kubernetes.md
+++ b/docs/concepts/overview/what-is-kubernetes.md
@@ -3,10 +3,8 @@ assignees:
 - bgrant0607
 - mikedanese
 title: What is Kubernetes?
-redirect_from:
-- "/docs/whatisk8s/"
-- "/docs/whatisk8s.html"
 ---
+
 {% capture overview %}
 This page is an overview of Kubernetes.
 {% endcapture %}
@@ -19,7 +17,7 @@ With Kubernetes, you are able to quickly and efficiently respond to customer dem
 
  - Deploy your applications quickly and predictably.
  - Scale your applications on the fly.
  - Roll out new features seamlessly.
- - Limit hardware usage to required resources only. 
+ - Limit hardware usage to required resources only.
 
 Our goal is to foster an ecosystem of components and tools that relieve the burden of running applications in public and private clouds.
diff --git a/docs/concepts/overview/working-with-objects/annotations.md b/docs/concepts/overview/working-with-objects/annotations.md
index 031d68b2cc..2bb89e17e5 100644
--- a/docs/concepts/overview/working-with-objects/annotations.md
+++ b/docs/concepts/overview/working-with-objects/annotations.md
@@ -1,8 +1,5 @@
 ---
 title: Annotations
-redirect_from:
-- "/docs/user-guide/annotations/"
-- "/docs/user-guide/annotations.html"
 ---
 
 {% capture overview %}
diff --git a/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/docs/concepts/overview/working-with-objects/kubernetes-objects.md
index e5873ff105..62af7d59ae 100644
--- a/docs/concepts/overview/working-with-objects/kubernetes-objects.md
+++ b/docs/concepts/overview/working-with-objects/kubernetes-objects.md
@@ -1,9 +1,5 @@
 ---
 title: Understanding Kubernetes Objects
-
-redirect_from:
-- "/docs/concepts/abstractions/overview/"
-- "/docs/concepts/abstractions/overview.html"
 ---
 
 {% capture overview %}
diff --git a/docs/concepts/overview/working-with-objects/labels.md b/docs/concepts/overview/working-with-objects/labels.md
index 8e4019fb77..ff3637345f 100644
--- a/docs/concepts/overview/working-with-objects/labels.md
+++ b/docs/concepts/overview/working-with-objects/labels.md
@@ -2,9 +2,6 @@ assignees:
 - mikedanese
 title: Labels and Selectors
-redirect_from:
-- "/docs/user-guide/labels/"
-- "/docs/user-guide/labels.html"
 ---
 
 _Labels_ are key/value pairs that are attached to objects, such as pods.
@@ -60,7 +57,7 @@ An empty label selector (that is, one with zero requirements) selects every obje
 
 A null label selector (which is only possible for optional selector fields)
 selects no objects.
 
-**Note**: the label selectors of two controllers must not overlap within a namespace, otherwise they will fight with each other. 
+**Note**: the label selectors of two controllers must not overlap within a namespace, otherwise they will fight with each other.
 
 ### _Equality-based_ requirement
diff --git a/docs/concepts/overview/working-with-objects/names.md b/docs/concepts/overview/working-with-objects/names.md
index a213435795..71147450fd 100644
--- a/docs/concepts/overview/working-with-objects/names.md
+++ b/docs/concepts/overview/working-with-objects/names.md
@@ -3,9 +3,6 @@ assignees:
 - mikedanese
 - thockin
 title: Names
-redirect_from:
-- "/docs/user-guide/identifiers/"
-- "/docs/user-guide/identifiers.html"
 ---
 
 All objects in the Kubernetes REST API are unambiguously identified by a Name and a UID.
diff --git a/docs/concepts/overview/working-with-objects/namespaces.md b/docs/concepts/overview/working-with-objects/namespaces.md
index 74af64b7e8..254ff95fa2 100644
--- a/docs/concepts/overview/working-with-objects/namespaces.md
+++ b/docs/concepts/overview/working-with-objects/namespaces.md
@@ -4,9 +4,6 @@ assignees:
 - mikedanese
 - thockin
 title: Namespaces
-redirect_from:
-- "/docs/user-guide/namespaces/"
-- "/docs/user-guide/namespaces.html"
 ---
 
 Kubernetes supports multiple virtual clusters backed by the same physical cluster.
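A minimal sketch of the namespace operations behind the virtual-cluster idea above (namespace name illustrative):

```shell
# List the namespaces in the cluster.
kubectl get namespaces

# Create one, then scope a query to it.
kubectl create namespace team-a
kubectl get pods --namespace=team-a
```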
diff --git a/docs/concepts/policy/pod-security-policy.md b/docs/concepts/policy/pod-security-policy.md index fc6ffd18e2..13d2377672 100644 --- a/docs/concepts/policy/pod-security-policy.md +++ b/docs/concepts/policy/pod-security-policy.md @@ -2,13 +2,10 @@ assignees: - pweil- title: Pod Security Policies -redirect_from: -- "/docs/user-guide/pod-security-policy/" -- "/docs/user-guide/pod-security-policy/index.html" --- Objects of type `PodSecurityPolicy` govern the ability -to make requests on a pod that affect the `SecurityContext` that will be +to make requests on a pod that affect the `SecurityContext` that will be applied to a pod and container. See [PodSecurityPolicy proposal](https://git.k8s.io/community/contributors/design-proposals/security-context-constraints.md) for more information. @@ -18,10 +15,10 @@ See [PodSecurityPolicy proposal](https://git.k8s.io/community/contributors/desig ## What is a Pod Security Policy? -A _Pod Security Policy_ is a cluster-level resource that controls the +A _Pod Security Policy_ is a cluster-level resource that controls the actions that a pod can perform and what it has the ability to access. The -`PodSecurityPolicy` objects define a set of conditions that a pod must -run with in order to be accepted into the system. They allow an +`PodSecurityPolicy` objects define a set of conditions that a pod must +run with in order to be accepted into the system. They allow an administrator to control the following: | Control Aspect | Field Name | @@ -41,16 +38,16 @@ administrator to control the following: | Allocating an FSGroup that owns the pod's volumes | [`fsGroup`](#fsgroup) | | Requiring the use of a read only root file system | `readOnlyRootFilesystem` | -_Pod Security Policies_ are comprised of settings and strategies that -control the security features a pod has access to. These settings fall +_Pod Security Policies_ are comprised of settings and strategies that +control the security features a pod has access to. These settings fall into three categories: -- *Controlled by a boolean*: Fields of this type default to the most -restrictive value. -- *Controlled by an allowable set*: Fields of this type are checked +- *Controlled by a boolean*: Fields of this type default to the most +restrictive value. +- *Controlled by an allowable set*: Fields of this type are checked against the set to ensure their value is allowed. - *Controlled by a strategy*: Items that have a strategy to provide -a mechanism to generate the value and a mechanism to ensure that a +a mechanism to generate the value and a mechanism to ensure that a specified value falls into the set of allowable values. @@ -75,22 +72,22 @@ specified. ### SupplementalGroups -- *MustRunAs* - Requires at least one range to be specified. Uses the +- *MustRunAs* - Requires at least one range to be specified. Uses the minimum value of the first range as the default. Validates against all ranges. - *RunAsAny* - No default provided. Allows any `supplementalGroups` to be specified. ### FSGroup -- *MustRunAs* - Requires at least one range to be specified. Uses the -minimum value of the first range as the default. Validates against the +- *MustRunAs* - Requires at least one range to be specified. Uses the +minimum value of the first range as the default. Validates against the first ID in the first range. - *RunAsAny* - No default provided. Allows any `fsGroup` ID to be specified. ### Controlling Volumes -The usage of specific volume types can be controlled by setting the -volumes field of the PSP. 
The allowable values of this field correspond +The usage of specific volume types can be controlled by setting the +volumes field of the PSP. The allowable values of this field correspond to the volume sources that are defined when creating a volume: 1. azureFile @@ -122,7 +119,7 @@ to the volume sources that are defined when creating a volume: 1. storageos 1. \* (allow all volumes) -The recommended minimum set of allowed volumes for new PSPs are +The recommended minimum set of allowed volumes for new PSPs is configMap, downwardAPI, emptyDir, persistentVolumeClaim, secret, and projected. ### Host Network @@ -193,7 +190,7 @@ podsecuritypolicy "permissive" deleted ## Enabling Pod Security Policies -In order to use Pod Security Policies in your cluster you must ensure the +In order to use Pod Security Policies in your cluster you must ensure the following: 1. You have enabled the api type `extensions/v1beta1/podsecuritypolicy` (only for versions prior to 1.6) diff --git a/docs/concepts/policy/resource-quotas.md b/docs/concepts/policy/resource-quotas.md index cc2daa5c5e..d9a4f022c3 100644 --- a/docs/concepts/policy/resource-quotas.md +++ b/docs/concepts/policy/resource-quotas.md @@ -2,9 +2,6 @@ assignees: - derekwaynecarr title: Resource Quotas -redirect_from: -- "/docs/admin/resourcequota/" -- "/docs/admin/resourcequota/index.html" --- When several users or teams share a cluster with a fixed number of nodes, @@ -56,7 +53,7 @@ Resource Quota is enforced in a particular namespace when there is a `ResourceQuota` object in that namespace. ## Compute Resource Quota You can limit the total sum of [compute resources](/docs/user-guide/compute-resources) that can be requested in a given namespace. - + The following resource types are supported: | Resource Name | Description | @@ -70,7 +67,7 @@ The following resource types are supported: ## Storage Resource Quota -You can limit the total sum of [storage resources](/docs/user-guide/persistent-volumes) that can be requested in a given namespace. +You can limit the total sum of [storage resources](/docs/user-guide/persistent-volumes) that can be requested in a given namespace. In addition, you can limit consumption of storage resources based on associated storage-class.
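As a rough sketch of the compute and storage quotas described above, assuming a namespace named `demo` and a storage class named `gold` (both hypothetical):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: demo-quota              # hypothetical quota name
  namespace: demo               # hypothetical namespace
spec:
  hard:
    requests.cpu: "4"           # sum of CPU requests across the namespace
    requests.memory: 8Gi        # sum of memory requests
    limits.cpu: "8"             # sum of CPU limits
    limits.memory: 16Gi         # sum of memory limits
    requests.storage: 100Gi     # sum of PersistentVolumeClaim storage requests
    gold.storageclass.storage.k8s.io/requests.storage: 50Gi  # per-storage-class cap
```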
diff --git a/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md index 7247e6cd63..36252dda0c 100644 --- a/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md @@ -3,9 +3,6 @@ assignees: - rickypai - thockin title: Adding entries to Pod /etc/hosts with HostAliases -redirect_from: -- "/docs/user-guide/add-entries-to-pod-etc-hosts-with-host-aliases/" -- "/docs/user-guide/add-entries-to-pod-etc-hosts-with-host-aliases.md" --- * TOC diff --git a/docs/concepts/services-networking/connect-applications-service.md b/docs/concepts/services-networking/connect-applications-service.md index 551ea80bb2..1d8d5e7250 100644 --- a/docs/concepts/services-networking/connect-applications-service.md +++ b/docs/concepts/services-networking/connect-applications-service.md @@ -4,9 +4,6 @@ assignees: - lavalamp - thockin title: Connecting Applications with Services -redirect_from: -- "/docs/user-guide/connecting-applications/" -- "/docs/user-guide/connecting-applications.html" --- * TOC diff --git a/docs/concepts/services-networking/dns-pod-service.md b/docs/concepts/services-networking/dns-pod-service.md index 3d88bb322d..1aea614159 100644 --- a/docs/concepts/services-networking/dns-pod-service.md +++ b/docs/concepts/services-networking/dns-pod-service.md @@ -3,9 +3,6 @@ assignees: - davidopp - thockin title: DNS Pods and Services -redirect_from: -- "/docs/admin/dns/" -- "/docs/admin/dns.html" --- ## Introduction @@ -105,7 +102,7 @@ spec: clusterIP: None ports: - name: foo # Actually, no port is needed. - port: 1234 + port: 1234 targetPort: 1234 --- apiVersion: v1 @@ -142,7 +139,7 @@ spec: ``` If there exists a headless service in the same namespace as the pod and with the same name as the subdomain, the cluster's KubeDNS Server also returns an A record for the Pod's fully qualified hostname. -Given a Pod with the hostname set to "busybox-1" and the subdomain set to "default-subdomain", and a headless Service named "default-subdomain" in the same namespace, the pod will see it's own FQDN as "busybox-1.default-subdomain.my-namespace.svc.cluster.local". DNS serves an A record at that name, pointing to the Pod's IP. Both pods "busybox1" and "busybox2" can have their distinct A records. +Given a Pod with the hostname set to "busybox-1" and the subdomain set to "default-subdomain", and a headless Service named "default-subdomain" in the same namespace, the pod will see its own FQDN as "busybox-1.default-subdomain.my-namespace.svc.cluster.local". DNS serves an A record at that name, pointing to the Pod's IP. Both pods "busybox1" and "busybox2" can have their distinct A records. As of Kubernetes v1.2, the Endpoints object also has the annotation `endpoints.beta.kubernetes.io/hostnames-map`. Its value is the json representation of map[string(IP)][endpoints.HostRecord], for example: '{"10.245.1.6":{HostName: "my-webserver"}}'. If the Endpoints are for a headless service, an A record is created with the format ...svc.
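For the `endpoints.beta.kubernetes.io/hostnames-map` annotation mentioned above, a hand-written Endpoints object for a headless service might look like the following sketch; the service name and IP are hypothetical, and the annotation value is the doc's sample rendered as strict JSON:

```yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: my-headless-service     # hypothetical; must match the headless Service's name
  annotations:
    endpoints.beta.kubernetes.io/hostnames-map: '{"10.245.1.6":{"HostName":"my-webserver"}}'
subsets:
- addresses:
  - ip: 10.245.1.6              # the generated A record resolves to this IP
  ports:
  - port: 80
```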
diff --git a/docs/concepts/services-networking/ingress.md b/docs/concepts/services-networking/ingress.md index fbd82415f5..eddb9cc08d 100644 --- a/docs/concepts/services-networking/ingress.md +++ b/docs/concepts/services-networking/ingress.md @@ -2,9 +2,6 @@ assignees: - bprashanth title: Ingress Resources -redirect_from: -- "/docs/user-guide/ingress/" -- "/docs/user-guide/ingress.html" --- * TOC diff --git a/docs/concepts/services-networking/network-policies.md b/docs/concepts/services-networking/network-policies.md index 63d464f8e0..1ea2cdfe9d 100644 --- a/docs/concepts/services-networking/network-policies.md +++ b/docs/concepts/services-networking/network-policies.md @@ -4,9 +4,6 @@ assignees: - caseydavenport - danwinship title: Network Policies -redirect_from: -- "/docs/user-guide/networkpolicies/" -- "/docs/user-guide/networkpolicies.html" --- * TOC diff --git a/docs/concepts/services-networking/service.md b/docs/concepts/services-networking/service.md index 2536969105..24a91dd366 100644 --- a/docs/concepts/services-networking/service.md +++ b/docs/concepts/services-networking/service.md @@ -2,9 +2,6 @@ assignees: - bprashanth title: Services -redirect_from: -- "/docs/user-guide/services/" -- "/docs/user-guide/services/index.html" --- Kubernetes [`Pods`](/docs/user-guide/pods) are mortal. They are born and when they die, they @@ -319,9 +316,9 @@ Sometimes you don't need or want load-balancing and a single service IP. In this case, you can create "headless" services by specifying `"None"` for the cluster IP (`spec.clusterIP`). -This option allows developers to reduce coupling to the Kubernetes system by -allowing them freedom to do discovery their own way. Applications can still use -a self-registration pattern and adapters for other discovery systems could easily +This option allows developers to reduce coupling to the Kubernetes system by +allowing them freedom to do discovery their own way. Applications can still use +a self-registration pattern and adapters for other discovery systems could easily be built upon this API. For such `Services`, a cluster IP is not allocated, kube-proxy does not handle @@ -356,15 +353,15 @@ The default is `ClusterIP`. `Type` values and their behaviors are: - * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value - makes the service only reachable from within the cluster. This is the + * `ClusterIP`: Exposes the service on a cluster-internal IP. Choosing this value + makes the service only reachable from within the cluster. This is the default `ServiceType`. - * `NodePort`: Exposes the service on each Node's IP at a static port (the `NodePort`). - A `ClusterIP` service, to which the NodePort service will route, is automatically - created. You'll be able to contact the `NodePort` service, from outside the cluster, + * `NodePort`: Exposes the service on each Node's IP at a static port (the `NodePort`). + A `ClusterIP` service, to which the NodePort service will route, is automatically + created. You'll be able to contact the `NodePort` service, from outside the cluster, by requesting `<NodeIP>:<NodePort>`. - * `LoadBalancer`: Exposes the service externally using a cloud provider's load balancer. - `NodePort` and `ClusterIP` services, to which the external load balancer will route, + * `LoadBalancer`: Exposes the service externally using a cloud provider's load balancer. + `NodePort` and `ClusterIP` services, to which the external load balancer will route, are automatically created.
* `ExternalName`: Maps the service to the contents of the `externalName` field (e.g. `foo.bar.example.com`), by returning a `CNAME` record with its value. @@ -441,9 +438,9 @@ This can be achieved by adding the following annotations to the service based on For AWS: ```yaml [...] -metadata: +metadata: name: my-service - annotations: + annotations: service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 [...] ``` @@ -516,7 +513,7 @@ spec: protocol: TCP port: 80 targetPort: 9376 - externalIPs: + externalIPs: - 80.11.12.10 ``` diff --git a/docs/concepts/storage/persistent-volumes.md b/docs/concepts/storage/persistent-volumes.md index ee079a19a3..e96c7380b7 100644 --- a/docs/concepts/storage/persistent-volumes.md +++ b/docs/concepts/storage/persistent-volumes.md @@ -5,9 +5,6 @@ assignees: - saad-ali - thockin title: Persistent Volumes -redirect_from: -- "/docs/user-guide/persistent-volumes/" -- "/docs/user-guide/persistent-volumes/index.html" --- This document describes the current state of `PersistentVolumes` in Kubernetes. Familiarity with [volumes](/docs/concepts/storage/volumes/) is suggested. @@ -265,7 +262,7 @@ spec: pdName: "gce-disk-1" ``` -A mount option is a string which will be cumulatively joined and used while mounting volume to the disk. +A mount option is a string which will be cumulatively joined and used while mounting the volume to the disk. Note that not all persistent volume types support mount options. In Kubernetes version 1.6, the following volume types support mount options. @@ -734,7 +731,7 @@ parameters: If a storage account is not provided, all storage accounts associated with the resource group are searched to find one that matches `skuName` and `location`. If a storage account is provided, it must reside in the same resource group as the cluster, and `skuName` and `location` are ignored. During provision, a secret will be created for mounting credentials. If the cluster has enabled both [RBAC](/docs/admin/authorization/rbac/) and [Controller Roles](/docs/admin/authorization/rbac/#controller-roles), you will first need to add `create` permission of resource `secret` for clusterrole `system:controller:persistent-volume-binder`. - + #### Portworx Volume ```yaml @@ -786,7 +783,7 @@ parameters: * `readOnly`: specifies the access mode to the mounted volume * `fsType`: the file system to use for the volume -The ScaleIO Kubernetes volume plugin requires a configured Secret object. +The ScaleIO Kubernetes volume plugin requires a configured Secret object. The secret must be created with type `kubernetes.io/scaleio` and use the same namespace value as that of the PVC where it is referenced as shown in the following command: diff --git a/docs/concepts/storage/volumes.md b/docs/concepts/storage/volumes.md index eedf110009..153a22f5bf 100644 --- a/docs/concepts/storage/volumes.md +++ b/docs/concepts/storage/volumes.md @@ -5,9 +5,6 @@ assignees: - saad-ali - thockin title: Volumes -redirect_from: -- "/docs/user-guide/volumes/" -- "/docs/user-guide/volumes.html" --- {% capture overview %} @@ -788,7 +785,7 @@ spec: Note that local PersistentVolume cleanup and deletion requires manual intervention without the external provisioner.
-For details on the `local` volume type, see the [Local Persistent Storage +For details on the `local` volume type, see the [Local Persistent Storage user guide](https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume) ## Using subPath diff --git a/docs/concepts/workloads/controllers/cron-jobs.md b/docs/concepts/workloads/controllers/cron-jobs.md index f50179e638..480acd3d0b 100644 --- a/docs/concepts/workloads/controllers/cron-jobs.md +++ b/docs/concepts/workloads/controllers/cron-jobs.md @@ -4,11 +4,6 @@ assignees: - soltysh - janetkuo title: Cron Jobs -redirect_from: -- "/docs/concepts/jobs/cron-jobs/" -- "/docs/concepts/jobs/cron-jobs.html" -- "/docs/user-guide/cron-jobs/" -- "/docs/user-guide/cron-jobs.html" --- * TOC diff --git a/docs/concepts/workloads/controllers/daemonset.md b/docs/concepts/workloads/controllers/daemonset.md index 8962dba065..fbd3bf4f1f 100644 --- a/docs/concepts/workloads/controllers/daemonset.md +++ b/docs/concepts/workloads/controllers/daemonset.md @@ -2,9 +2,6 @@ assignees: - erictune title: Daemon Sets -redirect_from: -- "/docs/admin/daemons/" -- "/docs/admin/daemons.html" --- * TOC @@ -77,7 +74,7 @@ a node for testing. If you specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will create pods on nodes which match that [node -selector](/docs/concepts/configuration/assign-pod-node/). Likewise if you specify a `.spec.template.spec.affinity` +selector](/docs/concepts/configuration/assign-pod-node/). Likewise, if you specify a `.spec.template.spec.affinity` then the DaemonSet controller will create pods on nodes which match that [node affinity](/docs/concepts/configuration/assign-pod-node/). If you do not specify either, then the DaemonSet controller will create pods on all nodes. @@ -91,7 +88,7 @@ when the pod is created, so it is ignored by the scheduler). Therefore: by the DaemonSet controller. - DaemonSet controller can make pods even when the scheduler has not been started, which can help cluster bootstrap. - + Daemon pods do respect [taints and tolerations](/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature), but they are created with `NoExecute` tolerations for the `node.alpha.kubernetes.io/notReady` and `node.alpha.kubernetes.io/unreachable` taints with no `tolerationSeconds`. This ensures that when the `TaintBasedEvictions` alpha feature is enabled, diff --git a/docs/concepts/workloads/controllers/deployment.md b/docs/concepts/workloads/controllers/deployment.md index c4ad4ecb77..ac0bed2287 100644 --- a/docs/concepts/workloads/controllers/deployment.md +++ b/docs/concepts/workloads/controllers/deployment.md @@ -3,9 +3,6 @@ assignees: - bgrant0607 - janetkuo title: Deployments -redirect_from: -- "/docs/user-guide/deployments/" -- "/docs/user-guide/deployments.html" --- {:toc} @@ -503,7 +500,7 @@ nginx-deployment-1989198191 7 7 0 7m nginx-deployment-618515232 11 11 11 7m ``` -## Pausing and Resuming a Deployment +## Pausing and Resuming a Deployment You can pause a Deployment before triggering one or more updates and then resume it. This will allow you to apply multiple fixes in between pausing and resuming without triggering unnecessary rollouts. @@ -549,7 +546,7 @@ deployment "nginx" resource requirements updated ``` The initial state of the Deployment prior to pausing it will continue its function, but new updates to -the Deployment will not have any effect as long as the Deployment is paused. +the Deployment will not have any effect as long as the Deployment is paused.
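A Deployment can also be created in the paused state declaratively; the following is a minimal sketch (name and image are hypothetical) combining `spec.paused` with the `spec.revisionHistoryLimit` field discussed next:

```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment       # hypothetical name
spec:
  replicas: 3
  paused: true                 # no rollouts are triggered while this is set
  revisionHistoryLimit: 2      # keep only the two most recent old ReplicaSets
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
```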
Eventually, resume the Deployment and observe a new ReplicaSet coming up with all the new updates: ```shell @@ -754,13 +751,13 @@ to a previous revision, or even pause it if you need to apply multiple tweaks in You can set `.spec.revisionHistoryLimit` field in a Deployment to specify how many old ReplicaSets for this Deployment you want to retain. The rest will be garbage-collected in the background. By default, -all revision history will be kept. In a future version, it will default to switch to 2. +all revision history will be kept. In a future version, the default will switch to 2. **Note:** Explicitly setting this field to 0 will result in cleaning up all the history of your Deployment, so that Deployment will not be able to roll back. -## Use Cases +## Use Cases ### Canary Deployment @@ -900,7 +897,7 @@ ReplicaSets will be kept by default, consuming resources in `etcd` and crowding the output of `kubectl get rs`, if this field is not set. The configuration of each Deployment revision is stored in its ReplicaSets; therefore, once an old ReplicaSet is deleted, you lose the ability to roll back to that revision of the Deployment. -More specifically, setting this field to zero means that all old ReplicaSets with 0 replica will be cleaned up. +More specifically, setting this field to zero means that all old ReplicaSets with 0 replicas will be cleaned up. In this case, a new Deployment rollout cannot be undone, since its revision history is cleaned up. ### Paused diff --git a/docs/concepts/workloads/controllers/garbage-collection.md b/docs/concepts/workloads/controllers/garbage-collection.md index 3e65e08198..0672d3a479 100644 --- a/docs/concepts/workloads/controllers/garbage-collection.md +++ b/docs/concepts/workloads/controllers/garbage-collection.md @@ -1,11 +1,5 @@ --- title: Garbage Collection -redirect_from: -- "/docs/concepts/abstractions/controllers/garbage-collection/" -- "/docs/concepts/abstractions/controllers/garbage-collection.html" -- "/docs/user-guide/garbage-collection/" -- "/docs/user-guide/garbage-collection.html" - --- {% capture overview %} @@ -70,15 +64,15 @@ metadata: When you delete an object, you can specify whether the object's dependents are also deleted automatically. Deleting dependents automatically is called *cascading -deletion*. There are two modes of *cascading deletion*: *background* and *foreground*. +deletion*. There are two modes of *cascading deletion*: *background* and *foreground*. If you delete an object without deleting its dependents -automatically, the dependents are said to be *orphaned*. +automatically, the dependents are said to be *orphaned*. ### Background cascading deletion -In *background cascading deletion*, Kubernetes deletes the owner object -immediately and the garbage collector then deletes the dependents in +In *background cascading deletion*, Kubernetes deletes the owner object -immediately and the garbage collector then deletes the dependents in the background. ### Foreground cascading deletion @@ -90,7 +84,7 @@ the following things are true: * The object is still visible via the REST API * The object's `deletionTimestamp` is set * The object's `metadata.finalizers` contains the value "foregroundDeletion". - + Once the "deletion in progress" state is set, the garbage collector deletes the object's dependents.
Once the garbage collector has deleted all "blocking" dependents (objects with `ownerReference.blockOwnerDeletion=true`), it deletes the owner object. @@ -100,7 +94,7 @@ Note that in the "foregroundDeletion", only dependents with `ownerReference.blockOwnerDeletion` block the deletion of the owner object. Kubernetes version 1.7 will add an admission controller that controls user access to set `blockOwnerDeletion` to true based on delete permissions on the owner object, so that -unauthorized dependents cannot delay deletion of an owner object. +unauthorized dependents cannot delay deletion of an owner object. If an object's `ownerReferences` field is set by a controller (such as Deployment or ReplicaSet), blockOwnerDeletion is set automatically and you do not need to manually modify this field. diff --git a/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/docs/concepts/workloads/controllers/jobs-run-to-completion.md index 8b225969a3..b9aaed4b49 100644 --- a/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/docs/concepts/workloads/controllers/jobs-run-to-completion.md @@ -3,11 +3,6 @@ assignees: - erictune - soltysh title: Jobs - Run to Completion -redirect_from: -- "/docs/concepts/jobs/run-to-completion-finite-workloads/" -- "/docs/concepts/jobs/run-to-completion-finite-workloads.html" -- "/docs/user-guide/jobs/" -- "/docs/user-guide/jobs.html" --- * TOC diff --git a/docs/concepts/workloads/controllers/petset.md b/docs/concepts/workloads/controllers/petset.md index 253a9639a4..00513c5973 100644 --- a/docs/concepts/workloads/controllers/petset.md +++ b/docs/concepts/workloads/controllers/petset.md @@ -8,13 +8,6 @@ assignees: - kow3ns - smarterclayton title: PetSets -redirect_from: -- "/docs/concepts/abstractions/controllers/petsets/" -- "/docs/concepts/abstractions/controllers/petsets.html" -- "/docs/user-guide/petset/bootstrapping/" -- "/docs/user-guide/petset/bootstrapping/index.html" -- "/docs/user-guide/petset/" -- "/docs/user-guide/petset.html" --- __Warning:__ Starting in Kubernetes version 1.5, PetSet has been renamed to [StatefulSet](/docs/concepts/abstractions/controllers/statefulsets). To use (or continue to use) PetSet in Kubernetes 1.5, you _must_ [migrate](/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/) your existing PetSets to StatefulSets. For information on working with StatefulSet, see the tutorial on [how to run replicated stateful applications](/docs/tutorials/stateful-application/run-replicated-stateful-application).
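To make the ownership relationships behind the garbage-collection hunks above concrete: a dependent records its owner in `metadata.ownerReferences`, roughly as in this sketch (the pod name, owner name, and uid are hypothetical; controllers normally set these fields for you):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-repset-pod          # hypothetical pod created by a ReplicaSet
  ownerReferences:
  - apiVersion: extensions/v1beta1
    kind: ReplicaSet
    name: my-repset            # hypothetical owning ReplicaSet
    uid: d9607e19-f88f-11e6-a518-42010a800195   # hypothetical uid
    controller: true
    blockOwnerDeletion: true   # delays foreground deletion of the owner until this pod is gone
```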
diff --git a/docs/concepts/workloads/controllers/replicaset.md b/docs/concepts/workloads/controllers/replicaset.md index 0a48fa16aa..769902f0a6 100644 --- a/docs/concepts/workloads/controllers/replicaset.md +++ b/docs/concepts/workloads/controllers/replicaset.md @@ -4,9 +4,6 @@ assignees: - bprashanth - madhusudancs title: Replica Sets -redirect_from: -- "/docs/user-guide/replicasets/" -- "/docs/user-guide/replicasets.html" --- * TOC diff --git a/docs/concepts/workloads/controllers/replicationcontroller.md b/docs/concepts/workloads/controllers/replicationcontroller.md index 00effd9ee1..2c2e3bef56 100644 --- a/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/docs/concepts/workloads/controllers/replicationcontroller.md @@ -3,9 +3,6 @@ assignees: - bprashanth - janetkuo title: Replication Controller -redirect_from: -- "/docs/user-guide/replication-controller/" -- "/docs/user-guide/replication-controller/index.html" --- * TOC @@ -194,7 +191,7 @@ Ideally, the rolling update controller would take application readiness into account. The two ReplicationControllers would need to create pods with at least one differentiating label, such as the image tag of the primary container of the pod, since it is typically image updates that motivate rolling updates. Rolling update is implemented in the client tool -[`kubectl rolling-update`](/docs/user-guide/kubectl/{{page.version}}/#rolling-update). Visit [`kubectl rolling-update` task](/docs/tasks/run-application/rolling-update-replication-controller/) for more concrete examples. +[`kubectl rolling-update`](/docs/user-guide/kubectl/{{page.version}}/#rolling-update). Visit [`kubectl rolling-update` task](/docs/tasks/run-application/rolling-update-replication-controller/) for more concrete examples. ### Multiple release tracks @@ -240,7 +237,7 @@ Note that we recommend using Deployments instead of directly using Replica Sets, ### Deployment (Recommended) [`Deployment`](/docs/concepts/workloads/controllers/deployment/) is a higher-level API object that updates its underlying Replica Sets and their Pods -in a similar fashion as `kubectl rolling-update`. Deployments are recommended if you want this rolling update functionality, +in a similar fashion as `kubectl rolling-update`. Deployments are recommended if you want this rolling update functionality, because unlike `kubectl rolling-update`, they are declarative, server-side, and have additional features. ### Bare Pods diff --git a/docs/concepts/workloads/controllers/statefulset.md b/docs/concepts/workloads/controllers/statefulset.md index fa58f58b77..f57278ae33 100644 --- a/docs/concepts/workloads/controllers/statefulset.md +++ b/docs/concepts/workloads/controllers/statefulset.md @@ -7,14 +7,11 @@ assignees: - kow3ns - smarterclayton title: StatefulSets -redirect_from: -- "/docs/concepts/abstractions/controllers/statefulsets/" -- "/docs/concepts/abstractions/controllers/statefulsets.html" --- {% capture overview %} -**StatefulSets are a beta feature in 1.7. This feature replaces the -PetSets feature from 1.4. Users of PetSets are referred to the 1.5 +**StatefulSets are a beta feature in 1.7. This feature replaces the +PetSets feature from 1.4. Users of PetSets are referred to the 1.5 [Upgrade Guide](/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/) for further information on how to upgrade existing PetSets to StatefulSets.** @@ -26,7 +23,7 @@ guarantees about the ordering of deployment and scaling.
## Using StatefulSets -StatefulSets are valuable for applications that require one or more of the +StatefulSets are valuable for applications that require one or more of the following. * Stable, unique network identifiers. @@ -36,10 +33,10 @@ following. * Ordered, automated rolling updates. In the above, stable is synonymous with persistence across Pod (re)scheduling. -If an application doesn't require any stable identifiers or ordered deployment, -deletion, or scaling, you should deploy your application with a controller that -provides a set of stateless replicas. Controllers such as -[Deployment](/docs/concepts/workloads/controllers/deployment/) or +If an application doesn't require any stable identifiers or ordered deployment, +deletion, or scaling, you should deploy your application with a controller that +provides a set of stateless replicas. Controllers such as +[Deployment](/docs/concepts/workloads/controllers/deployment/) or [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) may be better suited to your stateless needs. ## Limitations @@ -50,11 +47,11 @@ provides a set of stateless replicas. Controllers such as * StatefulSets currently require a [Headless Service](/docs/concepts/services-networking/service/#headless-services) to be responsible for the network identity of the Pods. You are responsible for creating this Service. ## Components -The example below demonstrates the components of a StatefulSet. +The example below demonstrates the components of a StatefulSet. -* A Headless Service, named nginx, is used to control the network domain. +* A Headless Service, named nginx, is used to control the network domain. * The StatefulSet, named web, has a Spec that indicates that 3 replicas of the nginx container will be launched in unique Pods. -* The volumeClaimTemplates will provide stable storage using [PersistentVolumes](/docs/concepts/storage/volumes/) provisioned by a +* The volumeClaimTemplates will provide stable storage using [PersistentVolumes](/docs/concepts/storage/volumes/) provisioned by a PersistentVolume Provisioner. ```yaml @@ -107,30 +104,30 @@ spec: ``` ## Pod Identity -StatefulSet Pods have a unique identity that is comprised of an ordinal, a -stable network identity, and stable storage. The identity sticks to the Pod, +StatefulSet Pods have a unique identity that is comprised of an ordinal, a +stable network identity, and stable storage. The identity sticks to the Pod, regardless of which node it's (re)scheduled on. ### Ordinal Index -For a StatefulSet with N replicas, each Pod in the StatefulSet will be -assigned an integer ordinal, in the range [0,N), that is unique over the Set. +For a StatefulSet with N replicas, each Pod in the StatefulSet will be +assigned an integer ordinal, in the range [0,N), that is unique over the Set. ### Stable Network ID -Each Pod in a StatefulSet derives its hostname from the name of the StatefulSet -and the ordinal of the Pod. The pattern for the constructed hostname -is `$(statefulset name)-$(ordinal)`. The example above will create three Pods +Each Pod in a StatefulSet derives its hostname from the name of the StatefulSet +and the ordinal of the Pod. The pattern for the constructed hostname +is `$(statefulset name)-$(ordinal)`. The example above will create three Pods named `web-0,web-1,web-2`. A StatefulSet can use a [Headless Service](/docs/concepts/services-networking/service/#headless-services) -to control the domain of its Pods. 
The domain managed by this Service takes the form: -`$(service name).$(namespace).svc.cluster.local`, where "cluster.local" -is the [cluster domain](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/README.md). -As each Pod is created, it gets a matching DNS subdomain, taking the form: -`$(podname).$(governing service domain)`, where the governing service is defined +to control the domain of its Pods. The domain managed by this Service takes the form: +`$(service name).$(namespace).svc.cluster.local`, where "cluster.local" +is the [cluster domain](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/README.md). +As each Pod is created, it gets a matching DNS subdomain, taking the form: +`$(podname).$(governing service domain)`, where the governing service is defined by the `serviceName` field on the StatefulSet. -Here are some examples of choices for Cluster Domain, Service name, +Here are some examples of choices for Cluster Domain, Service name, StatefulSet name, and how that affects the DNS names for the StatefulSet's Pods. Cluster Domain | Service (ns/name) | StatefulSet (ns/name) | StatefulSet Domain | Pod DNS | Pod Hostname | @@ -139,96 +136,96 @@ Cluster Domain | Service (ns/name) | StatefulSet (ns/name) | StatefulSet Domain cluster.local | foo/nginx | foo/web | nginx.foo.svc.cluster.local | web-{0..N-1}.nginx.foo.svc.cluster.local | web-{0..N-1} | kube.local | foo/nginx | foo/web | nginx.foo.svc.kube.local | web-{0..N-1}.nginx.foo.svc.kube.local | web-{0..N-1} | -Note that Cluster Domain will be set to `cluster.local` unless +Note that Cluster Domain will be set to `cluster.local` unless [otherwise configured](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/README.md). ### Stable Storage -Kubernetes creates one [PersistentVolume](/docs/concepts/storage/volumes/) for each -VolumeClaimTemplate. In the nginx example above, each Pod will receive a single PersistentVolume -with a storage class of `anything` and 1 Gib of provisioned storage. When a Pod is (re)scheduled -onto a node, its `volumeMounts` mount the PersistentVolumes associated with its -PersistentVolume Claims. Note that, the PersistentVolumes associated with the -Pods' PersistentVolume Claims are not deleted when the Pods, or StatefulSet are deleted. +Kubernetes creates one [PersistentVolume](/docs/concepts/storage/volumes/) for each +VolumeClaimTemplate. In the nginx example above, each Pod will receive a single PersistentVolume +with a storage class of `anything` and 1 GiB of provisioned storage. When a Pod is (re)scheduled +onto a node, its `volumeMounts` mount the PersistentVolumes associated with its +PersistentVolume Claims. Note that the PersistentVolumes associated with the +Pods' PersistentVolume Claims are not deleted when the Pods or the StatefulSet are deleted. This must be done manually. ## Deployment and Scaling Guarantees -* For a StatefulSet with N replicas, when Pods are being deployed, they are created sequentially, in order from {0..N-1}. * When Pods are being deleted, they are terminated in reverse order, from {N-1..0}. -* Before a scaling operation is applied to a Pod, all of its predecessors must be Running and Ready. +* Before a scaling operation is applied to a Pod, all of its predecessors must be Running and Ready. * Before a Pod is terminated, all of its successors must be completely shut down.
The StatefulSet should not specify a `pod.Spec.TerminationGracePeriodSeconds` of 0. This practice is unsafe and strongly discouraged. For further explanation, please refer to [force deleting StatefulSet Pods](/docs/tasks/run-application/force-delete-stateful-set-pod/). -When the nginx example above is created, three Pods will be deployed in the order -web-0, web-1, web-2. web-1 will not be deployed before web-0 is -[Running and Ready](/docs/user-guide/pod-states), and web-2 will not be deployed until -web-1 is Running and Ready. If web-0 should fail, after web-1 is Running and Ready, but before -web-2 is launched, web-2 will not be launched until web-0 is successfully relaunched and -becomes Running and Ready. +When the nginx example above is created, three Pods will be deployed in the order +web-0, web-1, web-2. web-1 will not be deployed before web-0 is +[Running and Ready](/docs/user-guide/pod-states), and web-2 will not be deployed until +web-1 is Running and Ready. If web-0 should fail, after web-1 is Running and Ready, but before +web-2 is launched, web-2 will not be launched until web-0 is successfully relaunched and +becomes Running and Ready. If a user were to scale the deployed example by patching the StatefulSet such that -`replicas=1`, web-2 would be terminated first. web-1 would not be terminated until web-2 -is fully shutdown and deleted. If web-0 were to fail after web-2 has been terminated and -is completely shutdown, but prior to web-1's termination, web-1 would not be terminated +`replicas=1`, web-2 would be terminated first. web-1 would not be terminated until web-2 +is fully shut down and deleted. If web-0 were to fail after web-2 has been terminated and +is completely shut down, but prior to web-1's termination, web-1 would not be terminated until web-0 is Running and Ready. ### Pod Management Policies -In Kubernetes 1.7 and later, StatefulSet allows you to relax its ordering guarantees while +In Kubernetes 1.7 and later, StatefulSet allows you to relax its ordering guarantees while preserving its uniqueness and identity guarantees via its `.spec.podManagementPolicy` field. #### OrderedReady Pod Management -`OrderedReady` pod management is the default for StatefulSets. It implements the behavior +`OrderedReady` pod management is the default for StatefulSets. It implements the behavior described [above](#deployment-and-scaling-guarantees). #### Parallel Pod Management -`Parallel` pod management tells the StatefulSet controller to launch or -terminate all Pods in parallel, and to not wait for Pods to become Running -and Ready or completely terminated prior to launching or terminating another +`Parallel` pod management tells the StatefulSet controller to launch or +terminate all Pods in parallel, and to not wait for Pods to become Running +and Ready or completely terminated prior to launching or terminating another Pod. ## Update Strategies -In Kuberentes 1.7 and later, StatefulSet's `.spec.updateStrategy` field allows you to configure -and disable automated rolling updates for containers, labels, resource request/limits, and +In Kubernetes 1.7 and later, StatefulSet's `.spec.updateStrategy` field allows you to configure +and disable automated rolling updates for containers, labels, resource request/limits, and annotations for the Pods in a StatefulSet. ### On Delete -The `OnDelete` update strategy implements the legacy (1.6 and prior) behavior. It is the default -strategy when `spec.updateStrategy` is left unspecified.
When a StatefulSet's -`.spec.updateStrategy.type` is set to `OnDelete`, the StatefulSet controller will not automatically -update the Pods in a StatefulSet. Users must manually delete Pods to cause the controller to +The `OnDelete` update strategy implements the legacy (1.6 and prior) behavior. It is the default +strategy when `spec.updateStrategy` is left unspecified. When a StatefulSet's +`.spec.updateStrategy.type` is set to `OnDelete`, the StatefulSet controller will not automatically +update the Pods in a StatefulSet. Users must manually delete Pods to cause the controller to create new Pods that reflect modifications made to a StatefulSet's `.spec.template`. ### Rolling Updates -The `RollingUpdate` update strategy implements automated, rolling update for the Pods in a -StatefulSet. When a StatefulSet's `.spec.updateStrategy.type` is set to `RollingUpdate`, the -StatefulSet controller will delete and recreate each Pod in the StatefulSet. It will proceed -in the same order as Pod termination (from the largest ordinal to the smallest), updating -each Pod one at a time. It will wait until an updated Pod is Running and Ready prior to +The `RollingUpdate` update strategy implements automated, rolling updates for the Pods in a +StatefulSet. When a StatefulSet's `.spec.updateStrategy.type` is set to `RollingUpdate`, the +StatefulSet controller will delete and recreate each Pod in the StatefulSet. It will proceed +in the same order as Pod termination (from the largest ordinal to the smallest), updating +each Pod one at a time. It will wait until an updated Pod is Running and Ready prior to updating its predecessor. #### Partitions -The `RollingUpdate` update strategy can be partitioned, by specifying a -`.spec.updateStrategy.rollingUpdate.partition`. If a partition is specified, all Pods with an -ordinal that is greater than or equal to the partition will be updated when the StatefulSet's -`.spec.template` is updated. All Pods with an ordinal that is less than the partition will not -be updated, and, even if they are deleted, they will be recreated at the previous version. If a -StatefulSet's `.spec.updateStrategy.rollingUpdate.partition` is greater than its `.spec.replicas`, +The `RollingUpdate` update strategy can be partitioned, by specifying a +`.spec.updateStrategy.rollingUpdate.partition`. If a partition is specified, all Pods with an +ordinal that is greater than or equal to the partition will be updated when the StatefulSet's +`.spec.template` is updated. All Pods with an ordinal that is less than the partition will not +be updated, and, even if they are deleted, they will be recreated at the previous version. If a +StatefulSet's `.spec.updateStrategy.rollingUpdate.partition` is greater than its `.spec.replicas`, updates to its `.spec.template` will not be propagated to its Pods. -In most cases you will not need to use a partition, but they are useful if you want to stage an +In most cases you will not need to use a partition, but partitions are useful if you want to stage an update, roll out a canary, or perform a phased rollout. {% endcapture %} {% capture whatsnext %} -* Follow an example of [deploying a stateful application](/docs/tutorials/stateful-application/basic-stateful-set). +* Follow an example of [deploying a stateful application](/docs/tutorials/stateful-application/basic-stateful-set).
{% endcapture %} {% include templates/concept.md %} diff --git a/docs/concepts/workloads/pods/disruptions.md b/docs/concepts/workloads/pods/disruptions.md index 11d6efd6e8..757834288b 100644 --- a/docs/concepts/workloads/pods/disruptions.md +++ b/docs/concepts/workloads/pods/disruptions.md @@ -4,11 +4,6 @@ assignees: - foxish - davidopp title: Disruptions -redirect_from: -- "/docs/admin/disruptions/" -- "/docs/admin/disruptions.html" -- "/docs/tasks/configure-pod-container/configure-pod-disruption-budget/" -- "/docs/tasks/administer-cluster/configure-pod-disruption-budget/" --- {% capture overview %} diff --git a/docs/concepts/workloads/pods/init-containers.md b/docs/concepts/workloads/pods/init-containers.md index f18583fd13..6cac2b0532 100644 --- a/docs/concepts/workloads/pods/init-containers.md +++ b/docs/concepts/workloads/pods/init-containers.md @@ -2,11 +2,6 @@ assignees: - erictune title: Init Containers -redirect_from: -- "/docs/concepts/abstractions/init-containers/" -- "/docs/concepts/abstractions/init-containers.html" -- "/docs/user-guide/pods/init-container/" -- "/docs/user-guide/pods/init-container.html" --- {% capture overview %} diff --git a/docs/concepts/workloads/pods/pod-lifecycle.md b/docs/concepts/workloads/pods/pod-lifecycle.md index 9b7400e6f0..01986e993f 100644 --- a/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/docs/concepts/workloads/pods/pod-lifecycle.md @@ -1,8 +1,5 @@ --- title: Pod Lifecycle -redirect_from: -- "/docs/user-guide/pod-states/" -- "/docs/user-guide/pod-states.html" --- {% capture overview %} diff --git a/docs/concepts/workloads/pods/pod-overview.md b/docs/concepts/workloads/pods/pod-overview.md index 73bc80c17c..7256868552 100644 --- a/docs/concepts/workloads/pods/pod-overview.md +++ b/docs/concepts/workloads/pods/pod-overview.md @@ -2,11 +2,6 @@ assignees: - erictune title: Pod Overview -redirect_from: -- "/docs/concepts/abstractions/pod/" -- "/docs/concepts/abstractions/pod.html" -- "/docs/user-guide/pod-templates/" -- "/docs/user-guide/pod-templates.html" --- {% capture overview %} @@ -64,7 +59,7 @@ Pods do not, by themselves, self-heal. If a Pod is scheduled to a Node that fails ### Pods and Controllers -A Controller can create and manage multiple Pods for you, handling replication and rollout and providing self-healing capabilities at cluster scope. For example, if a Node fails, the Controller might automatically replace the Pod by scheduling an identical replacement on a different Node. +A Controller can create and manage multiple Pods for you, handling replication and rollout and providing self-healing capabilities at cluster scope. For example, if a Node fails, the Controller might automatically replace the Pod by scheduling an identical replacement on a different Node.
Some examples of Controllers that contain one or more pods include: diff --git a/docs/concepts/workloads/pods/pod.md b/docs/concepts/workloads/pods/pod.md index f49d4b12b2..ac20c50175 100644 --- a/docs/concepts/workloads/pods/pod.md +++ b/docs/concepts/workloads/pods/pod.md @@ -1,9 +1,6 @@ --- assignees: title: Pods -redirect_from: -- "/docs/user-guide/pods/index/" -- "/docs/user-guide/pods/index.html" --- * TOC diff --git a/docs/getting-started-guides/ubuntu/index.md b/docs/getting-started-guides/ubuntu/index.md index ba50e5dd81..e732c75d0a 100644 --- a/docs/getting-started-guides/ubuntu/index.md +++ b/docs/getting-started-guides/ubuntu/index.md @@ -1,12 +1,9 @@ --- title: Kubernetes on Ubuntu -redirect_from: -- "/docs/getting-started-guides/ubuntu/calico/" -- "/docs/getting-started-guides/ubuntu/calico.html" --- {% capture overview %} -There are multiple ways to run a Kubernetes cluster with Ubuntu. These pages explain how to deploy Kubernetes on Ubuntu on multiple public and private clouds, as well as bare metal. +There are multiple ways to run a Kubernetes cluster with Ubuntu. These pages explain how to deploy Kubernetes on Ubuntu on multiple public and private clouds, as well as bare metal. {% endcapture %} {% capture body %} @@ -20,7 +17,7 @@ Supports AWS, GCE, Azure, Joyent, OpenStack, VMWare, Bare Metal and localhost deployments. [conjure-up](http://conjure-up.io/) provides the quickest way to deploy Kubernetes on Ubuntu for multiple clouds and bare metal. It provides a user-friendly UI that prompts you for cloud credentials and configuration options. -Available for Ubuntu 16.04 and newer: +Available for Ubuntu 16.04 and newer: ``` sudo snap install conjure-up --classic @@ -37,7 +34,7 @@ conjure-up kubernetes ### Operational Guides -These are more in-depth guides for users choosing to run Kubernetes in production: +These are more in-depth guides for users choosing to run Kubernetes in production: - [Installation](/docs/getting-started-guides/ubuntu/installation) - [Validation](/docs/getting-started-guides/ubuntu/validation) diff --git a/docs/home/contribute/page-templates.md b/docs/home/contribute/page-templates.md index ec9369af30..726969d2a4 100644 --- a/docs/home/contribute/page-templates.md +++ b/docs/home/contribute/page-templates.md @@ -1,7 +1,4 @@ --- -redirect_from: -- "/docs/templatedemos/" -- "/docs/templatedemos.html" title: Using Page Templates --- diff --git a/docs/home/index.md b/docs/home/index.md index ea6b927032..89266aaa91 100644 --- a/docs/home/index.md +++ b/docs/home/index.md @@ -3,11 +3,6 @@ assignees: - bgrant0607 - thockin title: Kubernetes Documentation -redirect_from: -- "/docs/" -- "/docs/index.html" -- "/docs/user-guide/" -- "/docs/user-guide/index.html" --- Kubernetes documentation can help you set up Kubernetes, learn about the system, or get your applications and workloads running on Kubernetes. To learn the basics of what Kubernetes is and how it works, read "[What is Kubernetes](/docs/concepts/overview/what-is-kubernetes/)".
diff --git a/docs/reference/federation/index.md b/docs/reference/federation/index.md index 8cfd620d4b..81ace6743a 100644 --- a/docs/reference/federation/index.md +++ b/docs/reference/federation/index.md @@ -1,8 +1,5 @@ --- title: Federation API Reference -redirect_from: -- "/docs/federation/api-reference/" -- "/docs/federation/api-reference/index.md" --- # API Reference diff --git a/docs/setup/independent/create-cluster-kubeadm.md b/docs/setup/independent/create-cluster-kubeadm.md index 10b84d3347..f57a483a38 100644 --- a/docs/setup/independent/create-cluster-kubeadm.md +++ b/docs/setup/independent/create-cluster-kubeadm.md @@ -5,9 +5,6 @@ assignees: - errordeveloper - jbeda title: Using kubeadm to Create a Cluster -redirect_from: -- "/docs/getting-started-guides/kubeadm/" -- "/docs/getting-started-guides/kubeadm.html" --- {% capture overview %} @@ -359,7 +356,7 @@ kubectl --kubeconfig ./admin.conf get nodes **Note:** If you are using GCE, instances disable ssh access for root by default. If that's the case, you can log in to the machine, copy the file someplace that -can be accessed and then use +can be accessed and then use [`gcloud compute copy-files`](https://cloud.google.com/sdk/gcloud/reference/compute/copy-files) ### (Optional) Proxying API Server to localhost diff --git a/docs/setup/pick-right-solution.md b/docs/setup/pick-right-solution.md index 2e7d66a856..2a25d144c3 100644 --- a/docs/setup/pick-right-solution.md +++ b/docs/setup/pick-right-solution.md @@ -4,9 +4,6 @@ assignees: - erictune - mikedanese title: Picking the Right Solution -redirect_from: -- "/docs/getting-started-guides/index/" -- "/docs/getting-started-guides/index.html" --- Kubernetes can run on various platforms: from your laptop, to VMs on a cloud provider, to a rack of bare metal servers. diff --git a/docs/tasks/access-application-cluster/access-cluster.md b/docs/tasks/access-application-cluster/access-cluster.md index e4b8b26a60..5d7d0a7275 100644 --- a/docs/tasks/access-application-cluster/access-cluster.md +++ b/docs/tasks/access-application-cluster/access-cluster.md @@ -1,10 +1,5 @@ --- title: Accessing Clusters -redirect_from: -- "/docs/user-guide/accessing-the-cluster/" -- "/docs/user-guide/accessing-the-cluster.html" -- "/docs/concepts/cluster-administration/access-cluster/" -- "/docs/concepts/cluster-administration/access-cluster.html" --- * TOC diff --git a/docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig.md b/docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig.md index 99050a6204..82a7fe3c89 100644 --- a/docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig.md +++ b/docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig.md @@ -3,11 +3,6 @@ assignees: - mikedanese - thockin title: Authenticate Across Clusters with kubeconfig -redirect_from: -- "/docs/user-guide/kubeconfig-file/" -- "/docs/user-guide/kubeconfig-file.html" -- "/docs/concepts/cluster-administration/authenticate-across-clusters-kubeconfig/" -- "/docs/concepts/cluster-administration/authenticate-across-clusters-kubeconfig.html" --- Authentication in Kubernetes can differ for different individuals.
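Because several of the files above deal with kubeconfig-based authentication, here is a minimal kubeconfig sketch; every name, the server address, and the credential paths are hypothetical placeholders:

```yaml
apiVersion: v1
kind: Config
clusters:
- name: my-cluster
  cluster:
    server: https://1.2.3.4                   # hypothetical apiserver address
    certificate-authority: /path/to/ca.crt    # hypothetical CA path
users:
- name: my-user
  user:
    client-certificate: /path/to/client.crt   # hypothetical credentials
    client-key: /path/to/client.key
contexts:
- name: my-context
  context:
    cluster: my-cluster
    user: my-user
current-context: my-context
```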
diff --git a/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md b/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md index 3350b90536..2edb8e379b 100644 --- a/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md +++ b/docs/tasks/access-application-cluster/communicate-containers-same-pod-shared-volume.md @@ -1,10 +1,5 @@ --- title: Communicate Between Containers in the Same Pod Using a Shared Volume -redirect_from: -- "/docs/user-guide/pods/multi-container/" -- "/docs/user-guide/pods/multi-container.html" -- "docs/tasks/configure-pod-container/communicate-containers-same-pod/" -- "docs/tasks/configure-pod-container/communicate-containers-same-pod.html" --- {% capture overview %} diff --git a/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md b/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md index 042e49f16a..2941e3cb2b 100644 --- a/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md +++ b/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md @@ -3,9 +3,6 @@ assignees: - bprashanth - davidopp title: Configure Your Cloud Provider's Firewalls -redirect_from: -- "/docs/user-guide/services-firewalls/" -- "/docs/user-guide/services-firewalls.html" --- Many cloud providers (e.g. Google Compute Engine) define firewalls that help prevent inadvertent exposure to the internet. diff --git a/docs/tasks/access-application-cluster/connecting-frontend-backend.md b/docs/tasks/access-application-cluster/connecting-frontend-backend.md index 8f9c6046f4..76507c6f4b 100644 --- a/docs/tasks/access-application-cluster/connecting-frontend-backend.md +++ b/docs/tasks/access-application-cluster/connecting-frontend-backend.md @@ -1,10 +1,5 @@ --- title: Connect a Front End to a Back End Using a Service -redirect_from: -- "/docs/user-guide/services/operations/" -- "/docs/user-guide/services/operations.html" -- "/docs/tutorials/connecting-apps/connecting-frontend-backend/" -- "/docs/tutorials/connecting-apps/connecting-frontend-backend.html" --- {% capture overview %} diff --git a/docs/tasks/access-application-cluster/create-external-load-balancer.md b/docs/tasks/access-application-cluster/create-external-load-balancer.md index 1e197365b1..29533f315c 100644 --- a/docs/tasks/access-application-cluster/create-external-load-balancer.md +++ b/docs/tasks/access-application-cluster/create-external-load-balancer.md @@ -1,8 +1,5 @@ --- title: Create an External Load Balancer -redirect_from: -- "/docs/user-guide/load-balancer/" -- "/docs/user-guide/load-balancer.html" --- diff --git a/docs/tasks/access-application-cluster/list-all-running-container-images.md b/docs/tasks/access-application-cluster/list-all-running-container-images.md index 3cb33864e2..922e5c62d6 100644 --- a/docs/tasks/access-application-cluster/list-all-running-container-images.md +++ b/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -1,8 +1,5 @@ --- title: List All Container Images Running in a Cluster -redirect_from: -- "/docs/tasks/kubectl/list-all-running-container-images/" -- "/docs/tasks/kubectl/list-all-running-container-images.html" --- {% capture overview %} diff --git a/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index 875c3cc78e..66ae046f3d 100644 ---
a/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -1,8 +1,5 @@ --- title: Use Port Forwarding to Access Applications in a Cluster -redirect_from: -- "/docs/user-guide/connecting-to-applications-port-forward/" -- "/docs/user-guide/connecting-to-applications-port-forward.html" --- {% capture overview %} diff --git a/docs/tasks/access-application-cluster/service-access-application-cluster.md b/docs/tasks/access-application-cluster/service-access-application-cluster.md index b32cfe22b3..a5ccaca4dc 100644 --- a/docs/tasks/access-application-cluster/service-access-application-cluster.md +++ b/docs/tasks/access-application-cluster/service-access-application-cluster.md @@ -1,10 +1,5 @@ --- title: Use a Service to Access an Application in a Cluster -redirect_from: -- "/docs/user-guide/quick-start/" -- "/docs/user-guide/quick-start.html" -- "/docs/tutorials/stateless-application/expose-external-ip-address-service/" -- "/docs/tutorials/stateless-application/expose-external-ip-address-service.html" --- {% capture overview %} diff --git a/docs/tasks/access-application-cluster/web-ui-dashboard.md b/docs/tasks/access-application-cluster/web-ui-dashboard.md index 4b541f9ad7..53b8cbe748 100644 --- a/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -4,11 +4,6 @@ assignees: - mikedanese - rf232 title: Web UI (Dashboard) -redirect_from: -- "/docs/user-guide/ui/" -- "/docs/user-guide/ui.html" -- "/docs/tasks/web-ui-dashboard/" -- "/docs/tasks/web-ui-dashboard.html" --- Dashboard is a web-based Kubernetes user interface. You can use Dashboard to deploy containerized applications to a Kubernetes cluster, troubleshoot your containerized application, and manage the cluster itself along with its attendant resources. You can use Dashboard to get an overview of applications running on your cluster, as well as for creating or modifying individual Kubernetes resources (such as Deployments, Jobs, DaemonSets, etc). For example, you can scale a Deployment, initiate a rolling update, restart a pod or deploy new applications using a deploy wizard. 
diff --git a/docs/tasks/access-kubernetes-api/extend-api-third-party-resource.md b/docs/tasks/access-kubernetes-api/extend-api-third-party-resource.md index e436d4929f..ddc033e571 100644 --- a/docs/tasks/access-kubernetes-api/extend-api-third-party-resource.md +++ b/docs/tasks/access-kubernetes-api/extend-api-third-party-resource.md @@ -3,11 +3,6 @@ assignees: - enisoc - IanLewis title: Extend the Kubernetes API with ThirdPartyResources -redirect_from: -- "/docs/user-guide/thirdpartyresources/" -- "/docs/user-guide/thirdpartyresources.html" -- "/docs/concepts/ecosystem/thirdpartyresource/" -- "/docs/concepts/ecosystem/thirdpartyresource.html" --- {% assign for_k8s_version="1.7" %}{% include feature-state-deprecated.md %} diff --git a/docs/tasks/access-kubernetes-api/http-proxy-access-api.md b/docs/tasks/access-kubernetes-api/http-proxy-access-api.md index 9e78cd052d..9818a02968 100644 --- a/docs/tasks/access-kubernetes-api/http-proxy-access-api.md +++ b/docs/tasks/access-kubernetes-api/http-proxy-access-api.md @@ -1,8 +1,5 @@ --- title: Use an HTTP Proxy to Access the Kubernetes API -redirect_from: -- "/docs/user-guide/connecting-to-applications-proxy/" -- "/docs/user-guide/connecting-to-applications-proxy.html" --- {% capture overview %} diff --git a/docs/tasks/administer-cluster/access-cluster-api.md b/docs/tasks/administer-cluster/access-cluster-api.md index 2d07d05c54..c96524252a 100644 --- a/docs/tasks/administer-cluster/access-cluster-api.md +++ b/docs/tasks/administer-cluster/access-cluster-api.md @@ -1,9 +1,5 @@ --- title: Access Clusters Using the Kubernetes API -redirect_from: -- "/docs/user-guide/accessing-the-cluster/" -- "/docs/user-guide/accessing-the-cluster.html" -- "/docs/concepts/cluster-administration/access-cluster/" --- {% capture overview %} @@ -43,10 +39,10 @@ kubectl. Complete documentation is found in the [kubectl manual](/docs/user-gui Kubectl handles locating and authenticating to the apiserver. If you want to directly access the REST API with an http client like `curl` or `wget`, or a browser, there are multiple ways you can locate and authenticate against the apiserver: - 1. Run kubectl in proxy mode (recommended). This method is recommended, since it uses the stored apiserver location abd verifies the identity of the apiserver using a self-signed cert. No Man-in-the-middle (MITM) attack is possible using this method . + 1. Run kubectl in proxy mode (recommended), since it uses the stored apiserver location and verifies the identity of the apiserver using a self-signed cert. No Man-in-the-middle (MITM) attack is possible using this method. 1. Alternatively, you can provide the location and credentials directly to the http client. This works with client code that is confused by proxies. To protect against man in the middle attacks, you'll need to import a root cert into your browser. - - Using the Go or Python client libraries provides accessing kubectl in proxy mode. + + Using the Go or Python client libraries provides access to kubectl in proxy mode.
#### Using kubectl proxy diff --git a/docs/tasks/administer-cluster/access-cluster-services.md b/docs/tasks/administer-cluster/access-cluster-services.md index db0d9e5253..0630274bc3 100644 --- a/docs/tasks/administer-cluster/access-cluster-services.md +++ b/docs/tasks/administer-cluster/access-cluster-services.md @@ -1,12 +1,9 @@ --- title: Access Services Running on Clusters -redirect_from: -- "/docs/user-guide/accessing-the-cluster/" -- "/docs/user-guide/accessing-the-cluster.html" --- {% capture overview %} -This page shows how to connect to services running on the Kubernetes cluster. +This page shows how to connect to services running on the Kubernetes cluster. {% endcapture %} {% capture prerequisites %} diff --git a/docs/tasks/administer-cluster/apply-resource-quota-limit.md b/docs/tasks/administer-cluster/apply-resource-quota-limit.md index 5183797a0c..a1ee37b3ea 100644 --- a/docs/tasks/administer-cluster/apply-resource-quota-limit.md +++ b/docs/tasks/administer-cluster/apply-resource-quota-limit.md @@ -3,11 +3,6 @@ assignees: - derekwaynecarr - janetkuo title: Apply Resource Quotas and Limits -redirect_from: -- "/docs/admin/resourcequota/walkthrough/" -- "/docs/admin/resourcequota/walkthrough.html" -- "/docs/tasks/configure-pod-container/apply-resource-quota-limit/" -- "/docs/tasks/configure-pod-container/apply-resource-quota-limit.html" --- {% capture overview %} @@ -359,7 +354,7 @@ the 2 pods we created in the `not-best-effort-nginx` quota. Scopes provide a mechanism to subdivide the set of resources that are tracked by any quota document to allow greater flexibility in how operators deploy and track resource -consumption. +consumption. In addition to `BestEffort` and `NotBestEffort` scopes, there are scopes to restrict long-running versus time-bound pods. The `Terminating` scope will match any pod diff --git a/docs/tasks/administer-cluster/calico-network-policy.md b/docs/tasks/administer-cluster/calico-network-policy.md index b513c547c9..ef4903b765 100644 --- a/docs/tasks/administer-cluster/calico-network-policy.md +++ b/docs/tasks/administer-cluster/calico-network-policy.md @@ -2,11 +2,6 @@ assignees: - caseydavenport title: Use Calico for NetworkPolicy -redirect_from: -- "/docs/getting-started-guides/network-policy/calico/" -- "/docs/getting-started-guides/network-policy/calico.html" -- "/docs/tasks/configure-pod-container/calico-network-policy/" -- "/docs/tasks/configure-pod-container/calico-network-policy.html" --- {% capture overview %} @@ -14,7 +9,7 @@ This page shows how to use Calico for NetworkPolicy. {% endcapture %} {% capture prerequisites %} -* Install Calico for Kubernetes. +* Install Calico for Kubernetes. {% endcapture %} {% capture steps %} @@ -34,7 +29,7 @@ See the [Calico documentation](http://docs.projectcalico.org/) for more options {% capture discussion %} ## Understanding Calico components -Deploying a cluster with Calico adds Pods that support Kubernetes NetworkPolicy. These Pods run in the `kube-system` Namespace. +Deploying a cluster with Calico adds Pods that support Kubernetes NetworkPolicy. These Pods run in the `kube-system` Namespace. 
To see this list of Pods run: diff --git a/docs/tasks/administer-cluster/cluster-management.md b/docs/tasks/administer-cluster/cluster-management.md index da24a4a2b2..bfac133344 100644 --- a/docs/tasks/administer-cluster/cluster-management.md +++ b/docs/tasks/administer-cluster/cluster-management.md @@ -3,11 +3,6 @@ assignees: - lavalamp - thockin title: Cluster Management -redirect_from: -- "/docs/admin/cluster-management/" -- "/docs/admin/cluster-management.html" -- "/docs/concepts/cluster-administration/cluster-management/" -- "/docs/concepts/cluster-administration/cluster-management.html" --- * TOC diff --git a/docs/tasks/administer-cluster/configure-multiple-schedulers.md b/docs/tasks/administer-cluster/configure-multiple-schedulers.md index 1acff68bfd..6c7303fc06 100644 --- a/docs/tasks/administer-cluster/configure-multiple-schedulers.md +++ b/docs/tasks/administer-cluster/configure-multiple-schedulers.md @@ -3,11 +3,6 @@ assignees: - davidopp - madhusudancs title: Configure Multiple Schedulers -redirect_from: -- "/docs/admin/multiple-schedulers/" -- "/docs/admin/multiple-schedulers.html" -- "/docs/tutorials/clusters/multiple-schedulers/" -- "/docs/tutorials/clusters/multiple-schedulers.html" --- Kubernetes ships with a default scheduler that is described [here](/docs/admin/kube-scheduler/). diff --git a/docs/tasks/administer-cluster/configure-upgrade-etcd.md b/docs/tasks/administer-cluster/configure-upgrade-etcd.md index 142c3c7d6b..b80642072b 100644 --- a/docs/tasks/administer-cluster/configure-upgrade-etcd.md +++ b/docs/tasks/administer-cluster/configure-upgrade-etcd.md @@ -3,17 +3,6 @@ assignees: - mml - wojtek-t title: Operating etcd clusters for Kubernetes -redirect_from: -- "/docs/concepts/storage/etcd-store-api-object/" -- "/docs/concepts/storage/etcd-store-api-object.html" -- "/docs/admin/etcd/" -- "/docs/admin/etcd.html" -- "/docs/admin/etcd_upgrade/" -- "/docs/admin/etcd_upgrade.html" -- "/docs/concepts/cluster-administration/configure-etcd/" -- "/docs/concepts/cluster-administration/configure-etcd.html" -- "/docs/concepts/cluster-administration/etcd-upgrade/" -- "/docs/concepts/cluster-administration/etcd-upgrade.html" --- etcd is a strongly consistent and highly-available key value store which Kubernetes uses for persistent storage of all of its API objects. This documentation provides specific instructions on operating, upgrading, and rolling back etcd clusters for Kubernetes. For in-depth information on etcd, see [etcd documentation](https://github.com/coreos/etcd/blob/master/Documentation/docs.md).
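Before operating on a live etcd cluster, it can help to confirm what you are running (a hedged sketch; the endpoint and tooling depend on how etcd was deployed):

```shell
# Check the tool version and overall cluster health (etcd v2 API tooling).
etcdctl --version
etcdctl --endpoints=http://127.0.0.1:2379 cluster-health
```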
diff --git a/docs/tasks/administer-cluster/cpu-memory-limit.md b/docs/tasks/administer-cluster/cpu-memory-limit.md index 41ef006437..c6207c9ed9 100644 --- a/docs/tasks/administer-cluster/cpu-memory-limit.md +++ b/docs/tasks/administer-cluster/cpu-memory-limit.md @@ -3,11 +3,6 @@ assignees: - derekwaynecarr - janetkuo title: Set Pod CPU and Memory Limits -redirect_from: -- "/docs/admin/limitrange/" -- "/docs/admin/limitrange/index.html" -- "/docs/tasks/configure-pod-container/limit-range/" -- "/docs/tasks/configure-pod-container/limit-range.html" --- {% capture overview %} @@ -39,7 +34,7 @@ $ kubectl create namespace limit-example namespace "limit-example" created ``` -Note that `kubectl` commands will print the type and name of the resource created or mutated, which can then be used in subsequent commands: +Note that `kubectl` commands will print the type and name of the resource created or mutated, which can then be used in subsequent commands: ```shell $ kubectl get namespaces @@ -112,7 +107,7 @@ NAME READY STATUS RESTARTS AGE nginx-2040093540-s8vzu 1/1 Running 0 11s ``` -Let's print this Pod with yaml output format (using `-o yaml` flag), and then `grep` the `resources` field. Note that your pod name will be different. +Let's print this Pod with yaml output format (using `-o yaml` flag), and then `grep` the `resources` field. Note that your pod name will be different. ```shell $ kubectl get pods nginx-2040093540-s8vzu --namespace=limit-example -o yaml | grep resources -C 8 @@ -151,7 +146,7 @@ $ kubectl create -f https://k8s.io/docs/tasks/configure-pod-container/valid-pod. pod "valid-pod" created ``` -Now look at the Pod's resources field: +Now look at the Pod's resources field: ```shell $ kubectl get pods valid-pod --namespace=limit-example -o yaml | grep -C 6 resources diff --git a/docs/tasks/administer-cluster/declare-network-policy.md b/docs/tasks/administer-cluster/declare-network-policy.md index c1fc7feaf2..26b357010a 100644 --- a/docs/tasks/administer-cluster/declare-network-policy.md +++ b/docs/tasks/administer-cluster/declare-network-policy.md @@ -3,14 +3,9 @@ assignees: - caseydavenport - danwinship title: Declare Network Policy -redirect_from: -- "/docs/getting-started-guides/network-policy/walkthrough/" -- "/docs/getting-started-guides/network-policy/walkthrough.html" -- "/docs/tasks/configure-pod-container/declare-network-policy/" -- "/docs/tasks/configure-pod-container/declare-network-policy.html" --- {% capture overview %} -This document helps you get started using using the Kubernetes [NetworkPolicy API](/docs/user-guide/network-policies) to declare network policies that govern how pods communicate with each other. +This document helps you get started using the Kubernetes [NetworkPolicy API](/docs/user-guide/network-policies) to declare network policies that govern how pods communicate with each other. {% endcapture %} {% capture prerequisites %} @@ -28,16 +23,16 @@ You'll need to have a Kubernetes cluster in place, with network policy support. ## Create an `nginx` deployment and expose it via a service -To see how Kubernetes network policy works, start off by creating an `nginx` deployment and exposing it via a service. +To see how Kubernetes network policy works, start off by creating an `nginx` deployment and exposing it via a service.
```console $ kubectl run nginx --image=nginx --replicas=2 deployment "nginx" created -$ kubectl expose deployment nginx --port=80 +$ kubectl expose deployment nginx --port=80 service "nginx" exposed ``` -This runs two `nginx` pods in the default namespace, and exposes them through a service called `nginx`. +This runs two `nginx` pods in the default namespace, and exposes them through a service called `nginx`. ```console $ kubectl get svc,pod @@ -104,7 +99,7 @@ Waiting for pod default/busybox-472357175-y0m47 to be running, status is Pending Hit enter for command prompt -/ # wget --spider --timeout=1 nginx +/ # wget --spider --timeout=1 nginx Connecting to nginx (10.100.0.16:80) wget: download timed out / # diff --git a/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md b/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md index 77415f3414..d131066f01 100644 --- a/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md +++ b/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods.md @@ -4,11 +4,6 @@ assignees: - filipg - piosz title: Guaranteed Scheduling For Critical Add-On Pods -redirect_from: -- "/docs/admin/rescheduler/" -- "/docs/admin/rescheduler.html" -- "/docs/concepts/cluster-administration/guaranteed-scheduling-critical-addon-pods/" -- "/docs/concepts/cluster-administration/guaranteed-scheduling-critical-addon-pods.html" --- * TOC diff --git a/docs/tasks/administer-cluster/highly-available-master.md b/docs/tasks/administer-cluster/highly-available-master.md index 666c2606f3..4b50fddbaf 100644 --- a/docs/tasks/administer-cluster/highly-available-master.md +++ b/docs/tasks/administer-cluster/highly-available-master.md @@ -2,9 +2,6 @@ assignees: - jszczepkowski title: Set up High-Availability Kubernetes Masters -redirect_from: -- "/docs/admin/ha-master-gce/" -- "/docs/admin/ha-master-gce.html" --- * TOC @@ -65,7 +62,7 @@ You can remove a master replica from an HA cluster by using a `kube-down` script * `KUBE_DELETE_NODES=false` - to restrain deletion of kubelets. * `KUBE_GCE_ZONE=zone` - the zone from which the master replica will be removed. - + * `KUBE_REPLICA_NAME=replica_name` - (optional) the name of master replica to remove. If empty: any replica from the given zone will be removed. @@ -105,7 +102,7 @@ A two-replica cluster is thus inferior, in terms of HA, to a single replica clus * When you add a master replica, cluster state (etcd) is copied to a new instance. If the cluster is large, it may take a long time to duplicate its state. -This operation may be sped up by migrating etcd data directory, as described [here](https://coreos.com/etcd/docs/latest/admin_guide.html#member-migration) +This operation may be sped up by migrating the etcd data directory, as described [here](https://coreos.com/etcd/docs/latest/admin_guide.html#member-migration) (we are considering adding support for etcd data dir migration in the future). ## Implementation notes diff --git a/docs/tasks/administer-cluster/kubeadm-upgrade-1-7.md b/docs/tasks/administer-cluster/kubeadm-upgrade-1-7.md index ffdcec348d..44c540028a 100644 --- a/docs/tasks/administer-cluster/kubeadm-upgrade-1-7.md +++ b/docs/tasks/administer-cluster/kubeadm-upgrade-1-7.md @@ -2,9 +2,6 @@ assignees: - pipejakob title: Upgrading kubeadm clusters from 1.6 to 1.7 -redirect_from: -- "/docs/admin/kubeadm-upgrade-1-7/" -- "/docs/admin/kubeadm-upgrade-1-7.html" --- {% capture overview %} @@ -95,4 +92,4 @@ You need to have a Kubernetes cluster running version 1.6.x.
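For example, you can confirm the version your control plane reports before upgrading (a minimal sketch; output format varies by release):

```shell
# The Server Version line should show v1.6.x before starting the upgrade.
kubectl version --short
```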
{% endcapture %} -{% include templates/task.md %} \ No newline at end of file +{% include templates/task.md %} diff --git a/docs/tasks/administer-cluster/limit-storage-consumption.md b/docs/tasks/administer-cluster/limit-storage-consumption.md index 2b402c8743..018ff996c3 100644 --- a/docs/tasks/administer-cluster/limit-storage-consumption.md +++ b/docs/tasks/administer-cluster/limit-storage-consumption.md @@ -1,16 +1,13 @@ --- title: Limit Storage Consumption -redirect_from: -- "/docs/admin/resourcequota/limitstorageconsumption/" -- "/docs/admin/resourcequota/limitstorageconsumption.html" --- {% capture overview %} This example demonstrates an easy way to limit the amount of storage consumed in a namespace. -The following resources are used in the demonstration: [ResourceQuota](/docs/concepts/policy/resource-quotas/), -[LimitRange](/docs/tasks/configure-pod-container/limit-range/), +The following resources are used in the demonstration: [ResourceQuota](/docs/concepts/policy/resource-quotas/), +[LimitRange](/docs/tasks/configure-pod-container/limit-range/), and [PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/). {% endcapture %} @@ -56,17 +53,17 @@ spec: storage: 1Gi ``` -Minimum storage requests are used when the underlying storage provider requires certain minimums. For example, -AWS EBS volumes have a 1Gi minimum requirement. +Minimum storage requests are used when the underlying storage provider requires certain minimums. For example, +AWS EBS volumes have a 1Gi minimum requirement. ## StorageQuota to limit PVC count and cumulative storage capacity Admins can limit the number of PVCs in a namespace as well as the cumulative capacity of those PVCs. New PVCs that exceed either maximum value will be rejected. -In this example, a 6th PVC in the namespace would be rejected because it exceeds the maximum count of 5. Alternatively, +In this example, a 6th PVC in the namespace would be rejected because it exceeds the maximum count of 5. Alternatively, a 5Gi maximum quota when combined with the 2Gi max limit above, cannot have 3 PVCs where each has 2Gi. That would be 6Gi requested - for a namespace capped at 5Gi. + for a namespace capped at 5Gi. ``` apiVersion: v1 @@ -83,10 +80,10 @@ spec: {% capture discussion %} -## Summary +## Summary A limit range can put a ceiling on how much storage is requested while a resource quota can effectively cap the storage -consumed by a namespace through claim counts and cumulative storage capacity. The allows a cluster-admin to plan their +consumed by a namespace through claim counts and cumulative storage capacity. This allows a cluster-admin to plan their cluster's storage budget without risk of any one project going over their allotment. {% endcapture %} diff --git a/docs/tasks/administer-cluster/namespaces-walkthrough.md b/docs/tasks/administer-cluster/namespaces-walkthrough.md index 8b248e92c8..be275c798b 100644 --- a/docs/tasks/administer-cluster/namespaces-walkthrough.md +++ b/docs/tasks/administer-cluster/namespaces-walkthrough.md @@ -3,9 +3,6 @@ assignees: - derekwaynecarr - janetkuo title: Namespaces Walkthrough -redirect_from: -- "/docs/admin/namespaces/walkthrough/" -- "/docs/admin/namespaces/walkthrough.html" --- Kubernetes _namespaces_ help different projects, teams, or customers to share a Kubernetes cluster. @@ -153,9 +150,9 @@ Let's create some contents.
```shell $ kubectl run snowflake --image=kubernetes/serve_hostname --replicas=2 ``` -We have just created a deployment whose replica size is 2 that is running the pod called snowflake with a basic container that just serves the hostname. +We have just created a deployment with 2 replicas that runs a pod called snowflake, using a basic container that just serves the hostname. Note that `kubectl run` creates deployments only on Kubernetes cluster >= v1.2. If you are running older versions, it creates replication controllers instead. -If you want to obtain the old behavior, use `--generator=run/v1` to create replication controllers. See [`kubectl run`](/docs/user-guide/kubectl/v1.6/#run) for more details. +If you want to obtain the old behavior, use `--generator=run/v1` to create replication controllers. See [`kubectl run`](/docs/user-guide/kubectl/v1.6/#run) for more details. ```shell $ kubectl get deployment diff --git a/docs/tasks/administer-cluster/namespaces.md b/docs/tasks/administer-cluster/namespaces.md index ad0da4e625..e1f00b84fb 100644 --- a/docs/tasks/administer-cluster/namespaces.md +++ b/docs/tasks/administer-cluster/namespaces.md @@ -3,9 +3,6 @@ assignees: - derekwaynecarr - janetkuo title: Share a Cluster with Namespaces -redirect_from: -- "/docs/admin/namespaces/" -- "/docs/admin/namespaces/index.html" --- A Namespace is a mechanism to partition resources created by users into diff --git a/docs/tasks/administer-cluster/out-of-resource.md b/docs/tasks/administer-cluster/out-of-resource.md index a076b0dc77..6fa0b45de7 100644 --- a/docs/tasks/administer-cluster/out-of-resource.md +++ b/docs/tasks/administer-cluster/out-of-resource.md @@ -4,11 +4,6 @@ assignees: - vishh - timstclair title: Configure Out Of Resource Handling -redirect_from: -- "/docs/admin/out-of-resource/" -- "/docs/admin/out-of-resource.html" -- "/docs/concepts/cluster-administration/out-of-resource/" -- "/docs/concepts/cluster-administration/out-of-resource.html" --- * TOC diff --git a/docs/tasks/administer-cluster/reserve-compute-resources.md b/docs/tasks/administer-cluster/reserve-compute-resources.md index 923c2b22af..a759fee87a 100644 --- a/docs/tasks/administer-cluster/reserve-compute-resources.md +++ b/docs/tasks/administer-cluster/reserve-compute-resources.md @@ -4,9 +4,6 @@ assignees: - derekwaynecarr - dashpole title: Reserve Compute Resources for System Daemons -redirect_from: -- "/docs/admin/node-allocatable/" -- "/docs/admin/node-allocatable.html" --- * TOC diff --git a/docs/tasks/administer-cluster/romana-network-policy.md b/docs/tasks/administer-cluster/romana-network-policy.md index 897298d863..a5f62a6ac7 100644 --- a/docs/tasks/administer-cluster/romana-network-policy.md +++ b/docs/tasks/administer-cluster/romana-network-policy.md @@ -2,11 +2,6 @@ assignees: - chrismarino title: Romana for NetworkPolicy -redirect_from: -- "/docs/getting-started-guides/network-policy/romana/" -- "/docs/getting-started-guides/network-policy/romana.html" -- "/docs/tasks/configure-pod-container/romana-network-policy/" -- "/docs/tasks/configure-pod-container/romana-network-policy.html" --- {% capture overview %} @@ -17,7 +12,7 @@ This page shows how to use Romana for NetworkPolicy. {% capture prerequisites %} -Complete steps 1, 2, and 3 of the [kubeadm getting started guide](/docs/getting-started-guides/kubeadm/). +Complete steps 1, 2, and 3 of the [kubeadm getting started guide](/docs/getting-started-guides/kubeadm/); a rough sketch of those commands follows below.
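Those steps boil down to roughly the following on the master and on each node (a hedged sketch for the kubeadm versions these docs target; the linked guide is authoritative):

```shell
# On the master: initialize the control plane.
kubeadm init

# On each node: join the cluster, using the token printed by `kubeadm init`.
kubeadm join --token <token> <master-ip>:<master-port>
```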
{% endcapture %} @@ -25,16 +20,16 @@ Complete steps 1, 2, and 3 of the [kubeadm getting started guide](/docs/getting ## Installing Romana with kubeadm -Follow the [containerized installation guide](https://github.com/romana/romana/tree/master/containerize) for kubeadmin. +Follow the [containerized installation guide](https://github.com/romana/romana/tree/master/containerize) for kubeadm. ## Applying network policies To apply network policies use one of the following: -* [Romana network policies](https://github.com/romana/romana/wiki/Romana-policies). +* [Romana network policies](https://github.com/romana/romana/wiki/Romana-policies). * [Example of Romana network policy](https://github.com/romana/core/tree/master/policy). * The NetworkPolicy API. - + {% endcapture %} {% capture whatsnext %} diff --git a/docs/tasks/administer-cluster/running-cloud-controller.md b/docs/tasks/administer-cluster/running-cloud-controller.md index 72a71a2a11..d10ba81955 100644 --- a/docs/tasks/administer-cluster/running-cloud-controller.md +++ b/docs/tasks/administer-cluster/running-cloud-controller.md @@ -1,15 +1,12 @@ --- assignees: -- thockin +- thockin title: Build and Run cloud-controller-manager -redirect_from: -- "/docs/getting-started-guides/running-cloud-controller/" -- "/docs/getting-started-guides/running-cloud-controller.html" --- Kubernetes version 1.6 contains a new binary called `cloud-controller-manager`. `cloud-controller-manager` is a daemon that embeds cloud-specific control loops in Kubernetes. These cloud-specific control loops were originally in the kube-controller-manager. However, cloud providers move at a different pace and schedule compared to the Kubernetes project, and abstracting the provider-specific code to the `cloud-controller-manager` binary allows cloud provider vendors to evolve independently from the core Kubernetes code. -The `cloud-controller-manager` can be linked to any cloud provider that satisifies the [cloudprovider.Interface](https://git.k8s.io/kubernetes/pkg/cloudprovider/cloud.go). +The `cloud-controller-manager` can be linked to any cloud provider that satisfies the [cloudprovider.Interface](https://git.k8s.io/kubernetes/pkg/cloudprovider/cloud.go). In future Kubernetes releases, cloud vendors should link code that satisfies the above interface to the `cloud-controller-manager` project and compile `cloud-controller-manager` for their own clouds. Cloud providers would also be responsible for maintaining and evolving their code.
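For orientation, a hypothetical invocation of the binary might look like the following (flags vary by provider and release; this is a sketch, not a supported command line):

```shell
# Run the cloud-specific control loops out of process, pointed at the cluster.
cloud-controller-manager --cloud-provider=gce --kubeconfig=/etc/kubernetes/admin.conf
```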
* TOC diff --git a/docs/tasks/administer-cluster/share-configuration.md b/docs/tasks/administer-cluster/share-configuration.md index 77ae867707..57cd219b95 100644 --- a/docs/tasks/administer-cluster/share-configuration.md +++ b/docs/tasks/administer-cluster/share-configuration.md @@ -3,9 +3,6 @@ assignees: - mikedanese - thockin title: Share Cluster Access with kubeconfig -redirect_from: -- "/docs/user-guide/sharing-clusters/" -- "/docs/user-guide/sharing-clusters.html" --- Client access to a running Kubernetes cluster can be shared by copying diff --git a/docs/tasks/administer-cluster/static-pod.md b/docs/tasks/administer-cluster/static-pod.md index 84a35e696d..cb8a011ac9 100644 --- a/docs/tasks/administer-cluster/static-pod.md +++ b/docs/tasks/administer-cluster/static-pod.md @@ -2,11 +2,6 @@ assignees: - jsafrane title: Static Pods -redirect_from: -- "/docs/admin/static-pods/" -- "/docs/admin/static-pods.html" -- "/docs/concepts/cluster-administration/static-pod/" -- "/docs/concepts/cluster-administration/static-pod.html" --- **If you are running clustered Kubernetes and are using static pods to run a pod on every node, you should probably be using a [DaemonSet](/docs/concepts/workloads/controllers/daemonset/)!** diff --git a/docs/tasks/administer-cluster/upgrade-1-6.md b/docs/tasks/administer-cluster/upgrade-1-6.md index 0ad116fdab..f2a2a5e9ee 100644 --- a/docs/tasks/administer-cluster/upgrade-1-6.md +++ b/docs/tasks/administer-cluster/upgrade-1-6.md @@ -2,28 +2,25 @@ assignees: - mml title: Cluster Management Guide for Version 1.6 -redirect_from: -- "/docs/admin/upgrade-1-6/" -- "/docs/admin/upgrade-1-6.html" --- * TOC {:toc} -This document outlines the potentially disruptive changes that exist in the 1.6 release cycle. Operators, administrators, and developers should -take note of the changes below in order to maintain continuity across their upgrade process. +This document outlines the potentially disruptive changes that exist in the 1.6 release cycle. Operators, administrators, and developers should +take note of the changes below in order to maintain continuity across their upgrade process. -## Cluster defaults set to etcd 3 +## Cluster defaults set to etcd 3 -In the 1.6 release cycle, the default backend storage layer has been upgraded to fully leverage [etcd 3 capabilities](https://coreos.com/blog/etcd3-a-new-etcd.html) by default. -For new clusters, there is nothing an operator will need to do, it should "just work". However, if you are upgrading from a 1.5 cluster, care should be taken to ensure -continuity. +In the 1.6 release cycle, the default backend storage layer has been upgraded to fully leverage [etcd 3 capabilities](https://coreos.com/blog/etcd3-a-new-etcd.html) by default. +For new clusters, there is nothing an operator needs to do; it should "just work". However, if you are upgrading from a 1.5 cluster, care should be taken to ensure +continuity. -It is possible to maintain v2 compatibility mode while running etcd 3 for an interim period of time. To do this, you will simply need to update an argument passed to your apiserver during -startup: +It is possible to maintain v2 compatibility mode while running etcd 3 for an interim period of time.
To do this, you will simply need to update an argument passed to your apiserver during +startup: ``` $ kube-apiserver --storage-backend='etcd2' $(EXISTING_ARGS) -``` +``` -However, for long-term maintenance of the cluster, we recommend that the operator plan an outage window in order to perform a [v2->v3 data upgrade](https://coreos.com/etcd/docs/latest/upgrades/upgrade_3_0.html). +However, for long-term maintenance of the cluster, we recommend that the operator plan an outage window in order to perform a [v2->v3 data upgrade](https://coreos.com/etcd/docs/latest/upgrades/upgrade_3_0.html). diff --git a/docs/tasks/administer-cluster/weave-network-policy.md b/docs/tasks/administer-cluster/weave-network-policy.md index 7960007655..a6c9fad9a7 100644 --- a/docs/tasks/administer-cluster/weave-network-policy.md +++ b/docs/tasks/administer-cluster/weave-network-policy.md @@ -2,11 +2,6 @@ assignees: - bboreham title: Weave Net for NetworkPolicy -redirect_from: -- "/docs/getting-started-guides/network-policy/weave/" -- "/docs/getting-started-guides/network-policy/weave.html" -- "/docs/tasks/configure-pod-container/weave-network-policy/" -- "/docs/tasks/configure-pod-container/weave-network-policy.html" --- {% capture overview %} @@ -17,13 +12,13 @@ This page shows how to use Weave Net for NetworkPolicy. {% capture prerequisites %} -Complete steps 1, 2, and 3 of the [kubeadm getting started guide](/docs/getting-started-guides/kubeadm/). +Complete steps 1, 2, and 3 of the [kubeadm getting started guide](/docs/getting-started-guides/kubeadm/). {% endcapture %} {% capture steps %} -## Installing Weave Net addon +## Installing Weave Net addon Follow the [Integrating Kubernetes via the Addon](https://www.weave.works/docs/net/latest/kube-addon/) guide. diff --git a/docs/tasks/administer-federation/cluster.md b/docs/tasks/administer-federation/cluster.md index a08919a180..c24fa89b90 100644 --- a/docs/tasks/administer-federation/cluster.md +++ b/docs/tasks/administer-federation/cluster.md @@ -1,8 +1,5 @@ --- title: Federated Cluster -redirect_from: -- "/docs/user-guide/federation/cluster/" -- "/docs/user-guide/federation/cluster.html" --- {% capture overview %} diff --git a/docs/tasks/administer-federation/configmap.md b/docs/tasks/administer-federation/configmap.md index 1e8e1acbc0..857ea8a59a 100644 --- a/docs/tasks/administer-federation/configmap.md +++ b/docs/tasks/administer-federation/configmap.md @@ -1,8 +1,5 @@ --- title: Federated ConfigMap -redirect_from: -- "/docs/user-guide/federation/configmap/" -- "/docs/user-guide/federation/configmap.html" --- {% capture overview %} @@ -75,7 +72,7 @@ the federation apiserver instead of sending it to a specific Kubernetes cluster. For example, you can do that using kubectl by running: ```shell -kubectl --context=federation-cluster delete configmap +kubectl --context=federation-cluster delete configmap ``` Note that at this point, deleting a Federated ConfigMap will not delete the diff --git a/docs/tasks/administer-federation/daemonset.md b/docs/tasks/administer-federation/daemonset.md index 4caf90d645..0b79f261e4 100644 --- a/docs/tasks/administer-federation/daemonset.md +++ b/docs/tasks/administer-federation/daemonset.md @@ -1,8 +1,5 @@ --- title: Federated DaemonSet -redirect_from: -- "/docs/user-guide/federation/daemonsets/" -- "/docs/user-guide/federation/daemonsets.html" --- {% capture overview %} @@ -43,7 +40,7 @@ request to the Federation apiserver instead of sending it to a Kubernetes cluster. 
Once a Federated DaemonSet is created, the federation control plane will create -a matching DaemonSet in all underlying Kubernetes clusters. +a matching DaemonSet in all underlying Kubernetes clusters. You can verify this by checking each of the underlying clusters, for example: ``` shell diff --git a/docs/tasks/administer-federation/deployment.md b/docs/tasks/administer-federation/deployment.md index 22caa24e66..6aa8b3b803 100644 --- a/docs/tasks/administer-federation/deployment.md +++ b/docs/tasks/administer-federation/deployment.md @@ -1,8 +1,5 @@ --- title: Federated Deployment -redirect_from: -- "/docs/user-guide/federation/deployment/" -- "/docs/user-guide/federation/deployment.html" --- {% capture overview %} @@ -14,8 +11,8 @@ Deployment](/docs/concepts/workloads/controllers/deployment/) and provide the sa Creating them in the federation control plane ensures that the desired number of replicas exist across the registered clusters. -**As of Kubernetes version 1.5, Federated Deployment is an Alpha feature. The core -functionality of Deployment is present, but some features +**As of Kubernetes version 1.5, Federated Deployment is an Alpha feature. The core +functionality of Deployment is present, but some features (such as full rollout compatibility) are still in development.** {% endcapture %} @@ -60,7 +57,7 @@ These Deployments in underlying clusters will match the federation Deployment _except_ in the number of replicas and revision-related annotations. Federation control plane ensures that the sum of replicas in each cluster combined matches the desired number of replicas in the -Federated Deployment. +Federated Deployment. ### Spreading Replicas in Underlying Clusters @@ -81,7 +78,7 @@ Deployment; however, for a Federated Deployment, you must send the request to the federation apiserver instead of sending it to a specific Kubernetes cluster. The federation control plane ensures that whenever the Federated Deployment is updated, it updates the corresponding Deployments in all underlying clusters to -match it. So if the rolling update strategy was chosen then the underlying +match it. So if the rolling update strategy was chosen, the underlying cluster will do the rolling update independently and `maxSurge` and `maxUnavailable` will apply only to individual clusters. This behavior may change in the future. diff --git a/docs/tasks/administer-federation/events.md b/docs/tasks/administer-federation/events.md index de5f6cefd9..1d9f72ea0e 100644 --- a/docs/tasks/administer-federation/events.md +++ b/docs/tasks/administer-federation/events.md @@ -1,8 +1,5 @@ --- title: Federated Events -redirect_from: -- "/docs/user-guide/federation/events/" -- "/docs/user-guide/federation/events.html" --- This guide explains how to use events in the federation control plane to help with debugging.
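For example, assuming a kubeconfig context named `federation-cluster`, as used elsewhere in these guides:

```shell
# List recent events recorded by the federation control plane.
kubectl --context=federation-cluster get events
```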
diff --git a/docs/tasks/administer-federation/ingress.md b/docs/tasks/administer-federation/ingress.md index 60ef62697c..9149b761b1 100644 --- a/docs/tasks/administer-federation/ingress.md +++ b/docs/tasks/administer-federation/ingress.md @@ -1,9 +1,7 @@ --- title: Federated Ingress -redirect_from: -- "/docs/user-guide/federation/federated-ingress/" -- "/docs/user-guide/federation/federated-ingress.html" --- + {% capture overview %} This page explains how to use Kubernetes Federated Ingress to deploy a common HTTP(S) virtual IP load balancer across a federated service running in @@ -25,7 +23,7 @@ Federated Ingress is released as an alpha feature, and supports Google Cloud Pla GCE and hybrid scenarios involving both) in Kubernetes v1.4. Work is under way to support other cloud providers such as AWS, and other hybrid cloud scenarios (e.g. services spanning private on-premise as well as public cloud Kubernetes -clusters). +clusters). You create Federated Ingresses in much the same way as traditional [Kubernetes Ingresses](/docs/concepts/services-networking/ingress/): by making an API @@ -151,7 +149,7 @@ may take up to a few minutes). the network traffic directed to this ingress (that is, 'Service Endpoints' behind the service backing the Ingress), so the Federated Ingress does not yet consider these to be healthy shards and will not direct traffic to any of these clusters. -* The federation control system +* The federation control system automatically reconfigures the load balancer controllers in all of the clusters in your federation to make them consistent, and allows them to share global load balancers. But this reconfiguration can @@ -202,7 +200,7 @@ nginx 10.63.250.98 104.199.136.89 80/TCP 9m Federations of Kubernetes Clusters can include clusters running in different cloud providers (for example, Google Cloud, AWS), and on-premises (for example, on OpenStack). However, in Kubernetes v1.4, Federated Ingress is only -supported across Google Cloud clusters. +supported across Google Cloud clusters. ## Discovering a federated ingress @@ -301,11 +299,11 @@ Check that: {% capture whatsnext %} * If you need assistance, use one of the [support channels](/docs/tasks/debug-application-cluster/troubleshooting/) to seek assistance. - * For details about use cases that motivated this work, see + * For details about use cases that motivated this work, see [Federation proposal](https://git.k8s.io/community/contributors/design-proposals/federation.md).
{% endcapture %} {% include templates/task.md %} - - - - + + + + diff --git a/docs/tasks/administer-federation/namespaces.md b/docs/tasks/administer-federation/namespaces.md index 1fc319f553..45f85d1c2c 100644 --- a/docs/tasks/administer-federation/namespaces.md +++ b/docs/tasks/administer-federation/namespaces.md @@ -1,8 +1,5 @@ --- title: Federated Namespaces -redirect_from: -- "/docs/user-guide/federation/namespaces/" -- "/docs/user-guide/federation/namespaces.html" --- {% capture overview %} diff --git a/docs/tasks/administer-federation/replicaset.md b/docs/tasks/administer-federation/replicaset.md index f2b6c5a9d6..ea20022148 100644 --- a/docs/tasks/administer-federation/replicaset.md +++ b/docs/tasks/administer-federation/replicaset.md @@ -1,8 +1,5 @@ --- title: Federated ReplicaSets -redirect_from: -- "/docs/user-guide/federation/replicasets/" -- "/docs/user-guide/federation/replicasets.html" --- {% capture overview %} diff --git a/docs/tasks/administer-federation/secret.md b/docs/tasks/administer-federation/secret.md index c4ea460fb5..2cd9aa26ea 100644 --- a/docs/tasks/administer-federation/secret.md +++ b/docs/tasks/administer-federation/secret.md @@ -1,8 +1,5 @@ --- title: Federated Secrets -redirect_from: -- "/docs/user-guide/federation/secrets/" -- "/docs/user-guide/federation/secrets.html" --- This guide explains how to use secrets in the Federation control plane. @@ -81,7 +78,7 @@ the federation apiserver instead of sending it to a specific Kubernetes cluster. For example, you can do that using kubectl by running: ```shell -kubectl --context=federation-cluster delete secret mysecret +kubectl --context=federation-cluster delete secret mysecret ``` Note that at this point, deleting a federated secret will not delete the diff --git a/docs/tasks/configure-pod-container/assign-pods-nodes.md b/docs/tasks/configure-pod-container/assign-pods-nodes.md index 29dc7b6b3f..de002421eb 100644 --- a/docs/tasks/configure-pod-container/assign-pods-nodes.md +++ b/docs/tasks/configure-pod-container/assign-pods-nodes.md @@ -1,8 +1,5 @@ --- title: Assign Pods to Nodes -redirect_from: -- "/docs/tasks/administer-cluster/assign-pods-nodes/" -- "/docs/tasks/administer-cluster/assign-pods-nodes.html" --- {% capture overview %} diff --git a/docs/tasks/configure-pod-container/configmap.md b/docs/tasks/configure-pod-container/configmap.md index a82740a0d9..4cc1f7d191 100644 --- a/docs/tasks/configure-pod-container/configmap.md +++ b/docs/tasks/configure-pod-container/configmap.md @@ -3,9 +3,6 @@ assignees: - eparis - pmorie title: Configure Containers Using a ConfigMap -redirect_from: -- "/docs/user-guide/configmap/index/" -- "/docs/user-guide/configmap/index.html" --- @@ -24,7 +21,7 @@ This page shows you how to configure an application using a ConfigMap. ConfigMap {% capture steps %} -## Use kubectl to create a ConfigMap +## Use kubectl to create a ConfigMap Use the `kubectl create configmap` command to create configmaps from [directories](#creating-configmaps-from-directories), [files](#creating-configmaps-from-files), or [literal values](#creating-configmaps-from-literal-values): ```shell kubectl create configmap <map-name> <data-source> ``` where \<map-name> is the name you want to assign to the ConfigMap and \<data-source> is the directory, file, or literal value to draw the data from.
- + The data source corresponds to a key-value pair in the ConfigMap, where -* key = the file name or the key you provided on the command line, and +* key = the file name or the key you provided on the command line, and * value = the file contents or the literal value you provided on the command line. - + You can use [`kubectl describe`](/docs/user-guide/kubectl/v1.6/#describe) or [`kubectl get`](/docs/user-guide/kubectl/v1.6/#get) to retrieve information about a ConfigMap. The former shows a summary of the ConfigMap, while the latter returns the full contents of the ConfigMap. ### Create ConfigMaps from directories -You can use `kubectl create configmap` to create a ConfigMap from multiple files in the same directory. +You can use `kubectl create configmap` to create a ConfigMap from multiple files in the same directory. For example: @@ -110,10 +107,10 @@ metadata: You can use `kubectl create configmap` to create a ConfigMap from an individual file, or from multiple files. -For example, +For example, ```shell -kubectl create configmap game-config-2 --from-file=docs/user-guide/configmap/kubectl/game.properties +kubectl create configmap game-config-2 --from-file=docs/user-guide/configmap/kubectl/game.properties ``` would produce the following ConfigMap: @@ -131,9 +128,9 @@ game.properties: 158 bytes ``` You can pass in the `--from-file` argument multiple times to create a ConfigMap from multiple data sources. - + ```shell -kubectl create configmap game-config-2 --from-file=docs/user-guide/configmap/kubectl/game.properties --from-file=docs/user-guide/configmap/kubectl/ui.properties +kubectl create configmap game-config-2 --from-file=docs/user-guide/configmap/kubectl/game.properties --from-file=docs/user-guide/configmap/kubectl/ui.properties ``` ```shell @@ -158,8 +155,8 @@ kubectl create configmap game-config-3 --from-file=<my-key-name>=<path-to-file> ``` where `<my-key-name>` is the key you want to use in the ConfigMap and `<path-to-file>` is the location of the data source file you want the key to represent. - -For example: + +For example: ```shell kubectl create configmap game-config-3 --from-file=game-special-key=docs/user-guide/configmap/kubectl/game.properties @@ -221,12 +218,12 @@ metadata: {% capture discussion %} -## Understanding ConfigMaps +## Understanding ConfigMaps -ConfigMaps allow you to decouple configuration artifacts from image content to keep containerized applications portable. +ConfigMaps allow you to decouple configuration artifacts from image content to keep containerized applications portable. The ConfigMap API resource stores configuration data as key-value pairs. The data can be consumed in pods or provide the configurations for system components such as controllers. ConfigMap is similar to [Secrets](/docs/concepts/configuration/secret/), but provides a means of working with strings that don't contain sensitive information. Users and system components alike can store configuration data in ConfigMap. -Note: ConfigMaps should reference properties files, not replace them. Think of the ConfigMap as representing something similar to the a Linux `/etc` directory and its contents. For example, if you create a [Kubernetes Volume](/docs/concepts/storage/volumes/) from a ConfigMap, each data item in the ConfigMap is represented by an individual file in the volume. +Note: ConfigMaps should reference properties files, not replace them. Think of the ConfigMap as representing something similar to a Linux `/etc` directory and its contents.
For example, if you create a [Kubernetes Volume](/docs/concepts/storage/volumes/) from a ConfigMap, each data item in the ConfigMap is represented by an individual file in the volume. The ConfigMap's `data` field contains the configuration data. As shown in the example below, this can be simple -- like individual properties defined using `--from-literal` -- or complex -- like configuration files or JSON blobs defined using `--from-file`. diff --git a/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md b/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md index 51d44e6a02..4dc47cc5a6 100644 --- a/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md +++ b/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md @@ -1,7 +1,4 @@ --- -redirect_from: -- "/docs/user-guide/liveness/" -- "/docs/user-guide.liveness.html" title: Configure Liveness and Readiness Probes --- diff --git a/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md b/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md index 114d9a9588..f4170b7a41 100644 --- a/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md +++ b/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md @@ -1,8 +1,5 @@ --- title: Configure a Pod to Use a PersistentVolume for Storage -redirect_from: -- "/docs/user-guide/persistent-volumes/walkthrough/" -- "/docs/user-guide/persistent-volumes/walkthrough.html" --- {% capture overview %} diff --git a/docs/tasks/configure-pod-container/configure-projected-volume-storage.md b/docs/tasks/configure-pod-container/configure-projected-volume-storage.md index 896a96acc5..8a3a1505b0 100644 --- a/docs/tasks/configure-pod-container/configure-projected-volume-storage.md +++ b/docs/tasks/configure-pod-container/configure-projected-volume-storage.md @@ -3,10 +3,6 @@ assignees: - jpeeler - pmorie title: Configure a Pod to Use a Projected Volume for Storage -redirect_from: -- "/docs/tasks/configure-pod-container/projected-volume/" -- "/docs/user-guide/projected-volume/" -- "/docs/user-guide/projected-volume/index.html" --- {% capture overview %} diff --git a/docs/tasks/configure-pod-container/configure-service-account.md b/docs/tasks/configure-pod-container/configure-service-account.md index 4c8aecf290..2909ca9984 100644 --- a/docs/tasks/configure-pod-container/configure-service-account.md +++ b/docs/tasks/configure-pod-container/configure-service-account.md @@ -4,9 +4,6 @@ assignees: - liggitt - thockin title: Configure Service Accounts for Pods -redirect_from: -- "/docs/user-guide/service-accounts/" -- "/docs/user-guide/service-accounts.html" --- A service account provides an identity for processes that run in a Pod. @@ -134,7 +131,7 @@ apiVersion: v1 kind: Secret metadata: name: build-robot-secret - annotations: + annotations: kubernetes.io/service-account.name: build-robot type: kubernetes.io/service-account-token EOF @@ -147,7 +144,7 @@ Now you can confirm that the newly built secret is populated with an API token f Any tokens for non-existent service accounts will be cleaned up by the token controller. 
```shell -$ kubectl describe secrets/build-robot-secret +$ kubectl describe secrets/build-robot-secret Name: build-robot-secret Namespace: default Labels: diff --git a/docs/tasks/configure-pod-container/security-context.md b/docs/tasks/configure-pod-container/security-context.md index 85fcf20818..0becdb1c31 100644 --- a/docs/tasks/configure-pod-container/security-context.md +++ b/docs/tasks/configure-pod-container/security-context.md @@ -4,9 +4,6 @@ assignees: - mikedanese - thockin title: Configure a Security Context for a Pod or Container -redirect_from: -- "/docs/user-guide/security-context/" -- "/docs/concepts/policy/container-capabilities/" --- {% capture overview %} diff --git a/docs/tasks/debug-application-cluster/audit.md b/docs/tasks/debug-application-cluster/audit.md index 0a5705f470..b378f7b723 100644 --- a/docs/tasks/debug-application-cluster/audit.md +++ b/docs/tasks/debug-application-cluster/audit.md @@ -4,11 +4,6 @@ assignees: - sttts - ericchiang title: Auditing -redirect_from: -- "/docs/admin/audit/" -- "/docs/admin/audit.html" -- "/docs/concepts/cluster-administration/audit/" -- "/docs/concepts/cluster-administration/audit.html" --- * TOC diff --git a/docs/tasks/debug-application-cluster/debug-application-introspection.md b/docs/tasks/debug-application-cluster/debug-application-introspection.md index 8004407d3d..656a2f8fa5 100644 --- a/docs/tasks/debug-application-cluster/debug-application-introspection.md +++ b/docs/tasks/debug-application-cluster/debug-application-introspection.md @@ -3,9 +3,6 @@ assignees: - janetkuo - thockin title: Application Introspection and Debugging -redirect_from: -- "/docs/user-guide/introspection-and-debugging/" -- "/docs/user-guide/introspection-and-debugging.html" --- Once your application is running, you'll inevitably need to debug problems with it. @@ -90,7 +87,7 @@ Containers: Environment Variables: Conditions: Type Status - Ready True + Ready True Volumes: default-token-4bcbi: Type: Secret (a volume populated by a Secret) @@ -140,7 +137,7 @@ $ kubectl describe pod nginx-deployment-1370807587-fz9sd Node: / Labels: app=nginx,pod-template-hash=1370807587 Status: Pending - IP: + IP: Controllers: ReplicaSet/nginx-deployment-1370807587 Containers: nginx: @@ -278,7 +275,7 @@ Labels: kubernetes.io/hostname=kubernetes-node-861h CreationTimestamp: Fri, 10 Jul 2015 14:32:29 -0700 Conditions: Type Status LastHeartbeatTime LastTransitionTime Reason Message - Ready Unknown Fri, 10 Jul 2015 14:34:32 -0700 Fri, 10 Jul 2015 14:35:15 -0700 Kubelet stopped posting node status. + Ready Unknown Fri, 10 Jul 2015 14:34:32 -0700 Fri, 10 Jul 2015 14:35:15 -0700 Kubelet stopped posting node status. Addresses: 10.240.115.55,104.197.0.26 Capacity: cpu: 1 diff --git a/docs/tasks/debug-application-cluster/debug-application.md b/docs/tasks/debug-application-cluster/debug-application.md index 543d1145f0..86d66ffe93 100644 --- a/docs/tasks/debug-application-cluster/debug-application.md +++ b/docs/tasks/debug-application-cluster/debug-application.md @@ -3,9 +3,6 @@ assignees: - mikedanese - thockin title: Troubleshoot Applications -redirect_from: -- "/docs/user-guide/application-troubleshooting/" -- "/docs/user-guide/application-troubleshooting.html" --- This guide is to help users debug applications that are deployed into Kubernetes and not behaving correctly. 
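A minimal first pass usually looks something like this (standard kubectl commands; the pod name is a placeholder):

```shell
kubectl get pods                 # survey pod status across the namespace
kubectl describe pod <pod-name>  # events, restart counts, container state
kubectl logs <pod-name>          # container output
```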
diff --git a/docs/tasks/debug-application-cluster/debug-cluster.md b/docs/tasks/debug-application-cluster/debug-cluster.md index 7322ccdcd9..c7b86010e3 100644 --- a/docs/tasks/debug-application-cluster/debug-cluster.md +++ b/docs/tasks/debug-application-cluster/debug-cluster.md @@ -2,9 +2,6 @@ assignees: - davidopp title: Troubleshoot Clusters -redirect_from: -- "/docs/admin/cluster-troubleshooting/" -- "/docs/admin/cluster-troubleshooting.html" --- This doc is about cluster troubleshooting; we assume you have already ruled out your application as the root cause of the diff --git a/docs/tasks/debug-application-cluster/debug-init-containers.md b/docs/tasks/debug-application-cluster/debug-init-containers.md index 913023649b..1100c40025 100644 --- a/docs/tasks/debug-application-cluster/debug-init-containers.md +++ b/docs/tasks/debug-application-cluster/debug-init-containers.md @@ -8,9 +8,6 @@ assignees: - kow3ns - smarterclayton title: Debug Init Containers -redirect_from: -- "/docs/tasks/troubleshoot/debug-init-containers/" -- "/docs/tasks/troubleshoot/debug-init-containers.html" --- {% capture overview %} @@ -28,7 +25,7 @@ Init Containers. The example command lines below refer to the Pod as * You should be familiar with the basics of [Init Containers](/docs/concepts/abstractions/init-containers/). -* You should have [Configured an Init Container](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container/). +* You should have [Configured an Init Container](/docs/tasks/configure-pod-container/configure-pod-initialization/#creating-a-pod-that-has-an-init-container/). {% endcapture %} diff --git a/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md index 8c0c8e2e5e..6644f9a8cc 100644 --- a/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md +++ b/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md @@ -2,9 +2,6 @@ assignees: - bprashanth title: Debug Pods and Replication Controllers -redirect_from: -- "/docs/user-guide/debugging-pods-and-replication-controllers/" -- "/docs/user-guide/debugging-pods-and-replication-controllers.html" --- * TOC diff --git a/docs/tasks/debug-application-cluster/debug-service.md b/docs/tasks/debug-application-cluster/debug-service.md index 981225f55c..08db4fa1a2 100644 --- a/docs/tasks/debug-application-cluster/debug-service.md +++ b/docs/tasks/debug-application-cluster/debug-service.md @@ -4,9 +4,6 @@ assignees: - janetkuo - thockin title: Debug Services -redirect_from: -- "/docs/user-guide/debugging-services/" -- "/docs/user-guide/debugging-services.html" --- An issue that comes up rather frequently for new installations of Kubernetes is @@ -86,7 +83,7 @@ $ kubectl run hostnames --image=gcr.io/google_containers/serve_hostname \ deployment "hostnames" created ``` -`kubectl` commands will print the type and name of the resource created or mutated, which can then be used in subsequent commands. +`kubectl` commands will print the type and name of the resource created or mutated, which can then be used in subsequent commands. 
Note that this is the same as if you had started the `Deployment` with the following YAML: diff --git a/docs/tasks/debug-application-cluster/debug-stateful-set.md b/docs/tasks/debug-application-cluster/debug-stateful-set.md index 0dc1fdbbf1..2b0c52e0e7 100644 --- a/docs/tasks/debug-application-cluster/debug-stateful-set.md +++ b/docs/tasks/debug-application-cluster/debug-stateful-set.md @@ -8,9 +8,6 @@ assignees: - kow3ns - smarterclayton title: Debug a StatefulSet -redirect_from: -- "/docs/tasks/manage-stateful-set/debugging-a-statefulset/" -- "/docs/tasks/manage-stateful-set/debugging-a-statefulset.html" --- {% capture overview %} @@ -22,7 +19,7 @@ This task shows you how to debug a StatefulSet. {% capture prerequisites %} -* You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. +* You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. * You should have a StatefulSet running that you want to investigate. {% endcapture %} @@ -31,13 +28,13 @@ This task shows you how to debug a StatefulSet. ## Debugging a StatefulSet -In order to list all the pods which belong to a StatefulSet, which have a label `app=myapp` set on them, you can use the following: +In order to list all the pods which belong to a StatefulSet, which have a label `app=myapp` set on them, you can use the following: ```shell kubectl get pods -l app=myapp ``` -If you find that any Pods listed are in `Unknown` or `Terminating` state for an extended period of time, refer to the [Deleting StatefulSet Pods](/docs/tasks/manage-stateful-set/delete-pods/) task for instructions on how to deal with them. You can debug individual Pods in a StatefulSet using the [Debugging Pods](/docs/user-guide/debugging-pods-and-replication-controllers/#debugging-pods) guide. +If you find that any Pods listed are in `Unknown` or `Terminating` state for an extended period of time, refer to the [Deleting StatefulSet Pods](/docs/tasks/manage-stateful-set/delete-pods/) task for instructions on how to deal with them. You can debug individual Pods in a StatefulSet using the [Debugging Pods](/docs/user-guide/debugging-pods-and-replication-controllers/#debugging-pods) guide. StatefulSets provide a debug mechanism to pause all controller operations on Pods using an annotation. Setting the `pod.alpha.kubernetes.io/initialized` annotation to `"false"` on any StatefulSet Pod will *pause* all operations of the StatefulSet. When paused, the StatefulSet will not perform any scaling operations. Once the debug hook is set, you can execute commands within the containers of StatefulSet pods without interference from scaling operations. You can set the annotation to `"false"` by executing the following: @@ -45,11 +42,11 @@ StatefulSets provide a debug mechanism to pause all controller operations on Pod kubectl annotate pods pod.alpha.kubernetes.io/initialized="false" --overwrite ``` -When the annotation is set to `"false"`, the StatefulSet will not respond to its Pods becoming unhealthy or unavailable. It will not create replacement Pods till the annotation is removed or set to `"true"` on each StatefulSet Pod. +When the annotation is set to `"false"`, the StatefulSet will not respond to its Pods becoming unhealthy or unavailable. It will not create replacement Pods till the annotation is removed or set to `"true"` on each StatefulSet Pod. 
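To confirm the current value of the debug annotation on a Pod, something like the following works (a hedged sketch using plain YAML output):

```shell
# Inspect the Pod's annotations and check the debug flag.
kubectl get pod <pod-name> -o yaml | grep pod.alpha.kubernetes.io/initialized
```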
### Step-wise Initialization -You can also use the same annotation to debug race conditions during bootstrapping of the StatefulSet by setting the `pod.alpha.kubernetes.io/initialized` annotation to `"false"` in the `.spec.template.metadata.annotations` field of the StatefulSet prior to creating it. +You can also use the same annotation to debug race conditions during bootstrapping of the StatefulSet by setting the `pod.alpha.kubernetes.io/initialized` annotation to `"false"` in the `.spec.template.metadata.annotations` field of the StatefulSet prior to creating it. ```yaml apiVersion: apps/v1beta1 @@ -67,12 +64,12 @@ spec: pod.alpha.kubernetes.io/initialized: "false" ... ... -... +... ``` After setting the annotation, if you create the StatefulSet, you can wait for each Pod to come up and verify that it has initialized correctly. The StatefulSet will not create any subsequent Pods till the debug annotation is set to `"true"` (or removed) on each Pod that has already been created. You can set the annotation to `"true"` by executing the following: - + ```shell kubectl annotate pods pod.alpha.kubernetes.io/initialized="true" --overwrite ``` diff --git a/docs/tasks/debug-application-cluster/get-shell-running-container.md b/docs/tasks/debug-application-cluster/get-shell-running-container.md index 627eec5c15..5b2eae35f2 100644 --- a/docs/tasks/debug-application-cluster/get-shell-running-container.md +++ b/docs/tasks/debug-application-cluster/get-shell-running-container.md @@ -3,11 +3,6 @@ assignees: - caesarxuchao - mikedanese title: Get a Shell to a Running Container -redirect_from: -- "/docs/user-guide/getting-into-containers/" -- "/docs/user-guide/getting-into-containers.html" -- "/docs/tasks/kubectl/get-shell-running-container/" -- "/docs/tasks/kubectl/get-shell-running-container.html" --- {% capture overview %} diff --git a/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md b/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md index f369ce8c98..41fb8f43d9 100644 --- a/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md +++ b/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md @@ -3,9 +3,6 @@ assignees: - crassirostris - piosz title: Logging Using Elasticsearch and Kibana -redirect_from: -- "/docs/user-guide/logging/elasticsearch/" -- "/docs/user-guide/logging/elasticsearch.html" --- On the Google Compute Engine (GCE) platform, the default logging support targets diff --git a/docs/tasks/debug-application-cluster/logging-stackdriver.md b/docs/tasks/debug-application-cluster/logging-stackdriver.md index b9b4b77426..da83d7e791 100644 --- a/docs/tasks/debug-application-cluster/logging-stackdriver.md +++ b/docs/tasks/debug-application-cluster/logging-stackdriver.md @@ -3,9 +3,6 @@ assignees: - crassirostris - piosz title: Logging Using Stackdriver -redirect_from: -- "/docs/user-guide/logging/stackdriver/" -- "/docs/user-guide/logging/stackdriver.html" --- Before reading this page, it's highly recommended to familiarize yourself diff --git a/docs/tasks/debug-application-cluster/monitor-node-health.md b/docs/tasks/debug-application-cluster/monitor-node-health.md index fa4dde3532..917514f4c6 100644 --- a/docs/tasks/debug-application-cluster/monitor-node-health.md +++ b/docs/tasks/debug-application-cluster/monitor-node-health.md @@ -3,9 +3,6 @@ assignees: - Random-Liu - dchen1107 title: Monitor Node Health -redirect_from: -- "/docs/admin/node-problem/" -- "/docs/admin/node-problem.html" --- * TOC diff --git 
a/docs/tasks/debug-application-cluster/resource-usage-monitoring.md b/docs/tasks/debug-application-cluster/resource-usage-monitoring.md index f6424f8596..e23382df52 100644 --- a/docs/tasks/debug-application-cluster/resource-usage-monitoring.md +++ b/docs/tasks/debug-application-cluster/resource-usage-monitoring.md @@ -2,11 +2,6 @@ assignees: - mikedanese title: Tools for Monitoring Compute, Storage, and Network Resources -redirect_from: -- "/docs/user-guide/monitoring/" -- "/docs/user-guide/monitoring.html" -- "/docs/concepts/cluster-administration/resource-usage-monitoring/" -- "/docs/concepts/cluster-administration/resource-usage-monitoring.html" --- Understanding how an application behaves when deployed is crucial to scaling the application and providing a reliable service. In a Kubernetes cluster, application performance can be examined at many different levels: containers, [pods](/docs/user-guide/pods), [services](/docs/user-guide/services), and whole clusters. As part of Kubernetes we want to provide users with detailed resource usage information about their running applications at all these levels. This will give users deep insights into how their applications are performing and where possible application bottlenecks may be found. In comes [Heapster](https://github.com/kubernetes/heapster), a project meant to provide a base monitoring platform on Kubernetes. diff --git a/docs/tasks/debug-application-cluster/troubleshooting.md b/docs/tasks/debug-application-cluster/troubleshooting.md index 9253963ff3..a0e7a56196 100644 --- a/docs/tasks/debug-application-cluster/troubleshooting.md +++ b/docs/tasks/debug-application-cluster/troubleshooting.md @@ -3,8 +3,6 @@ assignees: - brendandburns - davidopp title: Troubleshooting -redirect_from: -- "/docs/troubleshooting/" --- Sometimes things go wrong. This guide is aimed at making them right. It has diff --git a/docs/tasks/federation/federation-service-discovery.md b/docs/tasks/federation/federation-service-discovery.md index 03875c3c48..dd5fc97ac1 100644 --- a/docs/tasks/federation/federation-service-discovery.md +++ b/docs/tasks/federation/federation-service-discovery.md @@ -3,11 +3,6 @@ assignees: - bprashanth - quinton-hoole title: Cross-cluster Service Discovery using Federated Services -redirect_from: -- "/docs/user-guide/federation/federated-services/" -- "/docs/user-guide/federation/federated-services.html" -- "/docs/concepts/cluster-administration/federation-service-discovery/" -- "/docs/concepts/cluster-administration/federation-service-discovery.html" --- This guide explains how to use Kubernetes Federated Services to deploy @@ -175,7 +170,7 @@ this. For example, if your Federation is configured to use Google Cloud DNS, and a managed DNS domain 'example.com': ``` shell -$ gcloud dns managed-zones describe example-dot-com +$ gcloud dns managed-zones describe example-dot-com creationTime: '2016-06-26T18:18:39.229Z' description: Example domain for Kubernetes Cluster Federation dnsName: example.com. 
diff --git a/docs/tasks/federation/set-up-cluster-federation-kubefed.md b/docs/tasks/federation/set-up-cluster-federation-kubefed.md index cd2c39c656..114d54b735 100644 --- a/docs/tasks/federation/set-up-cluster-federation-kubefed.md +++ b/docs/tasks/federation/set-up-cluster-federation-kubefed.md @@ -2,11 +2,6 @@ assignees: - madhusudancs title: Set up Cluster Federation with Kubefed -redirect_from: -- "/docs/admin/federation/kubefed/" -- "/docs/admin/federation/kubefed.html" -- "/docs/tutorials/federation/set-up-cluster-federation-kubefed/" -- "/docs/tutorials/federation/set-up-cluster-federation-kubefed.html" --- * TOC diff --git a/docs/tasks/federation/set-up-coredns-provider-federation.md b/docs/tasks/federation/set-up-coredns-provider-federation.md index 6a22b2b4ef..1161cefc9c 100644 --- a/docs/tasks/federation/set-up-coredns-provider-federation.md +++ b/docs/tasks/federation/set-up-coredns-provider-federation.md @@ -1,8 +1,5 @@ --- title: Set up CoreDNS as DNS provider for Cluster Federation -redirect_from: -- "/docs/tutorials/federation/set-up-coredns-provider-federation/" -- "/docs/tutorials/federation/set-up-coredns-provider-federation.html" --- {% capture overview %} diff --git a/docs/tasks/federation/set-up-placement-policies-federation.md b/docs/tasks/federation/set-up-placement-policies-federation.md index 5ed3fceadf..a5dd281593 100644 --- a/docs/tasks/federation/set-up-placement-policies-federation.md +++ b/docs/tasks/federation/set-up-placement-policies-federation.md @@ -1,8 +1,5 @@ --- title: Set up placement policies in Federation -redirect_from: -- "/docs/tutorials/federation/set-up-placement-policies-federation/" -- "/docs/tutorials/federation/set-up-placement-policies-federation.html" --- {% capture overview %} diff --git a/docs/tasks/index.md b/docs/tasks/index.md index 2921e97d38..d527b14f6c 100644 --- a/docs/tasks/index.md +++ b/docs/tasks/index.md @@ -1,10 +1,5 @@ --- -title: Tasks -redirect_from: -- "/docs/user-guide/configuring-containers/" -- "/docs/user-guide/configuring-containers.html" -- "/docs/user-guide/production-pods/" -- "/docs/user-guide/production-pods.html" +title: Tasks --- This section of the Kubernetes documentation contains pages that diff --git a/docs/tasks/inject-data-application/define-command-argument-container.md b/docs/tasks/inject-data-application/define-command-argument-container.md index f9796c0b82..c8209d0716 100644 --- a/docs/tasks/inject-data-application/define-command-argument-container.md +++ b/docs/tasks/inject-data-application/define-command-argument-container.md @@ -1,8 +1,5 @@ --- title: Define a Command and Arguments for a Container -redirect_from: -- "/docs/concepts/configuration/container-command-args/" -- "/docs/concepts/configuration/container-command-arg.html" --- {% capture overview %} diff --git a/docs/tasks/inject-data-application/define-environment-variable-container.md b/docs/tasks/inject-data-application/define-environment-variable-container.md index 8c44a145cc..6010f47624 100644 --- a/docs/tasks/inject-data-application/define-environment-variable-container.md +++ b/docs/tasks/inject-data-application/define-environment-variable-container.md @@ -1,8 +1,5 @@ --- title: Define Environment Variables for a Container -redirect_from: -- "/docs/tasks/configure-pod-container/define-environment-variable-container/" -- "/docs/tasks/configure-pod-container/define-environment-variable-container.html" --- {% capture overview %} diff --git a/docs/tasks/inject-data-application/distribute-credentials-secure.md 
b/docs/tasks/inject-data-application/distribute-credentials-secure.md index 950d702bc3..05bd8d6456 100644 --- a/docs/tasks/inject-data-application/distribute-credentials-secure.md +++ b/docs/tasks/inject-data-application/distribute-credentials-secure.md @@ -1,10 +1,5 @@ --- title: Distribute Credentials Securely Using Secrets -redirect_from: -- "/docs/user-guide/secrets/walkthrough/" -- "/docs/user-guide/secrets/walkthrough.html" -- "/docs/tasks/configure-pod-container/distribute-credentials-secure/" -- "/docs/tasks/configure-pod-container/distribute-credentials-secure.html" --- {% capture overview %} diff --git a/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md b/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md index 3c64967cd7..606fb99c7d 100644 --- a/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md +++ b/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information.md @@ -1,12 +1,5 @@ --- title: Expose Pod Information to Containers Through Files -redirect_from: -- "/docs/user-guide/downward-api/" -- "/docs/user-guide/downward-api/index.html" -- "/docs/user-guide/downward-api/volume/" -- "/docs/user-guide/downward-api/volume/index.html" -- "/docs/tasks/configure-pod-container/downward-api-volume-expose-pod-information/" -- "/docs/tasks/configure-pod-container/downward-api-volume-expose-pod-information.html" --- {% capture overview %} diff --git a/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md b/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md index 4416852005..4934aebb17 100644 --- a/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md +++ b/docs/tasks/inject-data-application/environment-variable-expose-pod-information.md @@ -1,10 +1,5 @@ --- title: Expose Pod Information to Containers Through Environment Variables -redirect_from: -- "/docs/user-guide/environment-guide/" -- "/docs/user-guide/environment-guide/index.html" -- "/docs/tasks/configure-pod-container/environment-variable-expose-pod-information/" -- "/docs/tasks/configure-pod-container/environment-variable-expose-pod-information.html" --- {% capture overview %} diff --git a/docs/tasks/inject-data-application/podpreset.md b/docs/tasks/inject-data-application/podpreset.md index 54af226c79..89222c4076 100644 --- a/docs/tasks/inject-data-application/podpreset.md +++ b/docs/tasks/inject-data-application/podpreset.md @@ -2,11 +2,6 @@ assignees: - jessfraz title: Inject Information into Pods Using a PodPreset -redirect_from: -- "/docs/user-guide/pod-preset/index/" -- "/docs/user-guide/pod-preset/index.html" -- "/docs/tasks/run-application/podpreset/" -- "/docs/tasks/run-application/podpreset.html" --- You can use a `podpreset` object to inject certain information into pods at creation @@ -148,7 +143,7 @@ spec: ### Pod Spec with `ConfigMap` Example -This is an example to show how a Pod spec is modified by the Pod Preset +This is an example to show how a Pod spec is modified by the Pod Preset that defines a `ConfigMap` for Environment Variables. **User submitted pod spec:** @@ -462,7 +457,7 @@ spec: ### Conflict Example -This is an example to show how a Pod spec is not modified by the Pod Preset +This is an example to show how a Pod spec is not modified by the Pod Preset when there is a conflict. 
**User submitted pod spec:** diff --git a/docs/tasks/job/coarse-parallel-processing-work-queue/index.md b/docs/tasks/job/coarse-parallel-processing-work-queue/index.md index 0cae8e008e..7d95beccbd 100644 --- a/docs/tasks/job/coarse-parallel-processing-work-queue/index.md +++ b/docs/tasks/job/coarse-parallel-processing-work-queue/index.md @@ -1,8 +1,5 @@ --- title: Coarse Parallel Processing Using a Work Queue -redirect_from: -- "/docs/user-guide/jobs/work-queue-1/" -- "/docs/user-guide/jobs/work-queue-1/index.html" --- * TOC diff --git a/docs/tasks/job/fine-parallel-processing-work-queue/index.md b/docs/tasks/job/fine-parallel-processing-work-queue/index.md index dd8c2ac8b9..660f38bcf5 100644 --- a/docs/tasks/job/fine-parallel-processing-work-queue/index.md +++ b/docs/tasks/job/fine-parallel-processing-work-queue/index.md @@ -1,8 +1,5 @@ --- title: Fine Parallel Processing Using a Work Queue -redirect_from: -- "/docs/user-guide/jobs/work-queue-2/" -- "/docs/user-guide/jobs/work-queue-2/index.html" --- * TOC diff --git a/docs/tasks/job/parallel-processing-expansion.md b/docs/tasks/job/parallel-processing-expansion.md index 7a76447001..f4bec2dcc1 100644 --- a/docs/tasks/job/parallel-processing-expansion.md +++ b/docs/tasks/job/parallel-processing-expansion.md @@ -1,8 +1,5 @@ --- title: Parallel Processing using Expansions -redirect_from: -- "/docs/user-guide/jobs/expansions/index/" -- "/docs/user-guide/jobs/expansions/index.html" --- * TOC @@ -90,9 +87,9 @@ We can check on the pods as well using the same label selector: ```shell $ kubectl get pods -l jobgroup=jobexample --show-all NAME READY STATUS RESTARTS AGE -process-item-apple-kixwv 0/1 Completed 0 4m -process-item-banana-wrsf7 0/1 Completed 0 4m -process-item-cherry-dnfu9 0/1 Completed 0 4m +process-item-apple-kixwv 0/1 Completed 0 4m +process-item-banana-wrsf7 0/1 Completed 0 4m +process-item-cherry-dnfu9 0/1 Completed 0 4m ``` There is not a single command to check on the output of all jobs at once, @@ -124,7 +121,7 @@ First, copy and paste the following template of a Job object, into a file called {%- set params = [{ "name": "apple", "url": "http://www.orangepippin.com/apples", }, { "name": "banana", "url": "https://en.wikipedia.org/wiki/Banana", }, { "name": "raspberry", "url": "https://www.raspberrypi.org/" }] -%} +%} {%- for p in params %} {%- set name = p["name"] %} {%- set url = p["url"] %} diff --git a/docs/tasks/manage-gpus/scheduling-gpus.md b/docs/tasks/manage-gpus/scheduling-gpus.md index b392d88471..79768e6224 100644 --- a/docs/tasks/manage-gpus/scheduling-gpus.md +++ b/docs/tasks/manage-gpus/scheduling-gpus.md @@ -2,9 +2,6 @@ assignees: - vishh title: Schedule GPUs -redirect_from: -- "/docs/user-guide/gpus/" -- "/docs/user-guide/gpus.html" --- {% capture overview %} @@ -33,17 +30,17 @@ Nvidia GPUs can be consumed via container level resource requirements using the ```yaml apiVersion: v1 kind: Pod -spec: - containers: - - +spec: + containers: + - name: gpu-container-1 - resources: - limits: + resources: + limits: alpha.kubernetes.io/nvidia-gpu: 2 # requesting 2 GPUs - - + - name: gpu-container-2 - resources: - limits: + resources: + limits: alpha.kubernetes.io/nvidia-gpu: 3 # requesting 3 GPUs ``` @@ -90,12 +87,12 @@ metadata: } } } -spec: - containers: - - +spec: + containers: + - name: gpu-container-1 - resources: - limits: + resources: + limits: alpha.kubernetes.io/nvidia-gpu: 2 ``` @@ -135,7 +132,7 @@ spec: - hostPath: path: /usr/lib/nvidia-375/bin name: bin - - hostPath: + - hostPath: path: /usr/lib/nvidia-375 name: 
lib ``` diff --git a/docs/tasks/run-application/delete-stateful-set.md b/docs/tasks/run-application/delete-stateful-set.md index 4b2b0cb34a..146bb6b326 100644 --- a/docs/tasks/run-application/delete-stateful-set.md +++ b/docs/tasks/run-application/delete-stateful-set.md @@ -6,9 +6,6 @@ assignees: - janetkuo - smarterclayton title: Delete a Stateful Set -redirect_from: -- "/docs/tasks/manage-stateful-set/deleting-a-statefulset/" -- "/docs/tasks/manage-stateful-set/deleting-a-statefulset.html" --- {% capture overview %} @@ -30,7 +27,7 @@ This task shows you how to delete a StatefulSet. You can delete a StatefulSet in the same way you delete other resources in Kubernetes: use the `kubectl delete` command, and specify the StatefulSet either by file or by name. ```shell -kubectl delete -f <file.yaml> +kubectl delete -f <file.yaml> ``` ```shell diff --git a/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index b3fd14864f..4619b8e086 100644 --- a/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -5,9 +5,6 @@ assignees: - justinsb - directxman12 title: Horizontal Pod Autoscaling Walkthrough -redirect_from: -- "/docs/user-guide/horizontal-pod-autoscaling/walkthrough/" -- "/docs/user-guide/horizontal-pod-autoscaling/walkthrough.html" --- Horizontal Pod Autoscaling automatically scales the number of pods diff --git a/docs/tasks/run-application/horizontal-pod-autoscale.md b/docs/tasks/run-application/horizontal-pod-autoscale.md index fcce446a72..ee504883a3 100644 --- a/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -4,9 +4,6 @@ assignees: - jszczepkowski - directxman12 title: Horizontal Pod Autoscaling -redirect_from: -- "/docs/user-guide/horizontal-pod-autoscaling/" -- "/docs/user-guide/horizontal-pod-autoscaling/index.html" --- This document describes the current state of Horizontal Pod Autoscaling in Kubernetes. 
diff --git a/docs/tasks/run-application/rolling-update-replication-controller.md b/docs/tasks/run-application/rolling-update-replication-controller.md index 187b41496c..47f9673efe 100644 --- a/docs/tasks/run-application/rolling-update-replication-controller.md +++ b/docs/tasks/run-application/rolling-update-replication-controller.md @@ -2,11 +2,6 @@ assignees: - janetkuo title: Perform Rolling Update Using a Replication Controller -redirect_from: -- "/docs/user-guide/rolling-updates/" -- "/docs/user-guide/rolling-updates.html" -- "/docs/user-guide/update-demo/index/" -- "/docs/user-guide/update-demo/index.html" --- * TOC diff --git a/docs/tasks/run-application/run-replicated-stateful-application.md b/docs/tasks/run-application/run-replicated-stateful-application.md index 0d18d10dab..f4b15adbb6 100644 --- a/docs/tasks/run-application/run-replicated-stateful-application.md +++ b/docs/tasks/run-application/run-replicated-stateful-application.md @@ -8,9 +8,6 @@ assignees: - kow3ns - smarterclayton title: Run a Replicated Stateful Application -redirect_from: -- "/docs/tutorials/stateful-application/run-replicated-stateful-application/" -- "/docs/tutorials/stateful-application/run-replicated-stateful-application.html" --- {% capture overview %} diff --git a/docs/tasks/run-application/run-single-instance-stateful-application.md b/docs/tasks/run-application/run-single-instance-stateful-application.md index 6e22164ba9..5860a79bdc 100644 --- a/docs/tasks/run-application/run-single-instance-stateful-application.md +++ b/docs/tasks/run-application/run-single-instance-stateful-application.md @@ -1,8 +1,5 @@ --- title: Run a Single-Instance Stateful Application -redirect_from: -- "/docs/tutorials/stateful-application/run-stateful-application/" -- "/docs/tutorials/stateful-application/run-stateful-application.html" --- {% capture overview %} @@ -115,7 +112,7 @@ for a secure solution. NAME READY STATUS RESTARTS AGE mysql-63082529-2z3ki 1/1 Running 0 3m - + 1. Inspect the Persistent Volume: kubectl describe pv mysql-pv @@ -127,7 +124,7 @@ for a secure solution. Reclaim Policy: Retain Access Modes: RWO Capacity: 20Gi - Message: + Message: Source: Type: GCEPersistentDisk (a Persistent Disk resource in Google Compute Engine) PDName: mysql-disk @@ -171,7 +168,7 @@ know your stateful MySQL database is up and running. Waiting for pod default/mysql-client-274442439-zyp6i to be running, status is Pending, pod ready: false If you don't see a command prompt, try pressing enter. 
-mysql> +mysql> ``` ## Updating diff --git a/docs/tasks/run-application/run-stateless-application-deployment.md b/docs/tasks/run-application/run-stateless-application-deployment.md index c0ed69b320..596b0fbbe3 100644 --- a/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/docs/tasks/run-application/run-stateless-application-deployment.md @@ -1,16 +1,5 @@ --- title: Run a Stateless Application Using a Deployment -redirect_from: -- "/docs/user-guide/simple-nginx/" -- "/docs/user-guide/simple-nginx.html" -- "/docs/user-guide/pods/single-container/" -- "/docs/user-guide/pods/single-container.html" -- "/docs/user-guide/deploying-applications/" -- "/docs/user-guide/deploying-applications.html" -- "/docs/tutorials/stateless-application/run-stateless-application-deployment/" -- "/docs/tutorials/stateless-application/run-stateless-application-deployment.html" -- "/docs/tutorials/stateless-application/run-stateless-ap-replication-controller/" -- "/docs/tutorials/stateless-application/run-stateless-ap-replication-controller.html" --- {% capture overview %} diff --git a/docs/tasks/run-application/scale-stateful-set.md b/docs/tasks/run-application/scale-stateful-set.md index de3b8fab46..12e68e556b 100644 --- a/docs/tasks/run-application/scale-stateful-set.md +++ b/docs/tasks/run-application/scale-stateful-set.md @@ -8,9 +8,6 @@ assignees: - kow3ns - smarterclayton title: Scale a StatefulSet -redirect_from: -- "/docs/tasks/manage-stateful-set/scale-stateful-set/" -- "/docs/tasks/manage-stateful-set/scale-stateful-set.html" --- {% capture overview %} @@ -19,8 +16,8 @@ This page shows how to scale a StatefulSet. {% capture prerequisites %} -* StatefulSets are only available in Kubernetes version 1.5 or later. -* **Not all stateful applications scale nicely.** You need to understand your StatefulSets well before continuing. If you're unsure, remember that it might not be safe to scale your StatefulSets. +* StatefulSets are only available in Kubernetes version 1.5 or later. +* **Not all stateful applications scale nicely.** You need to understand your StatefulSets well before continuing. If you're unsure, remember that it might not be safe to scale your StatefulSets. * You should perform scaling only when you're sure that your stateful application cluster is completely healthy. @@ -36,7 +33,7 @@ for which kubectl you're using. ### `kubectl scale` -First, find the StatefulSet you want to scale. Remember, you need to first understand if you can scale it or not. +First, find the StatefulSet you want to scale. Remember, you need to first understand if you can scale it or not. ```shell kubectl get statefulsets <stateful-set-name> @@ -50,24 +47,24 @@ kubectl scale statefulsets <stateful-set-name> --replicas=<new-replicas> ### Alternative: `kubectl apply` / `kubectl edit` / `kubectl patch` -Alternatively, you can do [in-place updates](/docs/concepts/cluster-administration/manage-deployment/#in-place-updates-of-resources) on your StatefulSets. +Alternatively, you can do [in-place updates](/docs/concepts/cluster-administration/manage-deployment/#in-place-updates-of-resources) on your StatefulSets. 
-If your StatefulSet was initially created with `kubectl apply` or `kubectl create --save-config`, +If your StatefulSet was initially created with `kubectl apply` or `kubectl create --save-config`, update `.spec.replicas` of the StatefulSet manifests, and then do a `kubectl apply`: -```shell +```shell kubectl apply -f <stateful-set-file-updated> ``` Otherwise, edit that field with `kubectl edit`: -```shell +```shell kubectl edit statefulsets <stateful-set-name> ``` Or use `kubectl patch`: -```shell +```shell kubectl patch statefulsets <stateful-set-name> -p '{"spec":{"replicas":<new-replicas>}}' ``` @@ -76,9 +73,9 @@ kubectl patch statefulsets <stateful-set-name> -p '{"spec":{"replicas":<new-replicas>}}' -With a StatefulSet of size > 1, if there is an unhealthy Pod, there is no way +With a StatefulSet of size > 1, if there is an unhealthy Pod, there is no way for Kubernetes to know (yet) if it is due to a permanent fault or a transient one (upgrade/maintenance/node reboot). If the Pod is unhealthy due to a permanent fault, scaling without correcting the fault may lead to a state where the StatefulSet membership @@ -90,7 +87,7 @@ the transient error may interfere with your scale-up/scale-down operation. Some databases have issues when nodes join and leave at the same time. It is better to reason about scaling operations at the application level in these cases, and perform scaling only when you're sure that your stateful application cluster is -completely healthy. +completely healthy. {% endcapture %} diff --git a/docs/tasks/run-application/upgrade-pet-set-to-stateful-set.md b/docs/tasks/run-application/upgrade-pet-set-to-stateful-set.md index 358fc022f5..369e98d9aa 100644 --- a/docs/tasks/run-application/upgrade-pet-set-to-stateful-set.md +++ b/docs/tasks/run-application/upgrade-pet-set-to-stateful-set.md @@ -8,9 +8,6 @@ assignees: - kow3ns - smarterclayton title: Upgrade from PetSets to StatefulSets -redirect_from: -- "/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/" -- "/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set.html" --- {% capture overview %} @@ -20,7 +17,7 @@ This page shows how to upgrade from PetSets (Kubernetes version 1.3 or 1.4) to * {% capture prerequisites %} * If you don't have PetSets in your current cluster, or you don't plan to upgrade - your master to Kubernetes 1.5 or later, you can skip this task. + your master to Kubernetes 1.5 or later, you can skip this task. {% endcapture %} @@ -28,12 +25,12 @@ This page shows how to upgrade from PetSets (Kubernetes version 1.3 or 1.4) to * ## Differences between alpha PetSets and beta StatefulSets -PetSet was introduced as an alpha resource in Kubernetes release 1.3, and was renamed to StatefulSet as a beta resource in 1.5. +PetSet was introduced as an alpha resource in Kubernetes release 1.3, and was renamed to StatefulSet as a beta resource in 1.5. Here are some notable changes: * **StatefulSet is the new PetSet**: PetSet is no longer available in Kubernetes release 1.5 or later. It becomes beta StatefulSet. To understand why the name was changed, see this [discussion thread](https://github.com/kubernetes/kubernetes/issues/27430). * **StatefulSet guards against split brain**: StatefulSets guarantee at most one Pod for a given ordinal index can be running anywhere in a cluster, to guard against split brain scenarios with distributed applications. *TODO: Link to doc about fencing.* -* **Flipped debug annotation behavior**: The default value of the debug annotation (`pod.alpha.kubernetes.io/initialized`) is now `true`. The absence of this annotation will pause PetSet operations, but will NOT pause StatefulSet operations. 
In most cases, you no longer need this annotation in your StatefulSet manifests. +* **Flipped debug annotation behavior**: The default value of the debug annotation (`pod.alpha.kubernetes.io/initialized`) is now `true`. The absence of this annotation will pause PetSet operations, but will NOT pause StatefulSet operations. In most cases, you no longer need this annotation in your StatefulSet manifests. ## Upgrading from PetSets to StatefulSets @@ -42,7 +39,7 @@ Note that these steps need to be done in the specified order. You **should NOT upgrade your Kubernetes master, nodes, or `kubectl` to Kubernetes version 1.5 or later**, until told to do so. -### Find all PetSets and their manifests +### Find all PetSets and their manifests First, find all existing PetSets in your cluster: @@ -53,42 +50,42 @@ kubectl get petsets --all-namespaces If you don't find any existing PetSets, you can safely upgrade your cluster to Kubernetes version 1.5 or later. -If you find existing PetSets and you have all their manifests at hand, you can continue to the next step to prepare StatefulSet manifests. +If you find existing PetSets and you have all their manifests at hand, you can continue to the next step to prepare StatefulSet manifests. -Otherwise, you need to save their manifests so that you can recreate them as StatefulSets later. -Here's an example command for you to save all existing PetSets as one file. +Otherwise, you need to save their manifests so that you can recreate them as StatefulSets later. +Here's an example command for you to save all existing PetSets as one file. ```shell -# Save all existing PetSets in all namespaces into a single file. Only needed when you don't have their manifests at hand. +# Save all existing PetSets in all namespaces into a single file. Only needed when you don't have their manifests at hand. kubectl get petsets --all-namespaces -o yaml > all-petsets.yaml ``` -### Prepare StatefulSet manifests +### Prepare StatefulSet manifests -Now, for every PetSet manifest you have, prepare a corresponding StatefulSet manifest: +Now, for every PetSet manifest you have, prepare a corresponding StatefulSet manifest: 1. Change `apiVersion` from `apps/v1alpha1` to `apps/v1beta1`. 2. Change `kind` from `PetSet` to `StatefulSet`. 3. If you have the debug hook annotation `pod.alpha.kubernetes.io/initialized` set to `true`, you can remove it because it's redundant. If you don't have this annotation, you should add one, with the value set to `false`, to pause StatefulSets operations. -It's recommended that you keep both PetSet manifests and StatefulSet manifests, so that you can safely roll back and recreate your PetSets, -if you decide not to upgrade your cluster. +It's recommended that you keep both PetSet manifests and StatefulSet manifests, so that you can safely roll back and recreate your PetSets, +if you decide not to upgrade your cluster. ### Delete all PetSets without cascading -If you find existing PetSets in your cluster in the previous step, you need to delete all PetSets *without cascading*. You can do this from `kubectl` with `--cascade=false`. -Note that if the flag isn't set, **cascading deletion will be performed by default**, and all Pods managed by your PetSets will be gone. +If you find existing PetSets in your cluster in the previous step, you need to delete all PetSets *without cascading*. You can do this from `kubectl` with `--cascade=false`. 
+Note that if the flag isn't set, **cascading deletion will be performed by default**, and all Pods managed by your PetSets will be gone. Delete those PetSets by specifying file names. This only works when the files contain only PetSets, but not other resources such as Services: ```shell -# Delete all existing PetSets without cascading # Note that <pet-set-file> should only contain PetSets that you want to delete, but not any other resources kubectl delete -f <pet-set-file> --cascade=false ``` -Alternatively, delete them by specifying resource names: +Alternatively, delete them by specifying resource names: ```shell # Alternatively, delete them by name and namespace without cascading @@ -98,23 +95,23 @@ kubectl delete petsets <pet-set-name> -n=<pet-set-namespace> --cascade=false Make sure you've deleted all PetSets in the system: ```shell -# Get all PetSets again to make sure you deleted them all +# Get all PetSets again to make sure you deleted them all # This should return nothing kubectl get petsets --all-namespaces ``` -At this moment, you've deleted all PetSets in your cluster, but not their Pods, Persistent Volumes, or Persistent Volume Claims. +At this moment, you've deleted all PetSets in your cluster, but not their Pods, Persistent Volumes, or Persistent Volume Claims. However, since the Pods are not managed by PetSets anymore, they will be vulnerable to node failures until you finish the master upgrade and recreate StatefulSets. ### Upgrade your master to Kubernetes version 1.5 or later Now, you can [upgrade your Kubernetes master](/docs/admin/cluster-management/#upgrading-a-cluster) to Kubernetes version 1.5 or later. Note that **you should NOT upgrade Nodes at this time**, because the Pods -(that were once managed by PetSets) are now vulnerable to node failures. +(that were once managed by PetSets) are now vulnerable to node failures. ### Upgrade kubectl to Kubernetes version 1.5 or later -Upgrade `kubectl` to Kubernetes version 1.5 or later, following [the steps for installing and setting up +Upgrade `kubectl` to Kubernetes version 1.5 or later, following [the steps for installing and setting up kubectl](/docs/tasks/kubectl/install/). ### Create StatefulSets diff --git a/docs/tasks/tools/install-kubectl.md b/docs/tasks/tools/install-kubectl.md index df655eb7a5..3c1bb31245 100644 --- a/docs/tasks/tools/install-kubectl.md +++ b/docs/tasks/tools/install-kubectl.md @@ -3,12 +3,8 @@ assignees: - bgrant0607 - mikedanese title: Install and Set Up kubectl -redirect_from: -- "/docs/tasks/kubectl/install/" -- "/docs/tasks/kubectl/install.html" -- "/docs/user-guide/prereqs/" -- "/docs/user-guide/prereqs.html" --- + {% capture overview %} Use the Kubernetes command-line tool, [kubectl](/docs/user-guide/kubectl), to deploy and manage applications on Kubernetes. Using kubectl, you can inspect cluster resources; create, delete, and update components; and look at your new cluster and bring up example apps. {% endcapture %} diff --git a/docs/tools/kompose/user-guide.md b/docs/tools/kompose/user-guide.md index 109309e44d..83088fb600 100644 --- a/docs/tools/kompose/user-guide.md +++ b/docs/tools/kompose/user-guide.md @@ -4,9 +4,6 @@ assignees: - cdrage title: Translate a Docker Compose File to Kubernetes Resources -redirect_from: -- "/docs/tools/kompose/" -- "/docs/tools/kompose/index.html" --- * TOC @@ -42,7 +39,7 @@ Kompose is released via GitHub on a three-week cycle, you can see all current re The current release we use is `0.5.0`. 
```sh -# Linux +# Linux curl -L https://github.com/kubernetes-incubator/kompose/releases/download/v0.5.0/kompose-linux-amd64 -o kompose # macOS @@ -111,27 +108,27 @@ You can also provide multiple docker-compose files at the same time: ```console $ kompose -f docker-compose.yml -f docker-guestbook.yml convert -file "frontend-service.yaml" created -file "mlbparks-service.yaml" created -file "mongodb-service.yaml" created -file "redis-master-service.yaml" created -file "redis-slave-service.yaml" created -file "frontend-deployment.yaml" created -file "mlbparks-deployment.yaml" created -file "mongodb-deployment.yaml" created -file "mongodb-claim0-persistentvolumeclaim.yaml" created -file "redis-master-deployment.yaml" created -file "redis-slave-deployment.yaml" created +file "frontend-service.yaml" created +file "mlbparks-service.yaml" created +file "mongodb-service.yaml" created +file "redis-master-service.yaml" created +file "redis-slave-service.yaml" created +file "frontend-deployment.yaml" created +file "mlbparks-deployment.yaml" created +file "mongodb-deployment.yaml" created +file "mongodb-claim0-persistentvolumeclaim.yaml" created +file "redis-master-deployment.yaml" created +file "redis-slave-deployment.yaml" created $ ls -mlbparks-deployment.yaml mongodb-service.yaml redis-slave-service.yaml mlbparks-service.yaml +mlbparks-deployment.yaml mongodb-service.yaml redis-slave-service.yaml mlbparks-service.yaml frontend-deployment.yaml mongodb-claim0-persistentvolumeclaim.yaml redis-master-service.yaml frontend-service.yaml mongodb-deployment.yaml redis-slave-deployment.yaml redis-master-deployment.yaml -``` +``` When multiple docker-compose files are provided, the configuration is merged. Any configuration that is common will be overridden by the subsequent file. - + Use `--bundle` or `--dab` to specify a DAB file, as below: ```console @@ -148,20 +145,20 @@ file "redis-deployment.yaml" created ```console $ kompose --provider openshift --file docker-voting.yml convert WARN [worker] Service cannot be created because of missing port. -INFO file "vote-service.yaml" created -INFO file "db-service.yaml" created -INFO file "redis-service.yaml" created -INFO file "result-service.yaml" created -INFO file "vote-deploymentconfig.yaml" created -INFO file "vote-imagestream.yaml" created -INFO file "worker-deploymentconfig.yaml" created -INFO file "worker-imagestream.yaml" created -INFO file "db-deploymentconfig.yaml" created -INFO file "db-imagestream.yaml" created -INFO file "redis-deploymentconfig.yaml" created -INFO file "redis-imagestream.yaml" created -INFO file "result-deploymentconfig.yaml" created -INFO file "result-imagestream.yaml" created +INFO file "vote-service.yaml" created +INFO file "db-service.yaml" created +INFO file "redis-service.yaml" created +INFO file "result-service.yaml" created +INFO file "vote-deploymentconfig.yaml" created +INFO file "vote-imagestream.yaml" created +INFO file "worker-deploymentconfig.yaml" created +INFO file "worker-imagestream.yaml" created +INFO file "db-deploymentconfig.yaml" created +INFO file "db-imagestream.yaml" created +INFO file "redis-deploymentconfig.yaml" created +INFO file "redis-imagestream.yaml" created +INFO file "result-deploymentconfig.yaml" created +INFO file "result-imagestream.yaml" created ``` In a similar way, you can convert DAB files to OpenShift. 
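To make the multi-file merge behavior described above concrete, here is a small sketch; both compose files and the `web` service in them are hypothetical and only illustrate the override order:

```console
# docker-compose.yml defines service "web" with image: nginx:1.11;
# docker-compose.prod.yml redefines the same service with image: nginx:1.13.
$ kompose -f docker-compose.yml -f docker-compose.prod.yml convert
file "web-service.yaml" created
file "web-deployment.yaml" created
# web-deployment.yaml should reference nginx:1.13, the value from the last file listed
```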
@@ -171,7 +168,7 @@ WARN: Unsupported key networks - ignoring INFO file "redis-svc.yaml" created INFO file "web-svc.yaml" created INFO file "web-deploymentconfig.yaml" created -INFO file "web-imagestream.yaml" created +INFO file "web-imagestream.yaml" created INFO file "redis-deploymentconfig.yaml" created INFO file "redis-imagestream.yaml" created ``` @@ -180,11 +177,11 @@ It also supports creating buildconfig for build directive in a service. By defau ```console $ kompose --provider openshift --file buildconfig/docker-compose.yml convert -WARN [foo] Service cannot be created because of missing port. -INFO Buildconfig using git@github.com:rtnpro/kompose.git::master as source. -INFO file "foo-deploymentconfig.yaml" created -INFO file "foo-imagestream.yaml" created -INFO file "foo-buildconfig.yaml" created +WARN [foo] Service cannot be created because of missing port. +INFO Buildconfig using git@github.com:rtnpro/kompose.git::master as source. +INFO file "foo-deploymentconfig.yaml" created +INFO file "foo-imagestream.yaml" created +INFO file "foo-buildconfig.yaml" created ``` **Note**: If you are manually pushing the OpenShift artifacts using ``oc create -f``, you need to ensure that you push the imagestream artifact before the buildconfig artifact, to work around this OpenShift issue: https://github.com/openshift/origin/issues/4518. @@ -200,12 +197,12 @@ $ kompose --file ./examples/docker-guestbook.yml up We are going to create Kubernetes deployments and services for your Dockerized application. If you need different kind of resources, use the 'kompose convert' and 'kubectl create -f' commands instead. -INFO Successfully created service: redis-master -INFO Successfully created service: redis-slave -INFO Successfully created service: frontend +INFO Successfully created service: redis-master +INFO Successfully created service: redis-slave +INFO Successfully created service: frontend INFO Successfully created deployment: redis-master INFO Successfully created deployment: redis-slave -INFO Successfully created deployment: frontend +INFO Successfully created deployment: frontend Your application has been deployed to Kubernetes. You can run 'kubectl get deployment,svc,pods' for details. @@ -236,13 +233,13 @@ $kompose --file ./examples/docker-guestbook.yml --provider openshift up We are going to create OpenShift DeploymentConfigs and Services for your Dockerized application. If you need different kind of resources, use the 'kompose convert' and 'oc create -f' commands instead. 
-INFO Successfully created service: redis-slave -INFO Successfully created service: frontend -INFO Successfully created service: redis-master +INFO Successfully created service: redis-slave +INFO Successfully created service: frontend +INFO Successfully created service: redis-master INFO Successfully created deployment: redis-slave INFO Successfully created ImageStream: redis-slave -INFO Successfully created deployment: frontend -INFO Successfully created ImageStream: frontend +INFO Successfully created deployment: frontend +INFO Successfully created ImageStream: frontend INFO Successfully created deployment: redis-master INFO Successfully created ImageStream: redis-master @@ -258,9 +255,9 @@ svc/frontend 172.30.46.64 80/TCP svc/redis-master 172.30.144.56 6379/TCP 8s svc/redis-slave 172.30.75.245 6379/TCP 8s NAME DOCKER REPO TAGS UPDATED -is/frontend 172.30.12.200:5000/fff/frontend -is/redis-master 172.30.12.200:5000/fff/redis-master -is/redis-slave 172.30.12.200:5000/fff/redis-slave v1 +is/frontend 172.30.12.200:5000/fff/frontend +is/redis-master 172.30.12.200:5000/fff/redis-master +is/redis-slave 172.30.12.200:5000/fff/redis-slave v1 ``` Note: @@ -272,11 +269,11 @@ Once you have deployed "composed" application to Kubernetes, `kompose down` will ```console $ kompose --file docker-guestbook.yml down -INFO Successfully deleted service: redis-master +INFO Successfully deleted service: redis-master INFO Successfully deleted deployment: redis-master -INFO Successfully deleted service: redis-slave +INFO Successfully deleted service: redis-slave INFO Successfully deleted deployment: redis-slave -INFO Successfully deleted service: frontend +INFO Successfully deleted service: frontend INFO Successfully deleted deployment: frontend ``` Note: @@ -296,7 +293,7 @@ file "web-deployment.json" created The `*-deployment.json` files contain the Deployment objects. ```console -$ kompose convert --rc +$ kompose convert --rc file "redis-svc.yaml" created file "web-svc.yaml" created file "redis-rc.yaml" created @@ -306,7 +303,7 @@ file "web-rc.yaml" created The `*-rc.yaml` files contain the Replication Controller objects. 
If you want to specify replicas (default is 1), use `--replicas` flag: `$ kompose convert --rc --replicas 3` ```console -$ kompose convert --ds +$ kompose convert --ds file "redis-svc.yaml" created file "web-svc.yaml" created file "redis-daemonset.yaml" created @@ -318,7 +315,7 @@ The `*-daemonset.yaml` files contain the Daemon Set objects If you want to generate a Chart to be used with [Helm](https://github.com/kubernetes/helm) simply do: ```console -$ kompose convert -c +$ kompose convert -c file "web-svc.yaml" created file "redis-svc.yaml" created file "web-deployment.yaml" created @@ -355,8 +352,8 @@ nginx: container_name: foobar $ kompose -f nginx.yml convert -WARN Unsupported key build - ignoring -WARN Unsupported key cap_add - ignoring +WARN Unsupported key build - ignoring +WARN Unsupported key cap_add - ignoring WARN Unsupported key dockerfile - ignoring ``` @@ -370,7 +367,7 @@ For example: ```yaml version: "2" -services: +services: nginx: image: nginx dockerfile: foobar @@ -378,7 +375,7 @@ services: cap_add: - ALL container_name: foobar - labels: + labels: kompose.service.type: nodeport ``` diff --git a/docs/tutorials/clusters/apparmor.md b/docs/tutorials/clusters/apparmor.md index 16967924b1..40d5d0591b 100644 --- a/docs/tutorials/clusters/apparmor.md +++ b/docs/tutorials/clusters/apparmor.md @@ -2,9 +2,6 @@ assignees: - stclair title: AppArmor -redirect_from: -- "/docs/admin/apparmor/index/" -- "/docs/admin/apparmor/index.html" --- {% capture overview %} diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md index bbd289bcc6..0af884f961 100644 --- a/docs/tutorials/index.md +++ b/docs/tutorials/index.md @@ -1,8 +1,5 @@ --- title: Tutorials -redirect_from: -- "/docs/samples/" -- "/docs/samples.html" --- This section of the Kubernetes documentation contains tutorials. 
diff --git a/docs/tutorials/kubernetes-basics/cluster-intro.html b/docs/tutorials/kubernetes-basics/cluster-intro.html index f6e1a60802..16427c1ee2 100644 --- a/docs/tutorials/kubernetes-basics/cluster-intro.html +++ b/docs/tutorials/kubernetes-basics/cluster-intro.html @@ -1,7 +1,4 @@ --- -redirect_from: -- "/docs/tutorials/getting-started/create-cluster/" -- "/docs/tutorials/getting-started/create-cluster.html" title: Using Minikube to Create a Cluster --- diff --git a/docs/tutorials/object-management-kubectl/declarative-object-management-configuration.md b/docs/tutorials/object-management-kubectl/declarative-object-management-configuration.md index a2abda7455..d91cc4377b 100644 --- a/docs/tutorials/object-management-kubectl/declarative-object-management-configuration.md +++ b/docs/tutorials/object-management-kubectl/declarative-object-management-configuration.md @@ -1,8 +1,5 @@ --- title: Declarative Management of Kubernetes Objects Using Configuration Files -redirect_from: -- "/docs/concepts/tools/kubectl/object-management-using-declarative-config/" -- "/docs/concepts/tools/kubectl/object-management-using-declarative-config.html" --- {% capture overview %} diff --git a/docs/tutorials/object-management-kubectl/imperative-object-management-command.md b/docs/tutorials/object-management-kubectl/imperative-object-management-command.md index b1e58854fd..cbf6c44bb2 100644 --- a/docs/tutorials/object-management-kubectl/imperative-object-management-command.md +++ b/docs/tutorials/object-management-kubectl/imperative-object-management-command.md @@ -1,8 +1,5 @@ --- title: Managing Kubernetes Objects Using Imperative Commands -redirect_from: -- "/docs/concepts/tools/kubectl/object-management-using-imperative-commands/" -- "/docs/concepts/tools/kubectl/object-management-using-imperative-commands.html" --- {% capture overview %} diff --git a/docs/tutorials/object-management-kubectl/imperative-object-management-configuration.md b/docs/tutorials/object-management-kubectl/imperative-object-management-configuration.md index a6846e7fcd..ea26a5adc9 100644 --- a/docs/tutorials/object-management-kubectl/imperative-object-management-configuration.md +++ b/docs/tutorials/object-management-kubectl/imperative-object-management-configuration.md @@ -1,8 +1,5 @@ --- title: Imperative Management of Kubernetes Objects Using Configuration Files -redirect_from: -- "/docs/concepts/tools/kubectl/object-management-using-imperative-config/" -- "/docs/concepts/tools/kubectl/object-management-using-imperative-config.html" --- {% capture overview %} diff --git a/docs/tutorials/object-management-kubectl/object-management.md b/docs/tutorials/object-management-kubectl/object-management.md index 38bb586e52..2a65f83bdc 100644 --- a/docs/tutorials/object-management-kubectl/object-management.md +++ b/docs/tutorials/object-management-kubectl/object-management.md @@ -1,10 +1,5 @@ --- title: Kubernetes Object Management -redirect_from: -- "/docs/concepts/tools/kubectl/object-management-overview/" -- "/docs/concepts/tools/kubectl/object-management-overview.html" -- "/docs/user-guide/working-with-resources/" -- "/docs/user-guide/working-with-resources.html" --- {% capture overview %} diff --git a/docs/tutorials/stateless-application/hello-minikube.md b/docs/tutorials/stateless-application/hello-minikube.md index 47fabb336f..0e4e21fc55 100644 --- a/docs/tutorials/stateless-application/hello-minikube.md +++ b/docs/tutorials/stateless-application/hello-minikube.md @@ -1,9 +1,5 @@ --- - title: Hello Minikube -redirect_from: -- 
"/docs/hellonode/" -- "/docs/hellonode.html" --- {% capture overview %} @@ -77,10 +73,10 @@ brew install kubectl Determine whether you can access sites like [https://cloud.google.com/container-registry/](https://cloud.google.com/container-registry/) directly without a proxy, by opening a new terminal and using ```shell -curl --proxy "" https://cloud.google.com/container-registry/ +curl --proxy "" https://cloud.google.com/container-registry/ ``` -If NO proxy is required, start the Minikube cluster: +If NO proxy is required, start the Minikube cluster: ```shell minikube start --vm-driver=xhyve diff --git a/js/redirects.js b/js/redirects.js index 3cd29fe6be..f7aae1ed62 100644 --- a/js/redirects.js +++ b/js/redirects.js @@ -3,7 +3,7 @@ $( document ).ready(function() { var notHere = false; var forwardingURL = window.location.href; - var oldURLs = ["/README.md","/README.html","/index.md",".html",".md","/v1.1/","/v1.0/"]; + var oldURLs = ["/README.md","/README.html","/index.md",".html",".md"]; /* var: forwardingRules * type: array of objects @@ -32,62 +32,6 @@ $( document ).ready(function() { "pattern":"\/contribute\/([0-9a-zA-Z\-\_]+)", "to":"/docs/home/contribute", "postfix":"/" - }, - { - "from":"/resource-quota", - "pattern":"", - "to":"/docs/concepts/policy/resource-quotas/", - "postfix":"" - }, - { - "from":"/horizontal-pod-autoscaler", - "pattern":"", - "to":"/docs/tasks/run-application/horizontal-pod-autoscale/", - "postfix":"" - }, - { - "from":"/docs/roadmap", - "pattern":"", - "to":"https://github.com/kubernetes/kubernetes/milestones/", - "postfix":"" - }, - { - "from":"/api-ref/", - "pattern":"", - "to":"https://github.com/kubernetes/kubernetes/milestones/", - "postfix":"" - }, - { - "from":"/kubernetes/third_party/swagger-ui/", - "pattern":"", - "to":"/docs/reference", - "postfix":"" - }, - { - "from":"/docs/user-guide/overview", - "pattern":"", - "to":"/docs/concepts/overview/what-is-kubernetes/", - "postfix":"" - }, - { - "from": "/docs/admin/multiple-schedulers", - "to": "/docs/tutorials/clusters/multiple-schedulers/" - }, - { - "from": "/docs/troubleshooting/", - "to": "/docs/tasks/debug-application-cluster/troubleshooting/" - }, - { - "from": "/docs/concepts/services-networking/networkpolicies/", - "to": "/docs/concepts/services-networking/network-policies/" - }, - { - "from": "/docs/getting-started-guides/meanstack/", - "to": "https://medium.com/google-cloud/running-a-mean-stack-on-google-cloud-platform-with-kubernetes-149ca81c2b5d" - }, - { - "from": "/docs/samples/", - "to": "/docs/tutorials/" } ];