From eaaaa781a78911dd87d30403736957c09f1fb160 Mon Sep 17 00:00:00 2001
From: John Mulhausen
Date: Wed, 24 Feb 2016 13:47:57 -0800
Subject: [PATCH] Single-source/multi-version docs structure

---
 _config.yml | 18 +
 _data/v1_0/globals.yml | 5 +
 _data/v1_0/guides.yml | 179 ++++
 _data/v1_0/overrides.yml | 16 +
 _data/v1_0/reference.yml | 184 ++++
 _data/v1_0/samples.yml | 54 ++
 _data/v1_0/support.yml | 40 +
 _data/v1_2/globals.yml | 5 +
 _data/v1_2/guides.yml | 179 ++++
 _data/v1_2/overrides.yml | 16 +
 _data/v1_2/reference.yml | 184 ++++
 _data/v1_2/samples.yml | 54 ++
 _data/v1_2/support.yml | 40 +
 .../docs/docs/admin/accessing-the-api.md | 73 ++
 .../docs/docs/admin/admission-controllers.md | 131 +++
 _includes/docs/docs/admin/authentication.md | 126 +++
 _includes/docs/docs/admin/authorization.md | 129 +++
 .../docs/docs/admin/cluster-components.md | 116 +++
 _includes/docs/docs/admin/cluster-large.md | 67 ++
 .../docs/docs/admin/cluster-management.md | 185 ++++
 .../docs/admin/cluster-troubleshooting.md | 110 +++
 _includes/docs/docs/admin/daemon.yaml | 18 +
 _includes/docs/docs/admin/daemons.md | 167 ++++
 _includes/docs/docs/admin/dns.md | 37 +
 _includes/docs/docs/admin/etcd.md | 44 +
 .../docs/docs/admin/garbage-collection.md | 59 ++
 .../docs/docs/admin/high-availability.md | 225 +++++
 .../admin/high-availability/default-kubelet | 8 +
 .../docs/admin/high-availability/etcd.yaml | 87 ++
 .../high-availability/kube-apiserver.yaml | 90 ++
 .../kube-controller-manager.yaml | 82 ++
 .../high-availability/kube-scheduler.yaml | 30 +
 .../docs/admin/high-availability/monit-docker | 9 +
 .../admin/high-availability/monit-kubelet | 11 +
 .../admin/high-availability/podmaster.yaml | 43 +
 _includes/docs/docs/admin/index.md | 76 ++
 _includes/docs/docs/admin/limitrange/index.md | 192 ++++
 .../docs/admin/limitrange/invalid-pod.yaml | 12 +
 .../docs/docs/admin/limitrange/limits.yaml | 26 +
 .../docs/docs/admin/limitrange/namespace.yaml | 4 +
 .../docs/docs/admin/limitrange/valid-pod.yaml | 14 +
 _includes/docs/docs/admin/multi-cluster.md | 64 ++
 _includes/docs/docs/admin/namespaces.md | 143 +++
 _includes/docs/docs/admin/namespaces/index.md | 248 ++++++
 .../docs/admin/namespaces/namespace-dev.json | 10 +
 .../docs/admin/namespaces/namespace-prod.json | 10 +
 _includes/docs/docs/admin/networking.md | 182 ++++
 _includes/docs/docs/admin/node.md | 210 +++++
 _includes/docs/docs/admin/ovs-networking.md | 16 +
 _includes/docs/docs/admin/resource-quota.md | 152 ++++
 .../docs/docs/admin/resourcequota/index.md | 154 ++++
 .../docs/docs/admin/resourcequota/limits.yaml | 13 +
 .../docs/admin/resourcequota/namespace.yaml | 4 +
 .../docs/docs/admin/resourcequota/quota.yaml | 14 +
 _includes/docs/docs/admin/salt.md | 100 +++
 .../docs/docs/admin/service-accounts-admin.md | 95 ++
 _includes/docs/docs/admin/static-pods.md | 121 +++
 _includes/docs/docs/api.md | 127 +++
 _includes/docs/docs/devel/api-conventions.md | 657 ++++++++++++++
 _includes/docs/docs/devel/api_changes.md | 584 ++++++++++++
 _includes/docs/docs/devel/automation.md | 101 +++
 _includes/docs/docs/devel/cherry-picks.md | 31 +
 _includes/docs/docs/devel/cli-roadmap.md | 8 +
 _includes/docs/docs/devel/client-libraries.md | 20 +
 .../docs/docs/devel/coding-conventions.md | 56 ++
 _includes/docs/docs/devel/collab.md | 39 +
 .../docs/devel/developer-guides/vagrant.md | 343 ++++++++
 _includes/docs/docs/devel/development.md | 315 +++++++
 _includes/docs/docs/devel/e2e-tests.md | 110 +++
 _includes/docs/docs/devel/faster_reviews.md | 200 +++++
 _includes/docs/docs/devel/flaky-tests.md | 64 +
_includes/docs/docs/devel/getting-builds.md | 31 + _includes/docs/docs/devel/index.md | 76 ++ _includes/docs/docs/devel/instrumentation.md | 34 + _includes/docs/docs/devel/issues.md | 16 + .../docs/docs/devel/kubectl-conventions.md | 96 ++ _includes/docs/docs/devel/logging.md | 27 + .../docs/docs/devel/making-release-notes.md | 41 + _includes/docs/docs/devel/profiling.md | 43 + _includes/docs/docs/devel/pull-requests.md | 34 + _includes/docs/docs/devel/releasing.md | 307 +++++++ _includes/docs/docs/devel/scheduler.md | 48 + .../docs/docs/devel/scheduler_algorithm.md | 36 + .../devel/writing-a-getting-started-guide.md | 97 ++ .../docs/docs/getting-started-guides/aws.md | 101 +++ .../docs/docs/getting-started-guides/azure.md | 8 + .../getting-started-guides/binary_release.md | 25 + .../centos/centos_manual_config.md | 169 ++++ .../docs/getting-started-guides/cloudstack.md | 78 ++ .../docs/getting-started-guides/coreos.md | 69 ++ .../coreos/azure/.gitignore | 1 + .../coreos/azure/addons/skydns-rc.yaml | 92 ++ .../coreos/azure/addons/skydns-svc.yaml | 20 + .../coreos/azure/azure-login.js | 3 + .../kubernetes-cluster-etcd-node-template.yml | 19 + ...kubernetes-cluster-main-nodes-template.yml | 339 +++++++ .../coreos/azure/create-kubernetes-cluster.js | 15 + .../coreos/azure/destroy-cluster.js | 7 + .../coreos/azure/expose_guestbook_app_port.sh | 29 + .../coreos/azure/index.md | 225 +++++ .../coreos/azure/lib/azure_wrapper.js | 271 ++++++ .../coreos/azure/lib/cloud_config.js | 58 ++ .../azure/lib/deployment_logic/kubernetes.js | 77 ++ .../coreos/azure/lib/util.js | 33 + .../coreos/azure/package.json | 19 + .../coreos/azure/scale-kubernetes-cluster.js | 10 + .../coreos/bare_metal_calico.md | 118 +++ .../coreos/bare_metal_offline.md | 692 +++++++++++++++ .../coreos/cloud-configs/master.yaml | 140 +++ .../coreos/cloud-configs/node.yaml | 98 +++ .../coreos/coreos_multinode_cluster.md | 197 +++++ .../docs/docs/getting-started-guides/dcos.md | 128 +++ .../docker-multinode.md | 82 ++ .../docker-multinode/deployDNS.md | 49 ++ .../docker-multinode/master.md | 177 ++++ .../docker-multinode/master.sh | 176 ++++ .../docker-multinode/skydns-rc.yaml.in | 92 ++ .../docker-multinode/skydns-svc.yaml.in | 20 + .../docker-multinode/testing.md | 66 ++ .../docker-multinode/worker.md | 137 +++ .../docker-multinode/worker.sh | 174 ++++ .../docs/getting-started-guides/docker.md | 142 +++ .../fedora/fedora-calico.md | 308 +++++++ .../fedora/fedora_ansible_config.md | 224 +++++ .../fedora/fedora_manual_config.md | 213 +++++ .../fedora/flannel_multi_node_cluster.md | 171 ++++ .../docs/docs/getting-started-guides/gce.md | 213 +++++ .../docs/docs/getting-started-guides/index.md | 175 ++++ .../docs/docs/getting-started-guides/juju.md | 248 ++++++ .../getting-started-guides/libvirt-coreos.md | 291 ++++++ .../docs/getting-started-guides/locally.md | 114 +++ .../logging-elasticsearch.md | 241 +++++ .../docs/getting-started-guides/logging.md | 216 +++++ .../docs/getting-started-guides/meanstack.md | 442 ++++++++++ .../getting-started-guides/mesos-docker.md | 260 ++++++ .../docs/docs/getting-started-guides/mesos.md | 333 +++++++ .../docs/docs/getting-started-guides/ovirt.md | 48 + .../docs/getting-started-guides/rackspace.md | 61 ++ .../docs/getting-started-guides/rkt/index.md | 136 +++ .../docs/getting-started-guides/scratch.md | 826 +++++++++++++++++ .../getting-started-guides/ubuntu-calico.md | 271 ++++++ .../docs/getting-started-guides/ubuntu.md | 259 ++++++ .../docs/getting-started-guides/vagrant.md | 334 +++++++ 
.../docs/getting-started-guides/vsphere.md | 82 ++ _includes/docs/docs/index.md | 29 + .../docs/docs/reporting-security-issues.md | 22 + _includes/docs/docs/roadmap.md | 46 + _includes/docs/docs/troubleshooting.md | 56 ++ .../docs/user-guide/accessing-the-cluster.md | 269 ++++++ _includes/docs/docs/user-guide/annotations.md | 25 + .../user-guide/application-troubleshooting.md | 189 ++++ .../docs/docs/user-guide/compute-resources.md | 247 ++++++ .../docs/user-guide/config-best-practices.md | 23 + .../docs/user-guide/configuring-containers.md | 162 ++++ .../user-guide/connecting-applications.md | 384 ++++++++ ...connecting-to-applications-port-forward.md | 44 + .../connecting-to-applications-proxy.md | 26 + .../docs/user-guide/container-environment.md | 78 ++ _includes/docs/docs/user-guide/containers.md | 91 ++ .../docs/user-guide/debugging-services.md | 501 +++++++++++ .../docs/user-guide/deploying-applications.md | 112 +++ _includes/docs/docs/user-guide/deployments.md | 349 ++++++++ .../docs/user-guide/docker-cli-to-kubectl.md | 264 ++++++ .../docs/docs/user-guide/downward-api.md | 152 ++++ .../user-guide/downward-api/dapi-pod.yaml | 23 + .../docs/user-guide/downward-api/index.md | 33 + .../downward-api/volume/dapi-volume.yaml | 30 + .../user-guide/downward-api/volume/index.md | 65 ++ .../environment-guide/backend-rc.yaml | 30 + .../environment-guide/backend-srv.yaml | 13 + .../containers/backend/Dockerfile | 2 + .../containers/backend/backend.go | 37 + .../environment-guide/containers/index.md | 25 + .../containers/show/Dockerfile | 2 + .../environment-guide/containers/show/show.go | 95 ++ .../user-guide/environment-guide/index.md | 91 ++ .../user-guide/environment-guide/show-rc.yaml | 32 + .../environment-guide/show-srv.yaml | 15 + .../user-guide/getting-into-containers.md | 68 ++ .../user-guide/horizontal-pod-autoscaler.md | 76 ++ .../hpa-php-apache.yaml | 14 + .../image/Dockerfile | 5 + .../image/index.php | 7 + .../horizontal-pod-autoscaling/index.md | 182 ++++ _includes/docs/docs/user-guide/identifiers.md | 12 + _includes/docs/docs/user-guide/images.md | 235 +++++ _includes/docs/docs/user-guide/index.md | 88 ++ _includes/docs/docs/user-guide/ingress.md | 263 ++++++ _includes/docs/docs/user-guide/ingress.yaml | 9 + .../user-guide/introspection-and-debugging.md | 314 +++++++ _includes/docs/docs/user-guide/job.yaml | 20 + _includes/docs/docs/user-guide/jobs.md | 208 +++++ _includes/docs/docs/user-guide/jsonpath.md | 61 ++ .../docs/docs/user-guide/known-issues.md | 23 + .../docs/docs/user-guide/kubeconfig-file.md | 206 +++++ .../docs/docs/user-guide/kubectl-overview.md | 268 ++++++ _includes/docs/docs/user-guide/labels.md | 161 ++++ .../user-guide/liveness/exec-liveness.yaml | 21 + .../user-guide/liveness/http-liveness.yaml | 18 + .../docs/user-guide/liveness/image/Dockerfile | 4 + .../docs/user-guide/liveness/image/Makefile | 13 + .../docs/user-guide/liveness/image/server.go | 46 + .../docs/docs/user-guide/liveness/index.md | 80 ++ .../docs/user-guide/logging-demo/Makefile | 26 + .../docs/user-guide/logging-demo/index.md | 16 + .../logging-demo/synthetic_0_25lps.yaml | 30 + .../logging-demo/synthetic_10lps.yaml | 30 + _includes/docs/docs/user-guide/logging.md | 91 ++ .../docs/user-guide/managing-deployments.md | 407 +++++++++ _includes/docs/docs/user-guide/monitoring.md | 58 ++ _includes/docs/docs/user-guide/multi-pod.yaml | 49 ++ _includes/docs/docs/user-guide/namespaces.md | 87 ++ .../docs/user-guide/new-nginx-deployment.yaml | 16 + .../docs/user-guide/nginx-deployment.yaml | 16 + 
.../docs/user-guide/node-selection/index.md | 59 ++ .../docs/user-guide/node-selection/pod.yaml | 13 + _includes/docs/docs/user-guide/overview.md | 23 + .../docs/user-guide/persistent-volumes.md | 169 ++++ .../persistent-volumes/claims/claim-01.yaml | 10 + .../persistent-volumes/claims/claim-02.yaml | 10 + .../persistent-volumes/claims/claim-03.json | 17 + .../user-guide/persistent-volumes/index.md | 91 ++ .../simpletest/namespace.json | 10 + .../persistent-volumes/simpletest/pod.yaml | 20 + .../simpletest/service.json | 19 + .../persistent-volumes/volumes/gce.yaml | 13 + .../persistent-volumes/volumes/local-01.yaml | 13 + .../persistent-volumes/volumes/local-02.yaml | 14 + .../persistent-volumes/volumes/nfs.yaml | 12 + _includes/docs/docs/user-guide/pod-states.md | 123 +++ _includes/docs/docs/user-guide/pod.yaml | 12 + _includes/docs/docs/user-guide/pods.md | 122 +++ _includes/docs/docs/user-guide/prereqs.md | 54 ++ .../docs/docs/user-guide/production-pods.md | 341 +++++++ _includes/docs/docs/user-guide/quick-start.md | 64 ++ .../docs/user-guide/replication-controller.md | 76 ++ .../docs/docs/user-guide/replication.yaml | 19 + .../docs/user-guide/resourcequota/index.md | 4 + _includes/docs/docs/user-guide/secrets.md | 489 +++++++++++ .../docs/docs/user-guide/secrets/index.md | 57 ++ .../docs/user-guide/secrets/secret-pod.yaml | 18 + .../docs/docs/user-guide/secrets/secret.yaml | 7 + .../docs/docs/user-guide/security-context.md | 2 + .../docs/docs/user-guide/service-accounts.md | 190 ++++ .../docs/user-guide/services-firewalls.md | 49 ++ _includes/docs/docs/user-guide/services.md | 529 +++++++++++ .../docs/docs/user-guide/sharing-clusters.md | 117 +++ .../docs/docs/user-guide/simple-nginx.md | 54 ++ _includes/docs/docs/user-guide/simple-yaml.md | 92 ++ _includes/docs/docs/user-guide/ui.md | 57 ++ .../user-guide/update-demo/build-images.sh | 30 + .../update-demo/images/kitten/Dockerfile | 17 + .../update-demo/images/kitten/html/data.json | 3 + .../update-demo/images/kitten/html/kitten.jpg | Bin 0 -> 14769 bytes .../update-demo/images/nautilus/Dockerfile | 17 + .../images/nautilus/html/data.json | 3 + .../images/nautilus/html/nautilus.jpg | Bin 0 -> 21231 bytes .../docs/docs/user-guide/update-demo/index.md | 124 +++ .../user-guide/update-demo/kitten-rc.yaml | 20 + .../update-demo/local/LICENSE.angular | 21 + .../update-demo/local/angular.min.js | 210 +++++ .../update-demo/local/angular.min.js.map | 8 + .../user-guide/update-demo/local/index.html | 36 + .../user-guide/update-demo/local/script.js | 100 +++ .../user-guide/update-demo/local/style.css | 40 + .../user-guide/update-demo/nautilus-rc.yaml | 21 + _includes/docs/docs/user-guide/volumes.md | 376 ++++++++ .../docs/docs/user-guide/walkthrough/index.md | 180 ++++ .../docs/user-guide/walkthrough/k8s201.md | 275 ++++++ .../walkthrough/pod-nginx-with-label.yaml | 12 + .../user-guide/walkthrough/pod-nginx.yaml | 10 + .../user-guide/walkthrough/pod-redis.yaml | 14 + .../pod-with-http-healthcheck.yaml | 20 + .../user-guide/walkthrough/podtemplate.json | 22 + .../walkthrough/replication-controller.yaml | 24 + .../docs/user-guide/walkthrough/service.yaml | 16 + .../docs/user-guide/working-with-resources.md | 52 ++ _includes/docs/docs/whatisk8s.md | 92 ++ _includes/docs/editdocs.md | 114 +++ _includes/docs/index.md | 114 +++ _includes/docs/pagelist.md | 20 + _includes/docs/reference.md | 1 + _includes/docs/samples.md | 1 + _includes/tocsearch.html | 2 +- _layouts/docwithnav.html | 3 +- _layouts/headerfooter.html | 4 +- 
v1.0/docs/admin/accessing-the-api.md | 3 + v1.0/docs/admin/admission-controllers.md | 3 + v1.0/docs/admin/authentication.md | 3 + v1.0/docs/admin/authorization.md | 3 + v1.0/docs/admin/cluster-components.md | 3 + v1.0/docs/admin/cluster-large.md | 3 + v1.0/docs/admin/cluster-management.md | 3 + v1.0/docs/admin/cluster-troubleshooting.md | 3 + v1.0/docs/admin/daemon.yaml | 18 + v1.0/docs/admin/daemons.md | 3 + v1.0/docs/admin/dns.md | 3 + v1.0/docs/admin/etcd.md | 3 + v1.0/docs/admin/garbage-collection.md | 3 + v1.0/docs/admin/high-availability.md | 3 + .../admin/high-availability/default-kubelet | 8 + v1.0/docs/admin/high-availability/etcd.yaml | 87 ++ .../high-availability/kube-apiserver.yaml | 90 ++ .../kube-controller-manager.yaml | 82 ++ .../high-availability/kube-scheduler.yaml | 30 + .../docs/admin/high-availability/monit-docker | 9 + .../admin/high-availability/monit-kubelet | 11 + .../admin/high-availability/podmaster.yaml | 43 + v1.0/docs/admin/index.md | 3 + v1.0/docs/admin/limitrange/index.md | 3 + v1.0/docs/admin/limitrange/invalid-pod.yaml | 12 + v1.0/docs/admin/limitrange/limits.yaml | 26 + v1.0/docs/admin/limitrange/namespace.yaml | 4 + v1.0/docs/admin/limitrange/valid-pod.yaml | 14 + v1.0/docs/admin/multi-cluster.md | 3 + v1.0/docs/admin/namespaces.md | 3 + v1.0/docs/admin/namespaces/index.md | 3 + v1.0/docs/admin/namespaces/namespace-dev.json | 10 + .../docs/admin/namespaces/namespace-prod.json | 10 + v1.0/docs/admin/networking.md | 3 + v1.0/docs/admin/node.md | 3 + v1.0/docs/admin/ovs-networking.md | 3 + v1.0/docs/admin/resource-quota.md | 3 + v1.0/docs/admin/resourcequota/index.md | 3 + v1.0/docs/admin/resourcequota/limits.yaml | 13 + v1.0/docs/admin/resourcequota/namespace.yaml | 4 + v1.0/docs/admin/resourcequota/quota.yaml | 14 + v1.0/docs/admin/salt.md | 3 + v1.0/docs/admin/service-accounts-admin.md | 3 + v1.0/docs/admin/static-pods.md | 3 + v1.0/docs/api.md | 3 + v1.0/docs/devel/api-conventions.md | 3 + v1.0/docs/devel/api_changes.md | 3 + v1.0/docs/devel/automation.md | 3 + v1.0/docs/devel/cherry-picks.md | 3 + v1.0/docs/devel/cli-roadmap.md | 3 + v1.0/docs/devel/client-libraries.md | 3 + v1.0/docs/devel/coding-conventions.md | 3 + v1.0/docs/devel/collab.md | 3 + v1.0/docs/devel/developer-guides/vagrant.md | 3 + v1.0/docs/devel/development.md | 3 + v1.0/docs/devel/e2e-tests.md | 3 + v1.0/docs/devel/faster_reviews.md | 3 + v1.0/docs/devel/flaky-tests.md | 3 + v1.0/docs/devel/getting-builds.md | 3 + v1.0/docs/devel/index.md | 3 + v1.0/docs/devel/instrumentation.md | 3 + v1.0/docs/devel/issues.md | 3 + v1.0/docs/devel/kubectl-conventions.md | 3 + v1.0/docs/devel/logging.md | 3 + v1.0/docs/devel/making-release-notes.md | 3 + v1.0/docs/devel/profiling.md | 3 + v1.0/docs/devel/pull-requests.md | 3 + v1.0/docs/devel/releasing.md | 3 + v1.0/docs/devel/scheduler.md | 3 + v1.0/docs/devel/scheduler_algorithm.md | 3 + .../devel/writing-a-getting-started-guide.md | 3 + v1.0/docs/getting-started-guides/aws.md | 3 + v1.0/docs/getting-started-guides/azure.md | 3 + .../getting-started-guides/binary_release.md | 3 + .../centos/centos_manual_config.md | 3 + .../docs/getting-started-guides/cloudstack.md | 3 + v1.0/docs/getting-started-guides/coreos.md | 3 + .../coreos/azure/.gitignore | 1 + .../coreos/azure/addons/skydns-rc.yaml | 92 ++ .../coreos/azure/addons/skydns-svc.yaml | 20 + .../coreos/azure/azure-login.js | 3 + .../kubernetes-cluster-etcd-node-template.yml | 19 + ...kubernetes-cluster-main-nodes-template.yml | 339 +++++++ .../coreos/azure/create-kubernetes-cluster.js | 15 + 
.../coreos/azure/destroy-cluster.js | 7 + .../coreos/azure/expose_guestbook_app_port.sh | 29 + .../coreos/azure/index.md | 3 + .../coreos/azure/lib/azure_wrapper.js | 271 ++++++ .../coreos/azure/lib/cloud_config.js | 58 ++ .../azure/lib/deployment_logic/kubernetes.js | 77 ++ .../coreos/azure/lib/util.js | 33 + .../coreos/azure/package.json | 19 + .../coreos/azure/scale-kubernetes-cluster.js | 10 + .../coreos/bare_metal_calico.md | 3 + .../coreos/bare_metal_offline.md | 3 + .../coreos/cloud-configs/master.yaml | 140 +++ .../coreos/cloud-configs/node.yaml | 98 +++ .../coreos/coreos_multinode_cluster.md | 3 + v1.0/docs/getting-started-guides/dcos.md | 3 + .../docker-multinode.md | 3 + .../docker-multinode/deployDNS.md | 3 + .../docker-multinode/master.md | 3 + .../docker-multinode/master.sh | 176 ++++ .../docker-multinode/skydns-rc.yaml.in | 92 ++ .../docker-multinode/skydns-svc.yaml.in | 20 + .../docker-multinode/testing.md | 3 + .../docker-multinode/worker.md | 3 + .../docker-multinode/worker.sh | 174 ++++ v1.0/docs/getting-started-guides/docker.md | 3 + .../fedora/fedora-calico.md | 3 + .../fedora/fedora_ansible_config.md | 3 + .../fedora/fedora_manual_config.md | 3 + .../fedora/flannel_multi_node_cluster.md | 3 + v1.0/docs/getting-started-guides/gce.md | 3 + v1.0/docs/getting-started-guides/index.md | 3 + v1.0/docs/getting-started-guides/juju.md | 3 + .../getting-started-guides/libvirt-coreos.md | 3 + v1.0/docs/getting-started-guides/locally.md | 3 + .../logging-elasticsearch.md | 3 + v1.0/docs/getting-started-guides/logging.md | 3 + v1.0/docs/getting-started-guides/meanstack.md | 3 + .../getting-started-guides/mesos-docker.md | 3 + v1.0/docs/getting-started-guides/mesos.md | 3 + v1.0/docs/getting-started-guides/ovirt.md | 3 + v1.0/docs/getting-started-guides/rackspace.md | 3 + v1.0/docs/getting-started-guides/rkt/index.md | 3 + v1.0/docs/getting-started-guides/scratch.md | 3 + .../getting-started-guides/ubuntu-calico.md | 3 + v1.0/docs/getting-started-guides/ubuntu.md | 3 + v1.0/docs/getting-started-guides/vagrant.md | 3 + v1.0/docs/getting-started-guides/vsphere.md | 3 + v1.0/docs/index.md | 3 + v1.0/docs/reporting-security-issues.md | 3 + v1.0/docs/roadmap.md | 3 + v1.0/docs/troubleshooting.md | 3 + v1.0/docs/user-guide/accessing-the-cluster.md | 3 + v1.0/docs/user-guide/annotations.md | 3 + .../user-guide/application-troubleshooting.md | 3 + v1.0/docs/user-guide/compute-resources.md | 3 + v1.0/docs/user-guide/config-best-practices.md | 3 + .../docs/user-guide/configuring-containers.md | 3 + .../user-guide/connecting-applications.md | 3 + ...connecting-to-applications-port-forward.md | 3 + .../connecting-to-applications-proxy.md | 3 + v1.0/docs/user-guide/container-environment.md | 3 + v1.0/docs/user-guide/containers.md | 3 + v1.0/docs/user-guide/debugging-services.md | 3 + .../docs/user-guide/deploying-applications.md | 3 + v1.0/docs/user-guide/deployments.md | 3 + v1.0/docs/user-guide/docker-cli-to-kubectl.md | 3 + v1.0/docs/user-guide/downward-api.md | 3 + .../user-guide/downward-api/dapi-pod.yaml | 23 + v1.0/docs/user-guide/downward-api/index.md | 3 + .../downward-api/volume/dapi-volume.yaml | 30 + .../user-guide/downward-api/volume/index.md | 3 + .../environment-guide/backend-rc.yaml | 30 + .../environment-guide/backend-srv.yaml | 13 + .../containers/backend/Dockerfile | 2 + .../containers/backend/backend.go | 37 + .../environment-guide/containers/index.md | 3 + .../containers/show/Dockerfile | 2 + .../environment-guide/containers/show/show.go | 95 ++ 
 .../user-guide/environment-guide/index.md | 3 +
 .../user-guide/environment-guide/show-rc.yaml | 32 +
 .../environment-guide/show-srv.yaml | 15 +
 .../user-guide/getting-into-containers.md | 3 +
 .../user-guide/horizontal-pod-autoscaler.md | 3 +
 .../hpa-php-apache.yaml | 14 +
 .../image/Dockerfile | 5 +
 .../image/index.php | 7 +
 .../horizontal-pod-autoscaling/index.md | 3 +
 v1.0/docs/user-guide/identifiers.md | 3 +
 v1.0/docs/user-guide/images.md | 3 +
 v1.0/docs/user-guide/index.md | 3 +
 v1.0/docs/user-guide/ingress.md | 3 +
 v1.0/docs/user-guide/ingress.yaml | 9 +
 .../user-guide/introspection-and-debugging.md | 3 +
 v1.0/docs/user-guide/job.yaml | 20 +
 v1.0/docs/user-guide/jobs.md | 3 +
 v1.0/docs/user-guide/jsonpath.md | 3 +
 v1.0/docs/user-guide/known-issues.md | 3 +
 v1.0/docs/user-guide/kubeconfig-file.md | 3 +
 v1.0/docs/user-guide/kubectl-overview.md | 3 +
 v1.0/docs/user-guide/labels.md | 3 +
 .../user-guide/liveness/exec-liveness.yaml | 21 +
 .../user-guide/liveness/http-liveness.yaml | 18 +
 .../docs/user-guide/liveness/image/Dockerfile | 4 +
 v1.0/docs/user-guide/liveness/image/Makefile | 13 +
 v1.0/docs/user-guide/liveness/image/server.go | 46 +
 v1.0/docs/user-guide/liveness/index.md | 3 +
 v1.0/docs/user-guide/logging-demo/Makefile | 26 +
 v1.0/docs/user-guide/logging-demo/index.md | 3 +
 .../logging-demo/synthetic_0_25lps.yaml | 30 +
 .../logging-demo/synthetic_10lps.yaml | 30 +
 v1.0/docs/user-guide/logging.md | 3 +
 v1.0/docs/user-guide/managing-deployments.md | 3 +
 v1.0/docs/user-guide/monitoring.md | 3 +
 v1.0/docs/user-guide/multi-pod.yaml | 49 ++
 v1.0/docs/user-guide/namespaces.md | 3 +
 .../docs/user-guide/new-nginx-deployment.yaml | 16 +
 v1.0/docs/user-guide/nginx-deployment.yaml | 16 +
 v1.0/docs/user-guide/node-selection/index.md | 3 +
 v1.0/docs/user-guide/node-selection/pod.yaml | 13 +
 v1.0/docs/user-guide/overview.md | 3 +
 v1.0/docs/user-guide/persistent-volumes.md | 3 +
 .../persistent-volumes/claims/claim-01.yaml | 10 +
 .../persistent-volumes/claims/claim-02.yaml | 10 +
 .../persistent-volumes/claims/claim-03.json | 17 +
 .../user-guide/persistent-volumes/index.md | 3 +
 .../simpletest/namespace.json | 10 +
 .../persistent-volumes/simpletest/pod.yaml | 20 +
 .../simpletest/service.json | 19 +
 .../persistent-volumes/volumes/gce.yaml | 13 +
 .../persistent-volumes/volumes/local-01.yaml | 13 +
 .../persistent-volumes/volumes/local-02.yaml | 14 +
 .../persistent-volumes/volumes/nfs.yaml | 12 +
 v1.0/docs/user-guide/pod-states.md | 3 +
 v1.0/docs/user-guide/pod.yaml | 12 +
 v1.0/docs/user-guide/pods.md | 3 +
 v1.0/docs/user-guide/prereqs.md | 3 +
 v1.0/docs/user-guide/production-pods.md | 3 +
 v1.0/docs/user-guide/quick-start.md | 3 +
 .../docs/user-guide/replication-controller.md | 3 +
 v1.0/docs/user-guide/replication.yaml | 19 +
 v1.0/docs/user-guide/resourcequota/index.md | 3 +
 v1.0/docs/user-guide/secrets.md | 3 +
 v1.0/docs/user-guide/secrets/index.md | 3 +
 v1.0/docs/user-guide/secrets/secret-pod.yaml | 18 +
 v1.0/docs/user-guide/secrets/secret.yaml | 7 +
 v1.0/docs/user-guide/security-context.md | 3 +
 v1.0/docs/user-guide/service-accounts.md | 3 +
 v1.0/docs/user-guide/services-firewalls.md | 3 +
 v1.0/docs/user-guide/services.md | 3 +
 v1.0/docs/user-guide/sharing-clusters.md | 3 +
 v1.0/docs/user-guide/simple-nginx.md | 3 +
 v1.0/docs/user-guide/simple-yaml.md | 3 +
 v1.0/docs/user-guide/ui.md | 3 +
 .../user-guide/update-demo/build-images.sh | 30 +
 .../update-demo/images/kitten/Dockerfile | 17 +
 .../update-demo/images/kitten/html/data.json | 3 +
 .../update-demo/images/kitten/html/kitten.jpg | Bin 0 -> 14769 bytes
 .../update-demo/images/nautilus/Dockerfile | 17 +
 .../images/nautilus/html/data.json | 3 +
 .../images/nautilus/html/nautilus.jpg | Bin 0 -> 21231 bytes
 v1.0/docs/user-guide/update-demo/index.md | 3 +
 .../user-guide/update-demo/kitten-rc.yaml | 20 +
 .../update-demo/local/LICENSE.angular | 21 +
 .../update-demo/local/angular.min.js | 210 +++
 .../update-demo/local/angular.min.js.map | 8 +
 .../user-guide/update-demo/local/index.html | 36 +
 .../user-guide/update-demo/local/script.js | 100 +++
 .../user-guide/update-demo/local/style.css | 40 +
 .../user-guide/update-demo/nautilus-rc.yaml | 21 +
 v1.0/docs/user-guide/volumes.md | 3 +
 v1.0/docs/user-guide/walkthrough/index.md | 3 +
 v1.0/docs/user-guide/walkthrough/k8s201.md | 3 +
 .../walkthrough/pod-nginx-with-label.yaml | 12 +
 .../user-guide/walkthrough/pod-nginx.yaml | 10 +
 .../user-guide/walkthrough/pod-redis.yaml | 14 +
 .../pod-with-http-healthcheck.yaml | 20 +
 .../user-guide/walkthrough/podtemplate.json | 22 +
 .../walkthrough/replication-controller.yaml | 24 +
 v1.0/docs/user-guide/walkthrough/service.yaml | 16 +
 .../docs/user-guide/working-with-resources.md | 3 +
 v1.0/docs/whatisk8s.md | 3 +
 v1.0/editdocs.md | 3 +
 v1.0/index.md | 3 +
 v1.0/pagelist.md | 3 +
 v1.0/reference.md | 3 +
 v1.0/samples.md | 3 +
 v1.1/docs/admin/accessing-the-api.md | 74 +-
 v1.1/docs/admin/admission-controllers.md | 132 +--
 v1.1/docs/admin/authentication.md | 127 +--
 v1.1/docs/admin/authorization.md | 130 +--
 v1.1/docs/admin/cluster-components.md | 117 +--
 v1.1/docs/admin/cluster-large.md | 72 +-
 v1.1/docs/admin/cluster-management.md | 186 +---
 v1.1/docs/admin/cluster-troubleshooting.md | 111 +--
 v1.1/docs/admin/daemons.md | 168 +---
 v1.1/docs/admin/dns.md | 38 +-
 v1.1/docs/admin/etcd.md | 45 +-
 v1.1/docs/admin/garbage-collection.md | 60 +-
 v1.1/docs/admin/high-availability.md | 230 +----
 v1.1/docs/admin/index.md | 77 +-
 v1.1/docs/admin/limitrange/index.md | 197 +----
 v1.1/docs/admin/multi-cluster.md | 65 +-
 v1.1/docs/admin/namespaces.md | 144 +--
 v1.1/docs/admin/namespaces/index.md | 249 +----
 v1.1/docs/admin/networking.md | 183 +---
 v1.1/docs/admin/node.md | 211 +----
 v1.1/docs/admin/ovs-networking.md | 17 +-
 v1.1/docs/admin/resource-quota.md | 153 +---
 v1.1/docs/admin/resourcequota/index.md | 155 +---
 v1.1/docs/admin/salt.md | 105 +--
 v1.1/docs/admin/service-accounts-admin.md | 96 +-
 v1.1/docs/admin/static-pods.md | 126 +--
 v1.1/docs/api.md | 128 +--
 v1.1/docs/devel/api-conventions.md | 662 +-------------
 v1.1/docs/devel/api_changes.md | 585 +-----------
 v1.1/docs/devel/automation.md | 102 +--
 v1.1/docs/devel/cherry-picks.md | 32 +-
 v1.1/docs/devel/cli-roadmap.md | 9 +-
 v1.1/docs/devel/client-libraries.md | 21 +-
 v1.1/docs/devel/coding-conventions.md | 57 +-
 v1.1/docs/devel/collab.md | 40 +-
 v1.1/docs/devel/developer-guides/vagrant.md | 344 +------
 v1.1/docs/devel/development.md | 316 +------
 v1.1/docs/devel/e2e-tests.md | 111 +--
 v1.1/docs/devel/faster_reviews.md | 201 +----
 v1.1/docs/devel/flaky-tests.md | 65 +-
 v1.1/docs/devel/getting-builds.md | 32 +-
 v1.1/docs/devel/index.md | 77 +-
 v1.1/docs/devel/instrumentation.md | 35 +-
 v1.1/docs/devel/issues.md | 17 +-
 v1.1/docs/devel/kubectl-conventions.md | 97 +-
 v1.1/docs/devel/logging.md | 28 +-
 v1.1/docs/devel/making-release-notes.md | 42 +-
 v1.1/docs/devel/profiling.md | 44 +-
 v1.1/docs/devel/pull-requests.md | 35 +-
 v1.1/docs/devel/releasing.md | 308 +------
 v1.1/docs/devel/scheduler.md | 49 +-
 v1.1/docs/devel/scheduler_algorithm.md | 37 +-
 .../devel/writing-a-getting-started-guide.md | 98 +--
 v1.1/docs/getting-started-guides/aws.md | 106 +--
 v1.1/docs/getting-started-guides/azure.md | 9 +-
 .../getting-started-guides/binary_release.md | 26 +-
 .../centos/centos_manual_config.md | 174 +---
 .../docs/getting-started-guides/cloudstack.md | 79 +-
 v1.1/docs/getting-started-guides/coreos.md | 70 +-
 .../coreos/azure/index.md | 230 +----
 .../coreos/bare_metal_calico.md | 123 +--
 .../coreos/bare_metal_offline.md | 693 +--------------
 .../coreos/coreos_multinode_cluster.md | 198 +----
 v1.1/docs/getting-started-guides/dcos.md | 133 +--
 .../docker-multinode.md | 87 +-
 .../docker-multinode/deployDNS.md | 50 +-
 .../docker-multinode/master.md | 178 +---
 .../docker-multinode/testing.md | 67 +-
 .../docker-multinode/worker.md | 138 +--
 v1.1/docs/getting-started-guides/docker.md | 147 +---
 .../fedora/fedora-calico.md | 309 +------
 .../fedora/fedora_ansible_config.md | 229 +----
 .../fedora/fedora_manual_config.md | 218 +----
 .../fedora/flannel_multi_node_cluster.md | 176 +---
 v1.1/docs/getting-started-guides/gce.md | 218 +----
 v1.1/docs/getting-started-guides/index.md | 176 +---
 v1.1/docs/getting-started-guides/juju.md | 253 +-----
 .../getting-started-guides/libvirt-coreos.md | 296 +------
 v1.1/docs/getting-started-guides/locally.md | 119 +--
 .../logging-elasticsearch.md | 242 +----
 v1.1/docs/getting-started-guides/logging.md | 217 +----
 v1.1/docs/getting-started-guides/meanstack.md | 443 +--------
 .../getting-started-guides/mesos-docker.md | 261 +-----
 v1.1/docs/getting-started-guides/mesos.md | 338 +------
 v1.1/docs/getting-started-guides/ovirt.md | 49 +-
 v1.1/docs/getting-started-guides/rackspace.md | 62 +-
 v1.1/docs/getting-started-guides/rkt/index.md | 137 +--
 v1.1/docs/getting-started-guides/scratch.md | 831 +-----------------
 .../getting-started-guides/ubuntu-calico.md | 272 +-----
 v1.1/docs/getting-started-guides/ubuntu.md | 260 +-----
 v1.1/docs/getting-started-guides/vagrant.md | 339 +------
 v1.1/docs/getting-started-guides/vsphere.md | 87 +-
 v1.1/docs/index.md | 30 +-
 v1.1/docs/reporting-security-issues.md | 23 +-
 v1.1/docs/roadmap.md | 47 +-
 v1.1/docs/troubleshooting.md | 57 +-
 v1.1/docs/user-guide/accessing-the-cluster.md | 274 +-----
 v1.1/docs/user-guide/annotations.md | 26 +-
 .../user-guide/application-troubleshooting.md | 190 +---
 v1.1/docs/user-guide/compute-resources.md | 248 +-----
 v1.1/docs/user-guide/config-best-practices.md | 24 +-
 .../docs/user-guide/configuring-containers.md | 163 +---
 .../user-guide/connecting-applications.md | 385 +-------
 ...connecting-to-applications-port-forward.md | 49 +-
 .../connecting-to-applications-proxy.md | 31 +-
 v1.1/docs/user-guide/container-environment.md | 79 +-
 v1.1/docs/user-guide/containers.md | 92 +-
 v1.1/docs/user-guide/debugging-services.md | 502 +---------
 .../docs/user-guide/deploying-applications.md | 113 +--
 v1.1/docs/user-guide/deployments.md | 350 +------
 v1.1/docs/user-guide/docker-cli-to-kubectl.md | 265 +-----
 v1.1/docs/user-guide/downward-api.md | 153 +---
 v1.1/docs/user-guide/downward-api/index.md | 34 +-
 .../user-guide/downward-api/volume/index.md | 66 +-
 .../environment-guide/containers/index.md | 26 +-
 .../user-guide/environment-guide/index.md | 96 +-
 .../user-guide/getting-into-containers.md | 73 +-
 .../user-guide/horizontal-pod-autoscaler.md | 77 +-
 .../horizontal-pod-autoscaling/index.md | 183 +---
 v1.1/docs/user-guide/identifiers.md | 13 +-
 v1.1/docs/user-guide/images.md | 236 +----
 v1.1/docs/user-guide/index.md | 89 +-
 v1.1/docs/user-guide/ingress.md | 264 +-----
 .../user-guide/introspection-and-debugging.md | 315 +------
 v1.1/docs/user-guide/jobs.md | 209 +----
 v1.1/docs/user-guide/jsonpath.md | 62 +-
 v1.1/docs/user-guide/known-issues.md | 24 +-
 v1.1/docs/user-guide/kubeconfig-file.md | 211 +----
 v1.1/docs/user-guide/kubectl-overview.md | 269 +-----
 v1.1/docs/user-guide/labels.md | 162 +---
 v1.1/docs/user-guide/liveness/index.md | 81 +-
 v1.1/docs/user-guide/logging-demo/index.md | 17 +-
 v1.1/docs/user-guide/logging.md | 96 +-
 v1.1/docs/user-guide/managing-deployments.md | 408 +-------
 v1.1/docs/user-guide/monitoring.md | 59 +-
 v1.1/docs/user-guide/namespaces.md | 88 +-
 v1.1/docs/user-guide/node-selection/index.md | 60 +-
 v1.1/docs/user-guide/overview.md | 24 +-
 v1.1/docs/user-guide/persistent-volumes.md | 170 +---
 .../user-guide/persistent-volumes/index.md | 92 +-
 v1.1/docs/user-guide/pod-states.md | 124 +--
 v1.1/docs/user-guide/pods.md | 123 +--
 v1.1/docs/user-guide/prereqs.md | 55 +-
 v1.1/docs/user-guide/production-pods.md | 342 +------
 v1.1/docs/user-guide/quick-start.md | 65 +-
 .../docs/user-guide/replication-controller.md | 77 +-
 v1.1/docs/user-guide/resourcequota/index.md | 5 +-
 v1.1/docs/user-guide/secrets.md | 490 +---------
 v1.1/docs/user-guide/secrets/index.md | 58 +-
 v1.1/docs/user-guide/security-context.md | 3 +-
 v1.1/docs/user-guide/service-accounts.md | 191 +---
 v1.1/docs/user-guide/services-firewalls.md | 50 +-
 v1.1/docs/user-guide/services.md | 530 +---------
 v1.1/docs/user-guide/sharing-clusters.md | 118 +--
 v1.1/docs/user-guide/simple-nginx.md | 55 +-
 v1.1/docs/user-guide/simple-yaml.md | 93 +-
 v1.1/docs/user-guide/ui.md | 62 +-
 v1.1/docs/user-guide/update-demo/index.md | 129 +--
 v1.1/docs/user-guide/volumes.md | 377 +-------
 v1.1/docs/user-guide/walkthrough/index.md | 181 +---
 v1.1/docs/user-guide/walkthrough/k8s201.md | 280 +-----
 .../docs/user-guide/working-with-resources.md | 53 +-
 v1.1/docs/whatisk8s.md | 96 +-
 v1.1/editdocs.md | 116 +--
 v1.1/index.md | 116 +--
 v1.1/pagelist.md | 23 +-
 v1.1/reference.md | 4 +-
 v1.1/samples.md | 4 +-
 v1.2/docs/admin/accessing-the-api.md | 3 +
 v1.2/docs/admin/admission-controllers.md | 3 +
 v1.2/docs/admin/authentication.md | 3 +
 v1.2/docs/admin/authorization.md | 3 +
 v1.2/docs/admin/cluster-components.md | 3 +
 v1.2/docs/admin/cluster-large.md | 3 +
 v1.2/docs/admin/cluster-management.md | 3 +
 v1.2/docs/admin/cluster-troubleshooting.md | 3 +
 v1.2/docs/admin/daemon.yaml | 18 +
 v1.2/docs/admin/daemons.md | 3 +
 v1.2/docs/admin/dns.md | 3 +
 v1.2/docs/admin/etcd.md | 3 +
 v1.2/docs/admin/garbage-collection.md | 3 +
 v1.2/docs/admin/high-availability.md | 3 +
 .../admin/high-availability/default-kubelet | 8 +
 v1.2/docs/admin/high-availability/etcd.yaml | 87 ++
 .../high-availability/kube-apiserver.yaml | 90 ++
 .../kube-controller-manager.yaml | 82 ++
 .../high-availability/kube-scheduler.yaml | 30 +
 .../docs/admin/high-availability/monit-docker | 9 +
 .../admin/high-availability/monit-kubelet | 11 +
 .../admin/high-availability/podmaster.yaml | 43 +
 v1.2/docs/admin/index.md | 3 +
 v1.2/docs/admin/limitrange/index.md | 3 +
 v1.2/docs/admin/limitrange/invalid-pod.yaml | 12 +
 v1.2/docs/admin/limitrange/limits.yaml | 26 +
 v1.2/docs/admin/limitrange/namespace.yaml | 4 +
 v1.2/docs/admin/limitrange/valid-pod.yaml | 14 +
 v1.2/docs/admin/multi-cluster.md | 3 +
 v1.2/docs/admin/namespaces.md | 3 +
 v1.2/docs/admin/namespaces/index.md | 3 +
 v1.2/docs/admin/namespaces/namespace-dev.json | 10 +
 .../docs/admin/namespaces/namespace-prod.json | 10 +
 v1.2/docs/admin/networking.md | 3 +
 v1.2/docs/admin/node.md | 3 +
 v1.2/docs/admin/ovs-networking.md | 3 +
v1.2/docs/admin/resource-quota.md | 3 + v1.2/docs/admin/resourcequota/index.md | 3 + v1.2/docs/admin/resourcequota/limits.yaml | 13 + v1.2/docs/admin/resourcequota/namespace.yaml | 4 + v1.2/docs/admin/resourcequota/quota.yaml | 14 + v1.2/docs/admin/salt.md | 3 + v1.2/docs/admin/service-accounts-admin.md | 3 + v1.2/docs/admin/static-pods.md | 3 + v1.2/docs/api.md | 3 + v1.2/docs/devel/api-conventions.md | 3 + v1.2/docs/devel/api_changes.md | 3 + v1.2/docs/devel/automation.md | 3 + v1.2/docs/devel/cherry-picks.md | 3 + v1.2/docs/devel/cli-roadmap.md | 3 + v1.2/docs/devel/client-libraries.md | 3 + v1.2/docs/devel/coding-conventions.md | 3 + v1.2/docs/devel/collab.md | 3 + v1.2/docs/devel/developer-guides/vagrant.md | 3 + v1.2/docs/devel/development.md | 3 + v1.2/docs/devel/e2e-tests.md | 3 + v1.2/docs/devel/faster_reviews.md | 3 + v1.2/docs/devel/flaky-tests.md | 3 + v1.2/docs/devel/getting-builds.md | 3 + v1.2/docs/devel/index.md | 3 + v1.2/docs/devel/instrumentation.md | 3 + v1.2/docs/devel/issues.md | 3 + v1.2/docs/devel/kubectl-conventions.md | 3 + v1.2/docs/devel/logging.md | 3 + v1.2/docs/devel/making-release-notes.md | 3 + v1.2/docs/devel/profiling.md | 3 + v1.2/docs/devel/pull-requests.md | 3 + v1.2/docs/devel/releasing.md | 3 + v1.2/docs/devel/scheduler.md | 3 + v1.2/docs/devel/scheduler_algorithm.md | 3 + .../devel/writing-a-getting-started-guide.md | 3 + v1.2/docs/getting-started-guides/aws.md | 3 + v1.2/docs/getting-started-guides/azure.md | 3 + .../getting-started-guides/binary_release.md | 3 + .../centos/centos_manual_config.md | 3 + .../docs/getting-started-guides/cloudstack.md | 3 + v1.2/docs/getting-started-guides/coreos.md | 3 + .../coreos/azure/.gitignore | 1 + .../coreos/azure/addons/skydns-rc.yaml | 92 ++ .../coreos/azure/addons/skydns-svc.yaml | 20 + .../coreos/azure/azure-login.js | 3 + .../kubernetes-cluster-etcd-node-template.yml | 19 + ...kubernetes-cluster-main-nodes-template.yml | 339 +++++++ .../coreos/azure/create-kubernetes-cluster.js | 15 + .../coreos/azure/destroy-cluster.js | 7 + .../coreos/azure/expose_guestbook_app_port.sh | 29 + .../coreos/azure/index.md | 3 + .../coreos/azure/lib/azure_wrapper.js | 271 ++++++ .../coreos/azure/lib/cloud_config.js | 58 ++ .../azure/lib/deployment_logic/kubernetes.js | 77 ++ .../coreos/azure/lib/util.js | 33 + .../coreos/azure/package.json | 19 + .../coreos/azure/scale-kubernetes-cluster.js | 10 + .../coreos/bare_metal_calico.md | 3 + .../coreos/bare_metal_offline.md | 3 + .../coreos/cloud-configs/master.yaml | 140 +++ .../coreos/cloud-configs/node.yaml | 98 +++ .../coreos/coreos_multinode_cluster.md | 3 + v1.2/docs/getting-started-guides/dcos.md | 3 + .../docker-multinode.md | 3 + .../docker-multinode/deployDNS.md | 3 + .../docker-multinode/master.md | 3 + .../docker-multinode/master.sh | 176 ++++ .../docker-multinode/skydns-rc.yaml.in | 92 ++ .../docker-multinode/skydns-svc.yaml.in | 20 + .../docker-multinode/testing.md | 3 + .../docker-multinode/worker.md | 3 + .../docker-multinode/worker.sh | 174 ++++ v1.2/docs/getting-started-guides/docker.md | 3 + .../fedora/fedora-calico.md | 3 + .../fedora/fedora_ansible_config.md | 3 + .../fedora/fedora_manual_config.md | 3 + .../fedora/flannel_multi_node_cluster.md | 3 + v1.2/docs/getting-started-guides/gce.md | 3 + v1.2/docs/getting-started-guides/index.md | 3 + v1.2/docs/getting-started-guides/juju.md | 3 + .../getting-started-guides/libvirt-coreos.md | 3 + v1.2/docs/getting-started-guides/locally.md | 3 + .../logging-elasticsearch.md | 3 + 
v1.2/docs/getting-started-guides/logging.md | 3 + v1.2/docs/getting-started-guides/meanstack.md | 3 + .../getting-started-guides/mesos-docker.md | 3 + v1.2/docs/getting-started-guides/mesos.md | 3 + v1.2/docs/getting-started-guides/ovirt.md | 3 + v1.2/docs/getting-started-guides/rackspace.md | 3 + v1.2/docs/getting-started-guides/rkt/index.md | 3 + v1.2/docs/getting-started-guides/scratch.md | 3 + .../getting-started-guides/ubuntu-calico.md | 3 + v1.2/docs/getting-started-guides/ubuntu.md | 3 + v1.2/docs/getting-started-guides/vagrant.md | 3 + v1.2/docs/getting-started-guides/vsphere.md | 3 + v1.2/docs/index.md | 3 + v1.2/docs/reporting-security-issues.md | 3 + v1.2/docs/roadmap.md | 3 + v1.2/docs/troubleshooting.md | 3 + v1.2/docs/user-guide/accessing-the-cluster.md | 3 + v1.2/docs/user-guide/annotations.md | 3 + .../user-guide/application-troubleshooting.md | 3 + v1.2/docs/user-guide/compute-resources.md | 3 + v1.2/docs/user-guide/config-best-practices.md | 3 + .../docs/user-guide/configuring-containers.md | 3 + .../user-guide/connecting-applications.md | 3 + ...connecting-to-applications-port-forward.md | 3 + .../connecting-to-applications-proxy.md | 3 + v1.2/docs/user-guide/container-environment.md | 3 + v1.2/docs/user-guide/containers.md | 3 + v1.2/docs/user-guide/debugging-services.md | 3 + .../docs/user-guide/deploying-applications.md | 3 + v1.2/docs/user-guide/deployments.md | 3 + v1.2/docs/user-guide/docker-cli-to-kubectl.md | 3 + v1.2/docs/user-guide/downward-api.md | 3 + .../user-guide/downward-api/dapi-pod.yaml | 23 + v1.2/docs/user-guide/downward-api/index.md | 3 + .../downward-api/volume/dapi-volume.yaml | 30 + .../user-guide/downward-api/volume/index.md | 3 + .../environment-guide/backend-rc.yaml | 30 + .../environment-guide/backend-srv.yaml | 13 + .../containers/backend/Dockerfile | 2 + .../containers/backend/backend.go | 37 + .../environment-guide/containers/index.md | 3 + .../containers/show/Dockerfile | 2 + .../environment-guide/containers/show/show.go | 95 ++ .../user-guide/environment-guide/index.md | 3 + .../user-guide/environment-guide/show-rc.yaml | 32 + .../environment-guide/show-srv.yaml | 15 + .../user-guide/getting-into-containers.md | 3 + .../user-guide/horizontal-pod-autoscaler.md | 3 + .../hpa-php-apache.yaml | 14 + .../image/Dockerfile | 5 + .../image/index.php | 7 + .../horizontal-pod-autoscaling/index.md | 3 + v1.2/docs/user-guide/identifiers.md | 3 + v1.2/docs/user-guide/images.md | 3 + v1.2/docs/user-guide/index.md | 3 + v1.2/docs/user-guide/ingress.md | 3 + v1.2/docs/user-guide/ingress.yaml | 9 + .../user-guide/introspection-and-debugging.md | 3 + v1.2/docs/user-guide/job.yaml | 20 + v1.2/docs/user-guide/jobs.md | 3 + v1.2/docs/user-guide/jsonpath.md | 3 + v1.2/docs/user-guide/known-issues.md | 3 + v1.2/docs/user-guide/kubeconfig-file.md | 3 + v1.2/docs/user-guide/kubectl-overview.md | 3 + v1.2/docs/user-guide/labels.md | 3 + .../user-guide/liveness/exec-liveness.yaml | 21 + .../user-guide/liveness/http-liveness.yaml | 18 + .../docs/user-guide/liveness/image/Dockerfile | 4 + v1.2/docs/user-guide/liveness/image/Makefile | 13 + v1.2/docs/user-guide/liveness/image/server.go | 46 + v1.2/docs/user-guide/liveness/index.md | 3 + v1.2/docs/user-guide/logging-demo/Makefile | 26 + v1.2/docs/user-guide/logging-demo/index.md | 3 + .../logging-demo/synthetic_0_25lps.yaml | 30 + .../logging-demo/synthetic_10lps.yaml | 30 + v1.2/docs/user-guide/logging.md | 3 + v1.2/docs/user-guide/managing-deployments.md | 3 + v1.2/docs/user-guide/monitoring.md | 3 + 
 v1.2/docs/user-guide/multi-pod.yaml | 49 ++
 v1.2/docs/user-guide/namespaces.md | 3 +
 .../docs/user-guide/new-nginx-deployment.yaml | 16 +
 v1.2/docs/user-guide/nginx-deployment.yaml | 16 +
 v1.2/docs/user-guide/node-selection/index.md | 3 +
 v1.2/docs/user-guide/node-selection/pod.yaml | 13 +
 v1.2/docs/user-guide/overview.md | 3 +
 v1.2/docs/user-guide/persistent-volumes.md | 3 +
 .../persistent-volumes/claims/claim-01.yaml | 10 +
 .../persistent-volumes/claims/claim-02.yaml | 10 +
 .../persistent-volumes/claims/claim-03.json | 17 +
 .../user-guide/persistent-volumes/index.md | 3 +
 .../simpletest/namespace.json | 10 +
 .../persistent-volumes/simpletest/pod.yaml | 20 +
 .../simpletest/service.json | 19 +
 .../persistent-volumes/volumes/gce.yaml | 13 +
 .../persistent-volumes/volumes/local-01.yaml | 13 +
 .../persistent-volumes/volumes/local-02.yaml | 14 +
 .../persistent-volumes/volumes/nfs.yaml | 12 +
 v1.2/docs/user-guide/pod-states.md | 3 +
 v1.2/docs/user-guide/pod.yaml | 12 +
 v1.2/docs/user-guide/pods.md | 3 +
 v1.2/docs/user-guide/prereqs.md | 3 +
 v1.2/docs/user-guide/production-pods.md | 3 +
 v1.2/docs/user-guide/quick-start.md | 3 +
 .../docs/user-guide/replication-controller.md | 3 +
 v1.2/docs/user-guide/replication.yaml | 19 +
 v1.2/docs/user-guide/resourcequota/index.md | 3 +
 v1.2/docs/user-guide/secrets.md | 3 +
 v1.2/docs/user-guide/secrets/index.md | 3 +
 v1.2/docs/user-guide/secrets/secret-pod.yaml | 18 +
 v1.2/docs/user-guide/secrets/secret.yaml | 7 +
 v1.2/docs/user-guide/security-context.md | 3 +
 v1.2/docs/user-guide/service-accounts.md | 3 +
 v1.2/docs/user-guide/services-firewalls.md | 3 +
 v1.2/docs/user-guide/services.md | 3 +
 v1.2/docs/user-guide/sharing-clusters.md | 3 +
 v1.2/docs/user-guide/simple-nginx.md | 3 +
 v1.2/docs/user-guide/simple-yaml.md | 3 +
 v1.2/docs/user-guide/ui.md | 3 +
 .../user-guide/update-demo/build-images.sh | 30 +
 .../update-demo/images/kitten/Dockerfile | 17 +
 .../update-demo/images/kitten/html/data.json | 3 +
 .../update-demo/images/kitten/html/kitten.jpg | Bin 0 -> 14769 bytes
 .../update-demo/images/nautilus/Dockerfile | 17 +
 .../images/nautilus/html/data.json | 3 +
 .../images/nautilus/html/nautilus.jpg | Bin 0 -> 21231 bytes
 v1.2/docs/user-guide/update-demo/index.md | 3 +
 .../user-guide/update-demo/kitten-rc.yaml | 20 +
 .../update-demo/local/LICENSE.angular | 21 +
 .../update-demo/local/angular.min.js | 210 +++
 .../update-demo/local/angular.min.js.map | 8 +
 .../user-guide/update-demo/local/index.html | 36 +
 .../user-guide/update-demo/local/script.js | 100 +++
 .../user-guide/update-demo/local/style.css | 40 +
 .../user-guide/update-demo/nautilus-rc.yaml | 21 +
 v1.2/docs/user-guide/volumes.md | 3 +
 v1.2/docs/user-guide/walkthrough/index.md | 3 +
 v1.2/docs/user-guide/walkthrough/k8s201.md | 3 +
 .../walkthrough/pod-nginx-with-label.yaml | 12 +
 .../user-guide/walkthrough/pod-nginx.yaml | 10 +
 .../user-guide/walkthrough/pod-redis.yaml | 14 +
 .../pod-with-http-healthcheck.yaml | 20 +
 .../user-guide/walkthrough/podtemplate.json | 22 +
 .../walkthrough/replication-controller.yaml | 24 +
 v1.2/docs/user-guide/walkthrough/service.yaml | 16 +
 .../docs/user-guide/working-with-resources.md | 3 +
 v1.2/docs/whatisk8s.md | 3 +
 v1.2/editdocs.md | 3 +
 v1.2/index.md | 3 +
 v1.2/pagelist.md | 3 +
 v1.2/reference.md | 3 +
 v1.2/samples.md | 3 +
 994 files changed, 38110 insertions(+), 25072 deletions(-)
 create mode 100644 _data/v1_0/globals.yml
 create mode 100644 _data/v1_0/guides.yml
 create mode 100644 _data/v1_0/overrides.yml
 create mode 100644 _data/v1_0/reference.yml
 create mode 100644 _data/v1_0/samples.yml
 create mode 100644 _data/v1_0/support.yml
 create mode 100644 _data/v1_2/globals.yml
 create mode 100644 _data/v1_2/guides.yml
 create mode 100644 _data/v1_2/overrides.yml
 create mode 100644 _data/v1_2/reference.yml
 create mode 100644 _data/v1_2/samples.yml
 create mode 100644 _data/v1_2/support.yml
 create mode 100644 _includes/docs/docs/admin/accessing-the-api.md
 create mode 100644 _includes/docs/docs/admin/admission-controllers.md
 create mode 100644 _includes/docs/docs/admin/authentication.md
 create mode 100644 _includes/docs/docs/admin/authorization.md
 create mode 100644 _includes/docs/docs/admin/cluster-components.md
 create mode 100644 _includes/docs/docs/admin/cluster-large.md
 create mode 100644 _includes/docs/docs/admin/cluster-management.md
 create mode 100644 _includes/docs/docs/admin/cluster-troubleshooting.md
 create mode 100644 _includes/docs/docs/admin/daemon.yaml
 create mode 100644 _includes/docs/docs/admin/daemons.md
 create mode 100644 _includes/docs/docs/admin/dns.md
 create mode 100644 _includes/docs/docs/admin/etcd.md
 create mode 100644 _includes/docs/docs/admin/garbage-collection.md
 create mode 100644 _includes/docs/docs/admin/high-availability.md
 create mode 100644 _includes/docs/docs/admin/high-availability/default-kubelet
 create mode 100644 _includes/docs/docs/admin/high-availability/etcd.yaml
 create mode 100644 _includes/docs/docs/admin/high-availability/kube-apiserver.yaml
 create mode 100644 _includes/docs/docs/admin/high-availability/kube-controller-manager.yaml
 create mode 100644 _includes/docs/docs/admin/high-availability/kube-scheduler.yaml
 create mode 100644 _includes/docs/docs/admin/high-availability/monit-docker
 create mode 100644 _includes/docs/docs/admin/high-availability/monit-kubelet
 create mode 100644 _includes/docs/docs/admin/high-availability/podmaster.yaml
 create mode 100644 _includes/docs/docs/admin/index.md
 create mode 100644 _includes/docs/docs/admin/limitrange/index.md
 create mode 100644 _includes/docs/docs/admin/limitrange/invalid-pod.yaml
 create mode 100644 _includes/docs/docs/admin/limitrange/limits.yaml
 create mode 100644 _includes/docs/docs/admin/limitrange/namespace.yaml
 create mode 100644 _includes/docs/docs/admin/limitrange/valid-pod.yaml
 create mode 100644 _includes/docs/docs/admin/multi-cluster.md
 create mode 100644 _includes/docs/docs/admin/namespaces.md
 create mode 100644 _includes/docs/docs/admin/namespaces/index.md
 create mode 100644 _includes/docs/docs/admin/namespaces/namespace-dev.json
 create mode 100644 _includes/docs/docs/admin/namespaces/namespace-prod.json
 create mode 100644 _includes/docs/docs/admin/networking.md
 create mode 100644 _includes/docs/docs/admin/node.md
 create mode 100644 _includes/docs/docs/admin/ovs-networking.md
 create mode 100755 _includes/docs/docs/admin/resource-quota.md
 create mode 100644 _includes/docs/docs/admin/resourcequota/index.md
 create mode 100755 _includes/docs/docs/admin/resourcequota/limits.yaml
 create mode 100644 _includes/docs/docs/admin/resourcequota/namespace.yaml
 create mode 100644 _includes/docs/docs/admin/resourcequota/quota.yaml
 create mode 100644 _includes/docs/docs/admin/salt.md
 create mode 100644 _includes/docs/docs/admin/service-accounts-admin.md
 create mode 100644 _includes/docs/docs/admin/static-pods.md
 create mode 100644 _includes/docs/docs/api.md
 create mode 100644 _includes/docs/docs/devel/api-conventions.md
 create mode 100644 _includes/docs/docs/devel/api_changes.md
 create mode 100644 _includes/docs/docs/devel/automation.md
 create mode 100644 _includes/docs/docs/devel/cherry-picks.md
 create mode 100644 _includes/docs/docs/devel/cli-roadmap.md
 create mode 100644 _includes/docs/docs/devel/client-libraries.md
 create mode 100644 _includes/docs/docs/devel/coding-conventions.md
 create mode 100644 _includes/docs/docs/devel/collab.md
 create mode 100644 _includes/docs/docs/devel/developer-guides/vagrant.md
 create mode 100644 _includes/docs/docs/devel/development.md
 create mode 100644 _includes/docs/docs/devel/e2e-tests.md
 create mode 100644 _includes/docs/docs/devel/faster_reviews.md
 create mode 100644 _includes/docs/docs/devel/flaky-tests.md
 create mode 100644 _includes/docs/docs/devel/getting-builds.md
 create mode 100644 _includes/docs/docs/devel/index.md
 create mode 100644 _includes/docs/docs/devel/instrumentation.md
 create mode 100644 _includes/docs/docs/devel/issues.md
 create mode 100644 _includes/docs/docs/devel/kubectl-conventions.md
 create mode 100644 _includes/docs/docs/devel/logging.md
 create mode 100644 _includes/docs/docs/devel/making-release-notes.md
 create mode 100644 _includes/docs/docs/devel/profiling.md
 create mode 100644 _includes/docs/docs/devel/pull-requests.md
 create mode 100644 _includes/docs/docs/devel/releasing.md
 create mode 100755 _includes/docs/docs/devel/scheduler.md
 create mode 100755 _includes/docs/docs/devel/scheduler_algorithm.md
 create mode 100644 _includes/docs/docs/devel/writing-a-getting-started-guide.md
 create mode 100644 _includes/docs/docs/getting-started-guides/aws.md
 create mode 100644 _includes/docs/docs/getting-started-guides/azure.md
 create mode 100644 _includes/docs/docs/getting-started-guides/binary_release.md
 create mode 100644 _includes/docs/docs/getting-started-guides/centos/centos_manual_config.md
 create mode 100644 _includes/docs/docs/getting-started-guides/cloudstack.md
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos.md
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/.gitignore
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml
 create mode 100755 _includes/docs/docs/getting-started-guides/coreos/azure/azure-login.js
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml
 create mode 100755 _includes/docs/docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js
 create mode 100755 _includes/docs/docs/getting-started-guides/coreos/azure/destroy-cluster.js
 create mode 100755 _includes/docs/docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/index.md
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/lib/cloud_config.js
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/lib/util.js
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/azure/package.json
 create mode 100755 _includes/docs/docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/bare_metal_calico.md
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/bare_metal_offline.md
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/cloud-configs/master.yaml
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/cloud-configs/node.yaml
 create mode 100644 _includes/docs/docs/getting-started-guides/coreos/coreos_multinode_cluster.md
 create mode 100644 _includes/docs/docs/getting-started-guides/dcos.md
 create mode 100644 _includes/docs/docs/getting-started-guides/docker-multinode.md
 create mode 100644 _includes/docs/docs/getting-started-guides/docker-multinode/deployDNS.md
 create mode 100644 _includes/docs/docs/getting-started-guides/docker-multinode/master.md
 create mode 100755 _includes/docs/docs/getting-started-guides/docker-multinode/master.sh
 create mode 100644 _includes/docs/docs/getting-started-guides/docker-multinode/skydns-rc.yaml.in
 create mode 100644 _includes/docs/docs/getting-started-guides/docker-multinode/skydns-svc.yaml.in
 create mode 100644 _includes/docs/docs/getting-started-guides/docker-multinode/testing.md
 create mode 100644 _includes/docs/docs/getting-started-guides/docker-multinode/worker.md
 create mode 100755 _includes/docs/docs/getting-started-guides/docker-multinode/worker.sh
 create mode 100644 _includes/docs/docs/getting-started-guides/docker.md
 create mode 100644 _includes/docs/docs/getting-started-guides/fedora/fedora-calico.md
 create mode 100644 _includes/docs/docs/getting-started-guides/fedora/fedora_ansible_config.md
 create mode 100644 _includes/docs/docs/getting-started-guides/fedora/fedora_manual_config.md
 create mode 100644 _includes/docs/docs/getting-started-guides/fedora/flannel_multi_node_cluster.md
 create mode 100644 _includes/docs/docs/getting-started-guides/gce.md
 create mode 100644 _includes/docs/docs/getting-started-guides/index.md
 create mode 100644 _includes/docs/docs/getting-started-guides/juju.md
 create mode 100644 _includes/docs/docs/getting-started-guides/libvirt-coreos.md
 create mode 100644 _includes/docs/docs/getting-started-guides/locally.md
 create mode 100644 _includes/docs/docs/getting-started-guides/logging-elasticsearch.md
 create mode 100644 _includes/docs/docs/getting-started-guides/logging.md
 create mode 100644 _includes/docs/docs/getting-started-guides/meanstack.md
 create mode 100644 _includes/docs/docs/getting-started-guides/mesos-docker.md
 create mode 100644 _includes/docs/docs/getting-started-guides/mesos.md
 create mode 100644 _includes/docs/docs/getting-started-guides/ovirt.md
 create mode 100644 _includes/docs/docs/getting-started-guides/rackspace.md
 create mode 100644 _includes/docs/docs/getting-started-guides/rkt/index.md
 create mode 100644 _includes/docs/docs/getting-started-guides/scratch.md
 create mode 100644 _includes/docs/docs/getting-started-guides/ubuntu-calico.md
 create mode 100644 _includes/docs/docs/getting-started-guides/ubuntu.md
 create mode 100644 _includes/docs/docs/getting-started-guides/vagrant.md
 create mode 100644 _includes/docs/docs/getting-started-guides/vsphere.md
 create mode 100644 _includes/docs/docs/index.md
 create mode 100644 _includes/docs/docs/reporting-security-issues.md
 create mode 100644 _includes/docs/docs/roadmap.md
 create mode 100644 _includes/docs/docs/troubleshooting.md
 create mode 100644 _includes/docs/docs/user-guide/accessing-the-cluster.md
 create mode 100644 _includes/docs/docs/user-guide/annotations.md
 create mode 100644 _includes/docs/docs/user-guide/application-troubleshooting.md
 create mode 100644 _includes/docs/docs/user-guide/compute-resources.md
 create mode 100644 _includes/docs/docs/user-guide/config-best-practices.md
 create mode 100644 _includes/docs/docs/user-guide/configuring-containers.md
 create mode 100644 _includes/docs/docs/user-guide/connecting-applications.md
 create mode 100644 _includes/docs/docs/user-guide/connecting-to-applications-port-forward.md
 create mode 100644 _includes/docs/docs/user-guide/connecting-to-applications-proxy.md
 create mode 100644 _includes/docs/docs/user-guide/container-environment.md
 create mode 100644 _includes/docs/docs/user-guide/containers.md
 create mode 100644 _includes/docs/docs/user-guide/debugging-services.md
 create mode 100644 _includes/docs/docs/user-guide/deploying-applications.md
 create mode 100644 _includes/docs/docs/user-guide/deployments.md
 create mode 100644 _includes/docs/docs/user-guide/docker-cli-to-kubectl.md
 create mode 100644 _includes/docs/docs/user-guide/downward-api.md
 create mode 100644 _includes/docs/docs/user-guide/downward-api/dapi-pod.yaml
 create mode 100644 _includes/docs/docs/user-guide/downward-api/index.md
 create mode 100644 _includes/docs/docs/user-guide/downward-api/volume/dapi-volume.yaml
 create mode 100644 _includes/docs/docs/user-guide/downward-api/volume/index.md
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/backend-rc.yaml
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/backend-srv.yaml
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/containers/backend/Dockerfile
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/containers/backend/backend.go
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/containers/index.md
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/containers/show/Dockerfile
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/containers/show/show.go
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/index.md
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/show-rc.yaml
 create mode 100644 _includes/docs/docs/user-guide/environment-guide/show-srv.yaml
 create mode 100644 _includes/docs/docs/user-guide/getting-into-containers.md
 create mode 100644 _includes/docs/docs/user-guide/horizontal-pod-autoscaler.md
 create mode 100644 _includes/docs/docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml
 create mode 100644 _includes/docs/docs/user-guide/horizontal-pod-autoscaling/image/Dockerfile
 create mode 100755 _includes/docs/docs/user-guide/horizontal-pod-autoscaling/image/index.php
 create mode 100644 _includes/docs/docs/user-guide/horizontal-pod-autoscaling/index.md
 create mode 100644 _includes/docs/docs/user-guide/identifiers.md
 create mode 100644 _includes/docs/docs/user-guide/images.md
 create mode 100644 _includes/docs/docs/user-guide/index.md
 create mode 100644 _includes/docs/docs/user-guide/ingress.md
 create mode 100644 _includes/docs/docs/user-guide/ingress.yaml
 create mode 100644 _includes/docs/docs/user-guide/introspection-and-debugging.md
 create mode 100644 _includes/docs/docs/user-guide/job.yaml
 create mode 100644 _includes/docs/docs/user-guide/jobs.md
 create mode 100644 _includes/docs/docs/user-guide/jsonpath.md
 create mode 100644 _includes/docs/docs/user-guide/known-issues.md
 create mode 100644 _includes/docs/docs/user-guide/kubeconfig-file.md
 create mode 100644 _includes/docs/docs/user-guide/kubectl-overview.md
 create mode 100644 _includes/docs/docs/user-guide/labels.md
 create mode 100644 _includes/docs/docs/user-guide/liveness/exec-liveness.yaml
 create mode 100644 _includes/docs/docs/user-guide/liveness/http-liveness.yaml
 create mode 100644 _includes/docs/docs/user-guide/liveness/image/Dockerfile
 create mode 100644 _includes/docs/docs/user-guide/liveness/image/Makefile
 create mode 100644 _includes/docs/docs/user-guide/liveness/image/server.go
 create mode 100644 _includes/docs/docs/user-guide/liveness/index.md
 create mode 100644 _includes/docs/docs/user-guide/logging-demo/Makefile
 create mode 100644 _includes/docs/docs/user-guide/logging-demo/index.md
 create mode 100644 _includes/docs/docs/user-guide/logging-demo/synthetic_0_25lps.yaml
 create mode 100644 _includes/docs/docs/user-guide/logging-demo/synthetic_10lps.yaml
 create mode 100644 _includes/docs/docs/user-guide/logging.md
 create mode 100644 _includes/docs/docs/user-guide/managing-deployments.md
 create mode 100644 _includes/docs/docs/user-guide/monitoring.md
 create mode 100644 _includes/docs/docs/user-guide/multi-pod.yaml
 create mode 100644 _includes/docs/docs/user-guide/namespaces.md
 create mode 100644 _includes/docs/docs/user-guide/new-nginx-deployment.yaml
 create mode 100644 _includes/docs/docs/user-guide/nginx-deployment.yaml
 create mode 100644 _includes/docs/docs/user-guide/node-selection/index.md
 create mode 100644 _includes/docs/docs/user-guide/node-selection/pod.yaml
 create mode 100644 _includes/docs/docs/user-guide/overview.md
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes.md
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/claims/claim-01.yaml
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/claims/claim-02.yaml
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/claims/claim-03.json
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/index.md
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/simpletest/namespace.json
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/simpletest/pod.yaml
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/simpletest/service.json
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/volumes/gce.yaml
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/volumes/local-01.yaml
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/volumes/local-02.yaml
 create mode 100644 _includes/docs/docs/user-guide/persistent-volumes/volumes/nfs.yaml
 create mode 100644 _includes/docs/docs/user-guide/pod-states.md
 create mode 100644 _includes/docs/docs/user-guide/pod.yaml
 create mode 100644 _includes/docs/docs/user-guide/pods.md
 create mode 100644 _includes/docs/docs/user-guide/prereqs.md
 create mode 100644 _includes/docs/docs/user-guide/production-pods.md
 create mode 100644 _includes/docs/docs/user-guide/quick-start.md
 create mode 100644 _includes/docs/docs/user-guide/replication-controller.md
 create mode 100644 _includes/docs/docs/user-guide/replication.yaml
 create mode 100644 _includes/docs/docs/user-guide/resourcequota/index.md
 create mode 100644 _includes/docs/docs/user-guide/secrets.md
 create mode 100644 _includes/docs/docs/user-guide/secrets/index.md
 create mode 100644 _includes/docs/docs/user-guide/secrets/secret-pod.yaml
 create mode 100644 _includes/docs/docs/user-guide/secrets/secret.yaml
 create mode 100644 _includes/docs/docs/user-guide/security-context.md
 create mode 100644 _includes/docs/docs/user-guide/service-accounts.md
 create mode 100644 _includes/docs/docs/user-guide/services-firewalls.md
 create mode 100644 _includes/docs/docs/user-guide/services.md
 create mode 100644 _includes/docs/docs/user-guide/sharing-clusters.md
 create mode 100644 _includes/docs/docs/user-guide/simple-nginx.md
 create mode 100644 _includes/docs/docs/user-guide/simple-yaml.md
 create mode 100644 _includes/docs/docs/user-guide/ui.md
 create mode 100755 _includes/docs/docs/user-guide/update-demo/build-images.sh
 create mode 100644 _includes/docs/docs/user-guide/update-demo/images/kitten/Dockerfile
 create mode 100644 _includes/docs/docs/user-guide/update-demo/images/kitten/html/data.json
 create mode 100644 _includes/docs/docs/user-guide/update-demo/images/kitten/html/kitten.jpg
 create mode 100644 _includes/docs/docs/user-guide/update-demo/images/nautilus/Dockerfile
 create mode 100644 _includes/docs/docs/user-guide/update-demo/images/nautilus/html/data.json
 create mode 100644 _includes/docs/docs/user-guide/update-demo/images/nautilus/html/nautilus.jpg
 create mode 100644 _includes/docs/docs/user-guide/update-demo/index.md
 create mode 100644 _includes/docs/docs/user-guide/update-demo/kitten-rc.yaml
 create mode 100644 _includes/docs/docs/user-guide/update-demo/local/LICENSE.angular
 create mode 100644 _includes/docs/docs/user-guide/update-demo/local/angular.min.js
 create mode 100644 _includes/docs/docs/user-guide/update-demo/local/angular.min.js.map
 create mode 100644 _includes/docs/docs/user-guide/update-demo/local/index.html
 create mode 100644 _includes/docs/docs/user-guide/update-demo/local/script.js
 create mode 100644 _includes/docs/docs/user-guide/update-demo/local/style.css
 create mode 100644 _includes/docs/docs/user-guide/update-demo/nautilus-rc.yaml
 create mode 100644 _includes/docs/docs/user-guide/volumes.md
 create mode 100644 _includes/docs/docs/user-guide/walkthrough/index.md
 create mode 100644 _includes/docs/docs/user-guide/walkthrough/k8s201.md
 create mode 100644 _includes/docs/docs/user-guide/walkthrough/pod-nginx-with-label.yaml
 create mode 100644 _includes/docs/docs/user-guide/walkthrough/pod-nginx.yaml
 create mode 100644 _includes/docs/docs/user-guide/walkthrough/pod-redis.yaml
 create mode 100644 _includes/docs/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml
 create mode 100644 _includes/docs/docs/user-guide/walkthrough/podtemplate.json
 create mode 100644 _includes/docs/docs/user-guide/walkthrough/replication-controller.yaml
 create mode 100644 _includes/docs/docs/user-guide/walkthrough/service.yaml
 create mode 100644 _includes/docs/docs/user-guide/working-with-resources.md
 create mode 100644 _includes/docs/docs/whatisk8s.md
 create mode 100644 _includes/docs/editdocs.md
 create mode 100644 _includes/docs/index.md
 create mode 100644 _includes/docs/pagelist.md
 create mode 100644 _includes/docs/reference.md
 create mode 100644 _includes/docs/samples.md
 create mode 100644 v1.0/docs/admin/accessing-the-api.md
 create mode 100644 v1.0/docs/admin/admission-controllers.md
 create mode 100644 v1.0/docs/admin/authentication.md
 create mode 100644 v1.0/docs/admin/authorization.md
 create mode 100644 v1.0/docs/admin/cluster-components.md
 create mode 100644 v1.0/docs/admin/cluster-large.md
 create mode 100644 v1.0/docs/admin/cluster-management.md
 create mode 100644 v1.0/docs/admin/cluster-troubleshooting.md
 create mode 100644 v1.0/docs/admin/daemon.yaml
 create mode 100644 v1.0/docs/admin/daemons.md
 create mode 100644 v1.0/docs/admin/dns.md
 create mode 100644 v1.0/docs/admin/etcd.md
 create mode 100644 v1.0/docs/admin/garbage-collection.md
 create
mode 100644 v1.0/docs/admin/high-availability.md create mode 100644 v1.0/docs/admin/high-availability/default-kubelet create mode 100644 v1.0/docs/admin/high-availability/etcd.yaml create mode 100644 v1.0/docs/admin/high-availability/kube-apiserver.yaml create mode 100644 v1.0/docs/admin/high-availability/kube-controller-manager.yaml create mode 100644 v1.0/docs/admin/high-availability/kube-scheduler.yaml create mode 100644 v1.0/docs/admin/high-availability/monit-docker create mode 100644 v1.0/docs/admin/high-availability/monit-kubelet create mode 100644 v1.0/docs/admin/high-availability/podmaster.yaml create mode 100644 v1.0/docs/admin/index.md create mode 100644 v1.0/docs/admin/limitrange/index.md create mode 100644 v1.0/docs/admin/limitrange/invalid-pod.yaml create mode 100644 v1.0/docs/admin/limitrange/limits.yaml create mode 100644 v1.0/docs/admin/limitrange/namespace.yaml create mode 100644 v1.0/docs/admin/limitrange/valid-pod.yaml create mode 100644 v1.0/docs/admin/multi-cluster.md create mode 100644 v1.0/docs/admin/namespaces.md create mode 100644 v1.0/docs/admin/namespaces/index.md create mode 100644 v1.0/docs/admin/namespaces/namespace-dev.json create mode 100644 v1.0/docs/admin/namespaces/namespace-prod.json create mode 100644 v1.0/docs/admin/networking.md create mode 100644 v1.0/docs/admin/node.md create mode 100644 v1.0/docs/admin/ovs-networking.md create mode 100755 v1.0/docs/admin/resource-quota.md create mode 100644 v1.0/docs/admin/resourcequota/index.md create mode 100755 v1.0/docs/admin/resourcequota/limits.yaml create mode 100644 v1.0/docs/admin/resourcequota/namespace.yaml create mode 100644 v1.0/docs/admin/resourcequota/quota.yaml create mode 100644 v1.0/docs/admin/salt.md create mode 100644 v1.0/docs/admin/service-accounts-admin.md create mode 100644 v1.0/docs/admin/static-pods.md create mode 100644 v1.0/docs/api.md create mode 100644 v1.0/docs/devel/api-conventions.md create mode 100644 v1.0/docs/devel/api_changes.md create mode 100644 v1.0/docs/devel/automation.md create mode 100644 v1.0/docs/devel/cherry-picks.md create mode 100644 v1.0/docs/devel/cli-roadmap.md create mode 100644 v1.0/docs/devel/client-libraries.md create mode 100644 v1.0/docs/devel/coding-conventions.md create mode 100644 v1.0/docs/devel/collab.md create mode 100644 v1.0/docs/devel/developer-guides/vagrant.md create mode 100644 v1.0/docs/devel/development.md create mode 100644 v1.0/docs/devel/e2e-tests.md create mode 100644 v1.0/docs/devel/faster_reviews.md create mode 100644 v1.0/docs/devel/flaky-tests.md create mode 100644 v1.0/docs/devel/getting-builds.md create mode 100644 v1.0/docs/devel/index.md create mode 100644 v1.0/docs/devel/instrumentation.md create mode 100644 v1.0/docs/devel/issues.md create mode 100644 v1.0/docs/devel/kubectl-conventions.md create mode 100644 v1.0/docs/devel/logging.md create mode 100644 v1.0/docs/devel/making-release-notes.md create mode 100644 v1.0/docs/devel/profiling.md create mode 100644 v1.0/docs/devel/pull-requests.md create mode 100644 v1.0/docs/devel/releasing.md create mode 100755 v1.0/docs/devel/scheduler.md create mode 100755 v1.0/docs/devel/scheduler_algorithm.md create mode 100644 v1.0/docs/devel/writing-a-getting-started-guide.md create mode 100644 v1.0/docs/getting-started-guides/aws.md create mode 100644 v1.0/docs/getting-started-guides/azure.md create mode 100644 v1.0/docs/getting-started-guides/binary_release.md create mode 100644 v1.0/docs/getting-started-guides/centos/centos_manual_config.md create mode 100644 
v1.0/docs/getting-started-guides/cloudstack.md create mode 100644 v1.0/docs/getting-started-guides/coreos.md create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/.gitignore create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml create mode 100755 v1.0/docs/getting-started-guides/coreos/azure/azure-login.js create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml create mode 100755 v1.0/docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js create mode 100755 v1.0/docs/getting-started-guides/coreos/azure/destroy-cluster.js create mode 100755 v1.0/docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/index.md create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/lib/cloud_config.js create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/lib/util.js create mode 100644 v1.0/docs/getting-started-guides/coreos/azure/package.json create mode 100755 v1.0/docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js create mode 100644 v1.0/docs/getting-started-guides/coreos/bare_metal_calico.md create mode 100644 v1.0/docs/getting-started-guides/coreos/bare_metal_offline.md create mode 100644 v1.0/docs/getting-started-guides/coreos/cloud-configs/master.yaml create mode 100644 v1.0/docs/getting-started-guides/coreos/cloud-configs/node.yaml create mode 100644 v1.0/docs/getting-started-guides/coreos/coreos_multinode_cluster.md create mode 100644 v1.0/docs/getting-started-guides/dcos.md create mode 100644 v1.0/docs/getting-started-guides/docker-multinode.md create mode 100644 v1.0/docs/getting-started-guides/docker-multinode/deployDNS.md create mode 100644 v1.0/docs/getting-started-guides/docker-multinode/master.md create mode 100755 v1.0/docs/getting-started-guides/docker-multinode/master.sh create mode 100644 v1.0/docs/getting-started-guides/docker-multinode/skydns-rc.yaml.in create mode 100644 v1.0/docs/getting-started-guides/docker-multinode/skydns-svc.yaml.in create mode 100644 v1.0/docs/getting-started-guides/docker-multinode/testing.md create mode 100644 v1.0/docs/getting-started-guides/docker-multinode/worker.md create mode 100755 v1.0/docs/getting-started-guides/docker-multinode/worker.sh create mode 100644 v1.0/docs/getting-started-guides/docker.md create mode 100644 v1.0/docs/getting-started-guides/fedora/fedora-calico.md create mode 100644 v1.0/docs/getting-started-guides/fedora/fedora_ansible_config.md create mode 100644 v1.0/docs/getting-started-guides/fedora/fedora_manual_config.md create mode 100644 v1.0/docs/getting-started-guides/fedora/flannel_multi_node_cluster.md create mode 100644 v1.0/docs/getting-started-guides/gce.md create mode 100644 v1.0/docs/getting-started-guides/index.md create mode 100644 v1.0/docs/getting-started-guides/juju.md create mode 100644 v1.0/docs/getting-started-guides/libvirt-coreos.md create mode 100644 v1.0/docs/getting-started-guides/locally.md create mode 100644 v1.0/docs/getting-started-guides/logging-elasticsearch.md 
create mode 100644 v1.0/docs/getting-started-guides/logging.md create mode 100644 v1.0/docs/getting-started-guides/meanstack.md create mode 100644 v1.0/docs/getting-started-guides/mesos-docker.md create mode 100644 v1.0/docs/getting-started-guides/mesos.md create mode 100644 v1.0/docs/getting-started-guides/ovirt.md create mode 100644 v1.0/docs/getting-started-guides/rackspace.md create mode 100644 v1.0/docs/getting-started-guides/rkt/index.md create mode 100644 v1.0/docs/getting-started-guides/scratch.md create mode 100644 v1.0/docs/getting-started-guides/ubuntu-calico.md create mode 100644 v1.0/docs/getting-started-guides/ubuntu.md create mode 100644 v1.0/docs/getting-started-guides/vagrant.md create mode 100644 v1.0/docs/getting-started-guides/vsphere.md create mode 100644 v1.0/docs/index.md create mode 100644 v1.0/docs/reporting-security-issues.md create mode 100644 v1.0/docs/roadmap.md create mode 100644 v1.0/docs/troubleshooting.md create mode 100644 v1.0/docs/user-guide/accessing-the-cluster.md create mode 100644 v1.0/docs/user-guide/annotations.md create mode 100644 v1.0/docs/user-guide/application-troubleshooting.md create mode 100644 v1.0/docs/user-guide/compute-resources.md create mode 100644 v1.0/docs/user-guide/config-best-practices.md create mode 100644 v1.0/docs/user-guide/configuring-containers.md create mode 100644 v1.0/docs/user-guide/connecting-applications.md create mode 100644 v1.0/docs/user-guide/connecting-to-applications-port-forward.md create mode 100644 v1.0/docs/user-guide/connecting-to-applications-proxy.md create mode 100644 v1.0/docs/user-guide/container-environment.md create mode 100644 v1.0/docs/user-guide/containers.md create mode 100644 v1.0/docs/user-guide/debugging-services.md create mode 100644 v1.0/docs/user-guide/deploying-applications.md create mode 100644 v1.0/docs/user-guide/deployments.md create mode 100644 v1.0/docs/user-guide/docker-cli-to-kubectl.md create mode 100644 v1.0/docs/user-guide/downward-api.md create mode 100644 v1.0/docs/user-guide/downward-api/dapi-pod.yaml create mode 100644 v1.0/docs/user-guide/downward-api/index.md create mode 100644 v1.0/docs/user-guide/downward-api/volume/dapi-volume.yaml create mode 100644 v1.0/docs/user-guide/downward-api/volume/index.md create mode 100644 v1.0/docs/user-guide/environment-guide/backend-rc.yaml create mode 100644 v1.0/docs/user-guide/environment-guide/backend-srv.yaml create mode 100644 v1.0/docs/user-guide/environment-guide/containers/backend/Dockerfile create mode 100644 v1.0/docs/user-guide/environment-guide/containers/backend/backend.go create mode 100644 v1.0/docs/user-guide/environment-guide/containers/index.md create mode 100644 v1.0/docs/user-guide/environment-guide/containers/show/Dockerfile create mode 100644 v1.0/docs/user-guide/environment-guide/containers/show/show.go create mode 100644 v1.0/docs/user-guide/environment-guide/index.md create mode 100644 v1.0/docs/user-guide/environment-guide/show-rc.yaml create mode 100644 v1.0/docs/user-guide/environment-guide/show-srv.yaml create mode 100644 v1.0/docs/user-guide/getting-into-containers.md create mode 100644 v1.0/docs/user-guide/horizontal-pod-autoscaler.md create mode 100644 v1.0/docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml create mode 100644 v1.0/docs/user-guide/horizontal-pod-autoscaling/image/Dockerfile create mode 100755 v1.0/docs/user-guide/horizontal-pod-autoscaling/image/index.php create mode 100644 v1.0/docs/user-guide/horizontal-pod-autoscaling/index.md create mode 100644 
v1.0/docs/user-guide/identifiers.md create mode 100644 v1.0/docs/user-guide/images.md create mode 100644 v1.0/docs/user-guide/index.md create mode 100644 v1.0/docs/user-guide/ingress.md create mode 100644 v1.0/docs/user-guide/ingress.yaml create mode 100644 v1.0/docs/user-guide/introspection-and-debugging.md create mode 100644 v1.0/docs/user-guide/job.yaml create mode 100644 v1.0/docs/user-guide/jobs.md create mode 100644 v1.0/docs/user-guide/jsonpath.md create mode 100644 v1.0/docs/user-guide/known-issues.md create mode 100644 v1.0/docs/user-guide/kubeconfig-file.md create mode 100644 v1.0/docs/user-guide/kubectl-overview.md create mode 100644 v1.0/docs/user-guide/labels.md create mode 100644 v1.0/docs/user-guide/liveness/exec-liveness.yaml create mode 100644 v1.0/docs/user-guide/liveness/http-liveness.yaml create mode 100644 v1.0/docs/user-guide/liveness/image/Dockerfile create mode 100644 v1.0/docs/user-guide/liveness/image/Makefile create mode 100644 v1.0/docs/user-guide/liveness/image/server.go create mode 100644 v1.0/docs/user-guide/liveness/index.md create mode 100644 v1.0/docs/user-guide/logging-demo/Makefile create mode 100644 v1.0/docs/user-guide/logging-demo/index.md create mode 100644 v1.0/docs/user-guide/logging-demo/synthetic_0_25lps.yaml create mode 100644 v1.0/docs/user-guide/logging-demo/synthetic_10lps.yaml create mode 100644 v1.0/docs/user-guide/logging.md create mode 100644 v1.0/docs/user-guide/managing-deployments.md create mode 100644 v1.0/docs/user-guide/monitoring.md create mode 100644 v1.0/docs/user-guide/multi-pod.yaml create mode 100644 v1.0/docs/user-guide/namespaces.md create mode 100644 v1.0/docs/user-guide/new-nginx-deployment.yaml create mode 100644 v1.0/docs/user-guide/nginx-deployment.yaml create mode 100644 v1.0/docs/user-guide/node-selection/index.md create mode 100644 v1.0/docs/user-guide/node-selection/pod.yaml create mode 100644 v1.0/docs/user-guide/overview.md create mode 100644 v1.0/docs/user-guide/persistent-volumes.md create mode 100644 v1.0/docs/user-guide/persistent-volumes/claims/claim-01.yaml create mode 100644 v1.0/docs/user-guide/persistent-volumes/claims/claim-02.yaml create mode 100644 v1.0/docs/user-guide/persistent-volumes/claims/claim-03.json create mode 100644 v1.0/docs/user-guide/persistent-volumes/index.md create mode 100644 v1.0/docs/user-guide/persistent-volumes/simpletest/namespace.json create mode 100644 v1.0/docs/user-guide/persistent-volumes/simpletest/pod.yaml create mode 100644 v1.0/docs/user-guide/persistent-volumes/simpletest/service.json create mode 100644 v1.0/docs/user-guide/persistent-volumes/volumes/gce.yaml create mode 100644 v1.0/docs/user-guide/persistent-volumes/volumes/local-01.yaml create mode 100644 v1.0/docs/user-guide/persistent-volumes/volumes/local-02.yaml create mode 100644 v1.0/docs/user-guide/persistent-volumes/volumes/nfs.yaml create mode 100644 v1.0/docs/user-guide/pod-states.md create mode 100644 v1.0/docs/user-guide/pod.yaml create mode 100644 v1.0/docs/user-guide/pods.md create mode 100644 v1.0/docs/user-guide/prereqs.md create mode 100644 v1.0/docs/user-guide/production-pods.md create mode 100644 v1.0/docs/user-guide/quick-start.md create mode 100644 v1.0/docs/user-guide/replication-controller.md create mode 100644 v1.0/docs/user-guide/replication.yaml create mode 100644 v1.0/docs/user-guide/resourcequota/index.md create mode 100644 v1.0/docs/user-guide/secrets.md create mode 100644 v1.0/docs/user-guide/secrets/index.md create mode 100644 v1.0/docs/user-guide/secrets/secret-pod.yaml create mode 
100644 v1.0/docs/user-guide/secrets/secret.yaml create mode 100644 v1.0/docs/user-guide/security-context.md create mode 100644 v1.0/docs/user-guide/service-accounts.md create mode 100644 v1.0/docs/user-guide/services-firewalls.md create mode 100644 v1.0/docs/user-guide/services.md create mode 100644 v1.0/docs/user-guide/sharing-clusters.md create mode 100644 v1.0/docs/user-guide/simple-nginx.md create mode 100644 v1.0/docs/user-guide/simple-yaml.md create mode 100644 v1.0/docs/user-guide/ui.md create mode 100755 v1.0/docs/user-guide/update-demo/build-images.sh create mode 100644 v1.0/docs/user-guide/update-demo/images/kitten/Dockerfile create mode 100644 v1.0/docs/user-guide/update-demo/images/kitten/html/data.json create mode 100644 v1.0/docs/user-guide/update-demo/images/kitten/html/kitten.jpg create mode 100644 v1.0/docs/user-guide/update-demo/images/nautilus/Dockerfile create mode 100644 v1.0/docs/user-guide/update-demo/images/nautilus/html/data.json create mode 100644 v1.0/docs/user-guide/update-demo/images/nautilus/html/nautilus.jpg create mode 100644 v1.0/docs/user-guide/update-demo/index.md create mode 100644 v1.0/docs/user-guide/update-demo/kitten-rc.yaml create mode 100644 v1.0/docs/user-guide/update-demo/local/LICENSE.angular create mode 100644 v1.0/docs/user-guide/update-demo/local/angular.min.js create mode 100644 v1.0/docs/user-guide/update-demo/local/angular.min.js.map create mode 100644 v1.0/docs/user-guide/update-demo/local/index.html create mode 100644 v1.0/docs/user-guide/update-demo/local/script.js create mode 100644 v1.0/docs/user-guide/update-demo/local/style.css create mode 100644 v1.0/docs/user-guide/update-demo/nautilus-rc.yaml create mode 100644 v1.0/docs/user-guide/volumes.md create mode 100644 v1.0/docs/user-guide/walkthrough/index.md create mode 100644 v1.0/docs/user-guide/walkthrough/k8s201.md create mode 100644 v1.0/docs/user-guide/walkthrough/pod-nginx-with-label.yaml create mode 100644 v1.0/docs/user-guide/walkthrough/pod-nginx.yaml create mode 100644 v1.0/docs/user-guide/walkthrough/pod-redis.yaml create mode 100644 v1.0/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml create mode 100644 v1.0/docs/user-guide/walkthrough/podtemplate.json create mode 100644 v1.0/docs/user-guide/walkthrough/replication-controller.yaml create mode 100644 v1.0/docs/user-guide/walkthrough/service.yaml create mode 100644 v1.0/docs/user-guide/working-with-resources.md create mode 100644 v1.0/docs/whatisk8s.md create mode 100644 v1.0/editdocs.md create mode 100644 v1.0/index.md create mode 100644 v1.0/pagelist.md create mode 100644 v1.0/reference.md create mode 100644 v1.0/samples.md create mode 100644 v1.2/docs/admin/accessing-the-api.md create mode 100644 v1.2/docs/admin/admission-controllers.md create mode 100644 v1.2/docs/admin/authentication.md create mode 100644 v1.2/docs/admin/authorization.md create mode 100644 v1.2/docs/admin/cluster-components.md create mode 100644 v1.2/docs/admin/cluster-large.md create mode 100644 v1.2/docs/admin/cluster-management.md create mode 100644 v1.2/docs/admin/cluster-troubleshooting.md create mode 100644 v1.2/docs/admin/daemon.yaml create mode 100644 v1.2/docs/admin/daemons.md create mode 100644 v1.2/docs/admin/dns.md create mode 100644 v1.2/docs/admin/etcd.md create mode 100644 v1.2/docs/admin/garbage-collection.md create mode 100644 v1.2/docs/admin/high-availability.md create mode 100644 v1.2/docs/admin/high-availability/default-kubelet create mode 100644 v1.2/docs/admin/high-availability/etcd.yaml create mode 100644 
v1.2/docs/admin/high-availability/kube-apiserver.yaml create mode 100644 v1.2/docs/admin/high-availability/kube-controller-manager.yaml create mode 100644 v1.2/docs/admin/high-availability/kube-scheduler.yaml create mode 100644 v1.2/docs/admin/high-availability/monit-docker create mode 100644 v1.2/docs/admin/high-availability/monit-kubelet create mode 100644 v1.2/docs/admin/high-availability/podmaster.yaml create mode 100644 v1.2/docs/admin/index.md create mode 100644 v1.2/docs/admin/limitrange/index.md create mode 100644 v1.2/docs/admin/limitrange/invalid-pod.yaml create mode 100644 v1.2/docs/admin/limitrange/limits.yaml create mode 100644 v1.2/docs/admin/limitrange/namespace.yaml create mode 100644 v1.2/docs/admin/limitrange/valid-pod.yaml create mode 100644 v1.2/docs/admin/multi-cluster.md create mode 100644 v1.2/docs/admin/namespaces.md create mode 100644 v1.2/docs/admin/namespaces/index.md create mode 100644 v1.2/docs/admin/namespaces/namespace-dev.json create mode 100644 v1.2/docs/admin/namespaces/namespace-prod.json create mode 100644 v1.2/docs/admin/networking.md create mode 100644 v1.2/docs/admin/node.md create mode 100644 v1.2/docs/admin/ovs-networking.md create mode 100755 v1.2/docs/admin/resource-quota.md create mode 100644 v1.2/docs/admin/resourcequota/index.md create mode 100755 v1.2/docs/admin/resourcequota/limits.yaml create mode 100644 v1.2/docs/admin/resourcequota/namespace.yaml create mode 100644 v1.2/docs/admin/resourcequota/quota.yaml create mode 100644 v1.2/docs/admin/salt.md create mode 100644 v1.2/docs/admin/service-accounts-admin.md create mode 100644 v1.2/docs/admin/static-pods.md create mode 100644 v1.2/docs/api.md create mode 100644 v1.2/docs/devel/api-conventions.md create mode 100644 v1.2/docs/devel/api_changes.md create mode 100644 v1.2/docs/devel/automation.md create mode 100644 v1.2/docs/devel/cherry-picks.md create mode 100644 v1.2/docs/devel/cli-roadmap.md create mode 100644 v1.2/docs/devel/client-libraries.md create mode 100644 v1.2/docs/devel/coding-conventions.md create mode 100644 v1.2/docs/devel/collab.md create mode 100644 v1.2/docs/devel/developer-guides/vagrant.md create mode 100644 v1.2/docs/devel/development.md create mode 100644 v1.2/docs/devel/e2e-tests.md create mode 100644 v1.2/docs/devel/faster_reviews.md create mode 100644 v1.2/docs/devel/flaky-tests.md create mode 100644 v1.2/docs/devel/getting-builds.md create mode 100644 v1.2/docs/devel/index.md create mode 100644 v1.2/docs/devel/instrumentation.md create mode 100644 v1.2/docs/devel/issues.md create mode 100644 v1.2/docs/devel/kubectl-conventions.md create mode 100644 v1.2/docs/devel/logging.md create mode 100644 v1.2/docs/devel/making-release-notes.md create mode 100644 v1.2/docs/devel/profiling.md create mode 100644 v1.2/docs/devel/pull-requests.md create mode 100644 v1.2/docs/devel/releasing.md create mode 100755 v1.2/docs/devel/scheduler.md create mode 100755 v1.2/docs/devel/scheduler_algorithm.md create mode 100644 v1.2/docs/devel/writing-a-getting-started-guide.md create mode 100644 v1.2/docs/getting-started-guides/aws.md create mode 100644 v1.2/docs/getting-started-guides/azure.md create mode 100644 v1.2/docs/getting-started-guides/binary_release.md create mode 100644 v1.2/docs/getting-started-guides/centos/centos_manual_config.md create mode 100644 v1.2/docs/getting-started-guides/cloudstack.md create mode 100644 v1.2/docs/getting-started-guides/coreos.md create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/.gitignore create mode 100644 
v1.2/docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml create mode 100755 v1.2/docs/getting-started-guides/coreos/azure/azure-login.js create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml create mode 100755 v1.2/docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js create mode 100755 v1.2/docs/getting-started-guides/coreos/azure/destroy-cluster.js create mode 100755 v1.2/docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/index.md create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/lib/cloud_config.js create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/lib/util.js create mode 100644 v1.2/docs/getting-started-guides/coreos/azure/package.json create mode 100755 v1.2/docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js create mode 100644 v1.2/docs/getting-started-guides/coreos/bare_metal_calico.md create mode 100644 v1.2/docs/getting-started-guides/coreos/bare_metal_offline.md create mode 100644 v1.2/docs/getting-started-guides/coreos/cloud-configs/master.yaml create mode 100644 v1.2/docs/getting-started-guides/coreos/cloud-configs/node.yaml create mode 100644 v1.2/docs/getting-started-guides/coreos/coreos_multinode_cluster.md create mode 100644 v1.2/docs/getting-started-guides/dcos.md create mode 100644 v1.2/docs/getting-started-guides/docker-multinode.md create mode 100644 v1.2/docs/getting-started-guides/docker-multinode/deployDNS.md create mode 100644 v1.2/docs/getting-started-guides/docker-multinode/master.md create mode 100755 v1.2/docs/getting-started-guides/docker-multinode/master.sh create mode 100644 v1.2/docs/getting-started-guides/docker-multinode/skydns-rc.yaml.in create mode 100644 v1.2/docs/getting-started-guides/docker-multinode/skydns-svc.yaml.in create mode 100644 v1.2/docs/getting-started-guides/docker-multinode/testing.md create mode 100644 v1.2/docs/getting-started-guides/docker-multinode/worker.md create mode 100755 v1.2/docs/getting-started-guides/docker-multinode/worker.sh create mode 100644 v1.2/docs/getting-started-guides/docker.md create mode 100644 v1.2/docs/getting-started-guides/fedora/fedora-calico.md create mode 100644 v1.2/docs/getting-started-guides/fedora/fedora_ansible_config.md create mode 100644 v1.2/docs/getting-started-guides/fedora/fedora_manual_config.md create mode 100644 v1.2/docs/getting-started-guides/fedora/flannel_multi_node_cluster.md create mode 100644 v1.2/docs/getting-started-guides/gce.md create mode 100644 v1.2/docs/getting-started-guides/index.md create mode 100644 v1.2/docs/getting-started-guides/juju.md create mode 100644 v1.2/docs/getting-started-guides/libvirt-coreos.md create mode 100644 v1.2/docs/getting-started-guides/locally.md create mode 100644 v1.2/docs/getting-started-guides/logging-elasticsearch.md create mode 100644 v1.2/docs/getting-started-guides/logging.md create mode 100644 v1.2/docs/getting-started-guides/meanstack.md create mode 100644 v1.2/docs/getting-started-guides/mesos-docker.md create mode 
100644 v1.2/docs/getting-started-guides/mesos.md create mode 100644 v1.2/docs/getting-started-guides/ovirt.md create mode 100644 v1.2/docs/getting-started-guides/rackspace.md create mode 100644 v1.2/docs/getting-started-guides/rkt/index.md create mode 100644 v1.2/docs/getting-started-guides/scratch.md create mode 100644 v1.2/docs/getting-started-guides/ubuntu-calico.md create mode 100644 v1.2/docs/getting-started-guides/ubuntu.md create mode 100644 v1.2/docs/getting-started-guides/vagrant.md create mode 100644 v1.2/docs/getting-started-guides/vsphere.md create mode 100644 v1.2/docs/index.md create mode 100644 v1.2/docs/reporting-security-issues.md create mode 100644 v1.2/docs/roadmap.md create mode 100644 v1.2/docs/troubleshooting.md create mode 100644 v1.2/docs/user-guide/accessing-the-cluster.md create mode 100644 v1.2/docs/user-guide/annotations.md create mode 100644 v1.2/docs/user-guide/application-troubleshooting.md create mode 100644 v1.2/docs/user-guide/compute-resources.md create mode 100644 v1.2/docs/user-guide/config-best-practices.md create mode 100644 v1.2/docs/user-guide/configuring-containers.md create mode 100644 v1.2/docs/user-guide/connecting-applications.md create mode 100644 v1.2/docs/user-guide/connecting-to-applications-port-forward.md create mode 100644 v1.2/docs/user-guide/connecting-to-applications-proxy.md create mode 100644 v1.2/docs/user-guide/container-environment.md create mode 100644 v1.2/docs/user-guide/containers.md create mode 100644 v1.2/docs/user-guide/debugging-services.md create mode 100644 v1.2/docs/user-guide/deploying-applications.md create mode 100644 v1.2/docs/user-guide/deployments.md create mode 100644 v1.2/docs/user-guide/docker-cli-to-kubectl.md create mode 100644 v1.2/docs/user-guide/downward-api.md create mode 100644 v1.2/docs/user-guide/downward-api/dapi-pod.yaml create mode 100644 v1.2/docs/user-guide/downward-api/index.md create mode 100644 v1.2/docs/user-guide/downward-api/volume/dapi-volume.yaml create mode 100644 v1.2/docs/user-guide/downward-api/volume/index.md create mode 100644 v1.2/docs/user-guide/environment-guide/backend-rc.yaml create mode 100644 v1.2/docs/user-guide/environment-guide/backend-srv.yaml create mode 100644 v1.2/docs/user-guide/environment-guide/containers/backend/Dockerfile create mode 100644 v1.2/docs/user-guide/environment-guide/containers/backend/backend.go create mode 100644 v1.2/docs/user-guide/environment-guide/containers/index.md create mode 100644 v1.2/docs/user-guide/environment-guide/containers/show/Dockerfile create mode 100644 v1.2/docs/user-guide/environment-guide/containers/show/show.go create mode 100644 v1.2/docs/user-guide/environment-guide/index.md create mode 100644 v1.2/docs/user-guide/environment-guide/show-rc.yaml create mode 100644 v1.2/docs/user-guide/environment-guide/show-srv.yaml create mode 100644 v1.2/docs/user-guide/getting-into-containers.md create mode 100644 v1.2/docs/user-guide/horizontal-pod-autoscaler.md create mode 100644 v1.2/docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml create mode 100644 v1.2/docs/user-guide/horizontal-pod-autoscaling/image/Dockerfile create mode 100755 v1.2/docs/user-guide/horizontal-pod-autoscaling/image/index.php create mode 100644 v1.2/docs/user-guide/horizontal-pod-autoscaling/index.md create mode 100644 v1.2/docs/user-guide/identifiers.md create mode 100644 v1.2/docs/user-guide/images.md create mode 100644 v1.2/docs/user-guide/index.md create mode 100644 v1.2/docs/user-guide/ingress.md create mode 100644 v1.2/docs/user-guide/ingress.yaml 
create mode 100644 v1.2/docs/user-guide/introspection-and-debugging.md create mode 100644 v1.2/docs/user-guide/job.yaml create mode 100644 v1.2/docs/user-guide/jobs.md create mode 100644 v1.2/docs/user-guide/jsonpath.md create mode 100644 v1.2/docs/user-guide/known-issues.md create mode 100644 v1.2/docs/user-guide/kubeconfig-file.md create mode 100644 v1.2/docs/user-guide/kubectl-overview.md create mode 100644 v1.2/docs/user-guide/labels.md create mode 100644 v1.2/docs/user-guide/liveness/exec-liveness.yaml create mode 100644 v1.2/docs/user-guide/liveness/http-liveness.yaml create mode 100644 v1.2/docs/user-guide/liveness/image/Dockerfile create mode 100644 v1.2/docs/user-guide/liveness/image/Makefile create mode 100644 v1.2/docs/user-guide/liveness/image/server.go create mode 100644 v1.2/docs/user-guide/liveness/index.md create mode 100644 v1.2/docs/user-guide/logging-demo/Makefile create mode 100644 v1.2/docs/user-guide/logging-demo/index.md create mode 100644 v1.2/docs/user-guide/logging-demo/synthetic_0_25lps.yaml create mode 100644 v1.2/docs/user-guide/logging-demo/synthetic_10lps.yaml create mode 100644 v1.2/docs/user-guide/logging.md create mode 100644 v1.2/docs/user-guide/managing-deployments.md create mode 100644 v1.2/docs/user-guide/monitoring.md create mode 100644 v1.2/docs/user-guide/multi-pod.yaml create mode 100644 v1.2/docs/user-guide/namespaces.md create mode 100644 v1.2/docs/user-guide/new-nginx-deployment.yaml create mode 100644 v1.2/docs/user-guide/nginx-deployment.yaml create mode 100644 v1.2/docs/user-guide/node-selection/index.md create mode 100644 v1.2/docs/user-guide/node-selection/pod.yaml create mode 100644 v1.2/docs/user-guide/overview.md create mode 100644 v1.2/docs/user-guide/persistent-volumes.md create mode 100644 v1.2/docs/user-guide/persistent-volumes/claims/claim-01.yaml create mode 100644 v1.2/docs/user-guide/persistent-volumes/claims/claim-02.yaml create mode 100644 v1.2/docs/user-guide/persistent-volumes/claims/claim-03.json create mode 100644 v1.2/docs/user-guide/persistent-volumes/index.md create mode 100644 v1.2/docs/user-guide/persistent-volumes/simpletest/namespace.json create mode 100644 v1.2/docs/user-guide/persistent-volumes/simpletest/pod.yaml create mode 100644 v1.2/docs/user-guide/persistent-volumes/simpletest/service.json create mode 100644 v1.2/docs/user-guide/persistent-volumes/volumes/gce.yaml create mode 100644 v1.2/docs/user-guide/persistent-volumes/volumes/local-01.yaml create mode 100644 v1.2/docs/user-guide/persistent-volumes/volumes/local-02.yaml create mode 100644 v1.2/docs/user-guide/persistent-volumes/volumes/nfs.yaml create mode 100644 v1.2/docs/user-guide/pod-states.md create mode 100644 v1.2/docs/user-guide/pod.yaml create mode 100644 v1.2/docs/user-guide/pods.md create mode 100644 v1.2/docs/user-guide/prereqs.md create mode 100644 v1.2/docs/user-guide/production-pods.md create mode 100644 v1.2/docs/user-guide/quick-start.md create mode 100644 v1.2/docs/user-guide/replication-controller.md create mode 100644 v1.2/docs/user-guide/replication.yaml create mode 100644 v1.2/docs/user-guide/resourcequota/index.md create mode 100644 v1.2/docs/user-guide/secrets.md create mode 100644 v1.2/docs/user-guide/secrets/index.md create mode 100644 v1.2/docs/user-guide/secrets/secret-pod.yaml create mode 100644 v1.2/docs/user-guide/secrets/secret.yaml create mode 100644 v1.2/docs/user-guide/security-context.md create mode 100644 v1.2/docs/user-guide/service-accounts.md create mode 100644 v1.2/docs/user-guide/services-firewalls.md create mode 
100644 v1.2/docs/user-guide/services.md create mode 100644 v1.2/docs/user-guide/sharing-clusters.md create mode 100644 v1.2/docs/user-guide/simple-nginx.md create mode 100644 v1.2/docs/user-guide/simple-yaml.md create mode 100644 v1.2/docs/user-guide/ui.md create mode 100755 v1.2/docs/user-guide/update-demo/build-images.sh create mode 100644 v1.2/docs/user-guide/update-demo/images/kitten/Dockerfile create mode 100644 v1.2/docs/user-guide/update-demo/images/kitten/html/data.json create mode 100644 v1.2/docs/user-guide/update-demo/images/kitten/html/kitten.jpg create mode 100644 v1.2/docs/user-guide/update-demo/images/nautilus/Dockerfile create mode 100644 v1.2/docs/user-guide/update-demo/images/nautilus/html/data.json create mode 100644 v1.2/docs/user-guide/update-demo/images/nautilus/html/nautilus.jpg create mode 100644 v1.2/docs/user-guide/update-demo/index.md create mode 100644 v1.2/docs/user-guide/update-demo/kitten-rc.yaml create mode 100644 v1.2/docs/user-guide/update-demo/local/LICENSE.angular create mode 100644 v1.2/docs/user-guide/update-demo/local/angular.min.js create mode 100644 v1.2/docs/user-guide/update-demo/local/angular.min.js.map create mode 100644 v1.2/docs/user-guide/update-demo/local/index.html create mode 100644 v1.2/docs/user-guide/update-demo/local/script.js create mode 100644 v1.2/docs/user-guide/update-demo/local/style.css create mode 100644 v1.2/docs/user-guide/update-demo/nautilus-rc.yaml create mode 100644 v1.2/docs/user-guide/volumes.md create mode 100644 v1.2/docs/user-guide/walkthrough/index.md create mode 100644 v1.2/docs/user-guide/walkthrough/k8s201.md create mode 100644 v1.2/docs/user-guide/walkthrough/pod-nginx-with-label.yaml create mode 100644 v1.2/docs/user-guide/walkthrough/pod-nginx.yaml create mode 100644 v1.2/docs/user-guide/walkthrough/pod-redis.yaml create mode 100644 v1.2/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml create mode 100644 v1.2/docs/user-guide/walkthrough/podtemplate.json create mode 100644 v1.2/docs/user-guide/walkthrough/replication-controller.yaml create mode 100644 v1.2/docs/user-guide/walkthrough/service.yaml create mode 100644 v1.2/docs/user-guide/working-with-resources.md create mode 100644 v1.2/docs/whatisk8s.md create mode 100644 v1.2/editdocs.md create mode 100644 v1.2/index.md create mode 100644 v1.2/pagelist.md create mode 100644 v1.2/reference.md create mode 100644 v1.2/samples.md diff --git a/_config.yml b/_config.yml index 314c56571e..9f581af79d 100644 --- a/_config.yml +++ b/_config.yml @@ -12,6 +12,15 @@ safe: false lsi: false defaults: + - + scope: + path: "v1.0" + values: + version: "v1.0" + versionfilesafe: "v1_0" + layout: docwithnav + showedit: true + githubbranch: "release-1.0" - scope: path: "v1.1" @@ -21,4 +30,13 @@ defaults: layout: docwithnav showedit: true githubbranch: "release-1.1" + - + scope: + path: "v1.2" + values: + version: "v1.2" + versionfilesafe: "v1_2" + layout: docwithnav + showedit: true + githubbranch: "release-1.2" permalink: pretty \ No newline at end of file diff --git a/_data/v1_0/globals.yml b/_data/v1_0/globals.yml new file mode 100644 index 0000000000..fd3be92a16 --- /dev/null +++ b/_data/v1_0/globals.yml @@ -0,0 +1,5 @@ +tocs: +- guides +- reference +- samples +- support \ No newline at end of file diff --git a/_data/v1_0/guides.yml b/_data/v1_0/guides.yml new file mode 100644 index 0000000000..a8088b55f1 --- /dev/null +++ b/_data/v1_0/guides.yml @@ -0,0 +1,179 @@ +bigheader: "Guides" +abstract: "How to get started, and achieve tasks, using Kubernetes" +toc: +- title:
Guides + path: /v1.0/ + +- title: Quickstarts + section: + - title: What is Kubernetes? + path: /v1.0/docs/whatisk8s/ + - title: TODO - 5-minute Quickstart + path: /v1.0/docs/hellonode/ + - title: Kubernetes 101 + path: /v1.0/docs/user-guide/walkthrough/ + - title: Kubernetes 201 + path: /v1.0/docs/user-guide/walkthrough/k8s201/ + +- title: Running Kubernetes + section: + - title: Picking the Right Solution + path: /v1.0/docs/getting-started-guides/ + - title: Running Kubernetes on Your Local Machine + section: + - title: Running Kubernetes Locally via Docker + path: /v1.0/docs/getting-started-guides/docker/ + - title: Running Kubernetes Locally via Vagrant + path: /v1.0/docs/getting-started-guides/vagrant/ + - title: Running Kubernetes Locally with No VM + path: /v1.0/docs/getting-started-guides/locally/ + - title: Running Kubernetes on Turn-key Cloud Solutions + section: + - title: Running Kubernetes on Google Container Engine + path: https://cloud.google.com/container-engine/docs/before-you-begin/ + - title: Running Kubernetes on Google Compute Engine + path: /v1.0/docs/getting-started-guides/gce/ + - title: Running Kubernetes on AWS EC2 + path: /v1.0/docs/getting-started-guides/aws/ + - title: Running Kubernetes on Azure + path: /v1.0/docs/getting-started-guides/coreos/azure/ + - title: Running Kubernetes on Custom Solutions + section: + - title: Getting Started From Scratch + path: /v1.0/docs/getting-started-guides/scratch/ + - title: Custom Cloud Solutions + section: + - title: AWS or GCE on CoreOS + path: /v1.0/docs/getting-started-guides/coreos/ + - title: AWS or Joyent on Ubuntu + path: /v1.0/docs/getting-started-guides/juju/ + - title: Rackspace on CoreOS + path: /v1.0/docs/getting-started-guides/rackspace/ + - title: On-Premise VMs + section: + - title: Vagrant or VMware + path: /v1.0/docs/getting-started-guides/coreos/ + - title: Cloudstack + path: /v1.0/docs/getting-started-guides/cloudstack/ + - title: VMWare + path: /v1.0/docs/getting-started-guides/vsphere/ + - title: Juju + path: /v1.0/docs/getting-started-guides/juju/ + - title: libvirt on CoreOS + path: /v1.0/docs/getting-started-guides/libvirt-coreos/ + - title: oVirt + path: /v1.0/docs/getting-started-guides/ovirt/ + - title: libvirt or KVM + path: /v1.0/docs/getting-started-guides/fedora/flannel_multi_node_cluster/ + - title: Bare Metal + section: + - title: Offline + path: /v1.0/docs/getting-started-guides/coreos/bare_metal_offline/ + - title: Fedora via Ansible + path: /v1.0/docs/getting-started-guides/fedora/fedora_ansible_config/ + - title: Fedora (Single Node) + path: /v1.0/docs/getting-started-guides/fedora/fedora_manual_config/ + - title: Fedora (Multi Node) + path: /v1.0/docs/getting-started-guides/fedora/flannel_multi_node_cluster/ + - title: Centos + path: /v1.0/docs/getting-started-guides/centos/centos_manual_config/ + - title: Ubuntu + path: /v1.0/docs/getting-started-guides/ubuntu/ + - title: Docker (Multi Node) + path: /v1.0/docs/getting-started-guides/docker-multinode/ + +- title: Administering Clusters + section: + - title: Kubernetes Cluster Admin Guide + path: /v1.0/docs/admin/ + - title: Using Multiple Clusters + path: /v1.0/docs/admin/multi-cluster/ + - title: Using Large Clusters + path: /v1.0/docs/admin/cluster-large/ + - title: Building High-Availability Clusters + path: /v1.0/docs/admin/high-availability/ + - title: Accessing Clusters + path: /v1.0/docs/user-guide/accessing-the-cluster/ + - title: Sharing a Cluster + path: /v1.0/docs/admin/namespaces/ + - title: Changing Cluster Size + path: 
https://github.com/kubernetes/kubernetes/wiki/User-FAQ#how-do-i-change-the-size-of-my-cluster/ + - title: Creating a Custom Cluster from Scratch + path: /v1.0/docs/getting-started-guides/scratch/ + - title: Authenticating Across Clusters with kubeconfig + path: /v1.0/docs/user-guide/kubeconfig-file/ + +- title: Using Nodes, Pods, and Containers + section: + - title: Assigning Pods to Nodes + path: /v1.0/docs/user-guide/node-selection/ + - title: Working with Containers + path: /v1.0/docs/user-guide/production-pods/ + - title: Creating Pods with the Downward API + path: /v1.0/docs/user-guide/downward-api/ + - title: Updating Live Pods + path: /v1.0/docs/user-guide/update-demo/ + - title: Running Commands in a Container with kubectl exec + path: /v1.0/docs/user-guide/getting-into-containers/ + +- title: Networking + section: + - title: Networking in Kubernetes + path: /v1.0/docs/admin/networking/ + - title: Setting Up and Configuring DNS + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/cluster-dns + - title: Connecting Applications + path: /v1.0/docs/user-guide/connecting-applications/ + - title: Creating Servers with External IPs + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/examples/simple-nginx.md + - title: Using DNS Pods and Services + path: /v1.0/docs/admin/dns/ + - title: Connect with Proxies + path: /v1.0/docs/user-guide/connecting-to-applications-proxy/ + - title: Connect with Port Forwarding + path: /v1.0/docs/user-guide/connecting-to-applications-port-forward/ + +- title: Configuring Kubernetes + section: + - title: Using Configuration Files + path: /v1.0/docs/user-guide/simple-yaml/ + - title: Configuring Containers + path: /v1.0/docs/user-guide/configuring-containers/ + - title: Using Environment Variables + path: /v1.0/docs/user-guide/environment-guide/ + - title: Managing Compute Resources + path: /v1.0/docs/user-guide/compute-resources/ + - title: Applying Resource Quotas and Limits + path: /v1.0/docs/admin/resourcequota/ + - title: Setting Pod CPU and Memory Limits + path: /v1.0/docs/admin/limitrange/ + - title: Configuring Garbage Collection + path: /v1.0/docs/admin/garbage-collection/ + - title: Configuring Kubernetes with Salt + path: /v1.0/docs/admin/salt/ + - title: Best Practices for Configuration + path: /v1.0/docs/user-guide/config-best-practices/ + +- title: Application Management and Deployment + section: + - title: "Managing Applications: Prerequisites" + path: /v1.0/docs/user-guide/prereqs/ + - title: Managing Deployments + path: /v1.0/docs/user-guide/managing-deployments/ + - title: Deploying Applications + path: /v1.0/docs/user-guide/deploying-applications/ + - title: Launching, Exposing, and Killing Applications + path: /v1.0/docs/user-guide/quick-start/ + +- title: Testing and Monitoring + section: + - title: Simulating Large Test Loads + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/k8petstore + - title: Checking Pod Health + path: /v1.0/docs/user-guide/liveness/ + - title: Using Explorer to Examine the Runtime Environment + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/explorer + - title: Resource Usage Monitoring + path: /v1.0/docs/user-guide/monitoring/ + - title: Logging + path: /v1.0/docs/user-guide/logging/ diff --git a/_data/v1_0/overrides.yml b/_data/v1_0/overrides.yml new file mode 100644 index 0000000000..cfc070f8db --- /dev/null +++ b/_data/v1_0/overrides.yml @@ -0,0 +1,16 @@ +overrides: +- path: v1.0/docs/man +- path: v1.0/docs/proposals +- path: 
v1.0/docs/api-reference +- path: v1.0/docs/user-guide/kubectl +- path: v1.0/docs/admin/kube-apiserver.md +- path: v1.0/docs/admin/kube-controller-manager.md +- path: v1.0/docs/admin/kube-proxy.md +- path: v1.0/docs/admin/kube-scheduler.md +- path: v1.0/docs/admin/kubelet.md +- path: v1.0/docs/user-guide/kubectl +- path: v1.0/docs/admin/kube-apiserver.md +- path: v1.0/docs/admin/kube-controller-manager.md +- path: v1.0/docs/admin/kube-proxy.md +- path: v1.0/docs/admin/kube-scheduler.md +- path: v1.0/docs/admin/kubelet.md \ No newline at end of file diff --git a/_data/v1_0/reference.yml b/_data/v1_0/reference.yml new file mode 100644 index 0000000000..b030bf7a6e --- /dev/null +++ b/_data/v1_0/reference.yml @@ -0,0 +1,184 @@ +bigheader: "Reference Documentation" +abstract: "Design docs, concept definitions, and references for APIs and CLIs." +toc: +- title: Reference Documentation + path: /v1.0/reference/ + +- title: Kubernetes API + section: + - title: Kubernetes API Overview + path: /v1.0/docs/api/ + - title: Kubernetes API Operations + path: http://kubernetes.io/v1.0/docs/api-reference/v1/operations.html + - title: Kubernetes API Definitions + path: http://kubernetes.io/v1.0/docs/api-reference/v1/definitions.html + +- title: Extensions API + section: + - title: Extensions API Operations + path: http://kubernetes.io/v1.0/docs/api-reference/extensions/v1beta1/operations.html + - title: Extensions API Definitions + path: http://kubernetes.io/v1.0/docs/api-reference/extensions/v1beta1/definitions.html + +- title: kubectl + section: + - title: kubectl Overview + path: /v1.0/docs/user-guide/kubectl-overview/ + - title: kubectl for Docker Users + path: /v1.0/docs/user-guide/docker-cli-to-kubectl/ + - title: kubectl Commands + section: + - title: kubectl + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl.md + - title: kubectl annotate + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_annotate.md + - title: kubectl api-versions + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_api-versions.md + - title: kubectl apply + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_apply.md + - title: kubectl attach + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_attach.md + - title: kubectl autoscale + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_autoscale.md + - title: kubectl cluster-info + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_cluster-info.md + - title: kubectl config + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config.md + - title: kubectl config set-cluster + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_set-cluster.md + - title: kubectl config set-context + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_set-context.md + - title: kubectl set-credentials + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_set-credentials.md + - title: kubectl config set + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_set.md + - title: kubectl config unset + path: 
https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_unset.md + - title: kubectl config use-context + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_use-context.md + - title: kubectl config view + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_view.md + - title: kubectl create + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_create.md + - title: kubectl delete + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_delete.md + - title: kubectl describe + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_describe.md + - title: kubectl edit + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_edit.md + - title: kubectl exec + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_exec.md + - title: kubectl expose + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_expose.md + - title: kubectl get + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_get.md + - title: kubectl label + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_label.md + - title: kubectl logs + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_logs.md + - title: kubectl patch + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_patch.md + - title: kubectl port-forward + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_port-forward.md + - title: kubectl proxy + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_proxy.md + - title: kubectl replace + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_replace.md + - title: kubectl rolling-update + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_rolling-update.md + - title: kubectl run + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_run.md + - title: kubectl scale + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_scale.md + - title: kubectl stop + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_stop.md + - title: kubectl version + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_version.md + +- title: kube-apiserver + section: + - title: Overview + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/admin/kube-apiserver.md + - title: Authorization Plugins + path: /v1.0/docs/admin/authorization/ + - title: Authentication + path: /v1.0/docs/admin/authentication/ + - title: Accessing the API + path: /v1.0/docs/admin/accessing-the-api/ + - title: Admission Controllers + path: /v1.0/docs/admin/admission-controllers/ + - title: Managing Service Accounts + path: /v1.0/docs/admin/service-accounts-admin/ + +- title: kube-scheduler + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/admin/kube-scheduler.md + +- title: kubelet + path:
https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/admin/kubelet.md + +- title: kube-proxy + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/admin/kube-proxy.md + +- title: JSONpath + path: /v1.0/docs/user-guide/jsonpath/ + +- title: etcd + path: /v1.0/docs/admin/etcd/ + +- title: Concept Definitions + section: + - title: Container Environment + path: /v1.0/docs/user-guide/container-environment/ + - title: Images + path: /v1.0/docs/user-guide/images/ + - title: Pods + path: /v1.0/docs/user-guide/pods/ + - title: Labels and Selectors + path: /v1.0/docs/user-guide/labels/ + - title: Replication Controller + path: /v1.0/docs/user-guide/replication-controller/ + - title: Services + path: /v1.0/docs/user-guide/services/ + - title: Volumes + path: /v1.0/docs/user-guide/volumes/ + - title: Persistent Volumes + path: /v1.0/docs/user-guide/persistent-volumes/ + - title: Secrets + path: /v1.0/docs/user-guide/secrets/ + - title: Names + path: /v1.0/docs/user-guide/identifiers/ + - title: Namespaces + path: /v1.0/docs/user-guide/namespaces/ + - title: Nodes + path: /v1.0/docs/admin/node/ + - title: Service Accounts + path: /v1.0/docs/user-guide/service-accounts/ + - title: Annotations + path: /v1.0/docs/user-guide/annotations/ + - title: Daemon Sets + path: /v1.0/docs/admin/daemons/ + - title: Deployments + path: /v1.0/docs/user-guide/deployments/ + - title: Ingress Resources + path: /v1.0/docs/user-guide/ingress/ + - title: Horizontal Pod Autoscaling + path: /v1.0/docs/user-guide/horizontal-pod-autoscaler/ + - title: Jobs + path: /v1.0/docs/user-guide/jobs/ + - title: Resource Quotas + path: /v1.0/docs/admin/resource-quota/ + +- title: Kubernetes Design Docs + section: + - title: Kubernetes Architecture + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/architecture.md + - title: Kubernetes Design Overview + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/ + - title: Security in Kubernetes + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/security.md + - title: Kubernetes Identity and Access Management + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/access.md + - title: Security Contexts + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/security_context.md + - title: Kubernetes OpenVSwitch GRE/VxLAN networking + path: /v1.0/docs/admin/ovs-networking/ \ No newline at end of file diff --git a/_data/v1_0/samples.yml b/_data/v1_0/samples.yml new file mode 100644 index 0000000000..0b11cfee3a --- /dev/null +++ b/_data/v1_0/samples.yml @@ -0,0 +1,54 @@ +bigheader: "Samples" +abstract: "A collection of example applications that show how to use Kubernetes." 
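An illustrative aside, not part of the patch: each version directory gets its own set of TOC data files under _data/<versionfilesafe>/ (globals.yml lists which TOCs exist; guides.yml, reference.yml, samples.yml, and support.yml each carry a bigheader, an abstract, and a nested toc of title/path entries). The docwithnav layout that consumes them is not included in this hunk, so the Liquid below is only a minimal sketch, assuming the layout looks up the data set named by the page's versionfilesafe value; every identifier other than the keys shown in the YAML above is an assumption.

    {% comment %} Sketch only -- the real docwithnav layout is not part of this hunk. {% endcomment %}
    {% assign versiondata = site.data[page.versionfilesafe] %}
    {% for tocname in versiondata.globals.tocs %}
      {% assign toc = versiondata[tocname] %}
      <h2>{{ toc.bigheader }}</h2>
      <p>{{ toc.abstract }}</p>
      <ul>
        {% for item in toc.toc %}
          {% comment %} Top-level entries only; nested `section` lists would need a recursive include. {% endcomment %}
          <li><a href="{{ item.path }}">{{ item.title }}</a></li>
        {% endfor %}
      </ul>
    {% endfor %}

Note that entries whose path is an absolute URL (the release-1.1 GitHub links above) would link off-site, while version-relative paths such as /v1.0/... stay inside the generated tree for that release.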
+toc: +- title: Samples + path: /v1.0/samples/ + +- title: Clustered Application Samples + section: + - title: Apache Cassandra Database + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/cassandra + - title: Apache Spark + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/spark + - title: Apache Storm + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/storm + - title: Distributed Task Queue + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/celery-rabbitmq + - title: Hazelcast + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/hazelcast + - title: Meteor Applications + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/meteor/ + - title: Redis + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/redis/ + - title: RethinkDB + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/rethinkdb/ + - title: Elasticsearch/Kibana Logging Demonstration + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/logging-demo/ + - title: Elasticsearch + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/elasticsearch/ + - title: OpenShift Origin + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/openshift-origin/ + - title: Ceph + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/rbd/ + - title: MEAN stack on Google Cloud Platform + path: /v1.0/docs/getting-started-guides/meanstack/ + +- title: Persistent Volume Samples + section: + - title: WordPress on a Kubernetes Persistent Volume + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/mysql-wordpress-pd/ + - title: GlusterFS + path: /https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/glusterfs/ + - title: iSCSI + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/iscsi/ + - title: NFS + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/nfs/ + +- title: Multi-tier Application Samples + section: + - title: Guestbook - Go Server + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/guestbook-go/ + - title: GuestBook - PHP Server + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/guestbook/ + - title: MySQL - Phabricator Server + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/phabricator/ diff --git a/_data/v1_0/support.yml b/_data/v1_0/support.yml new file mode 100644 index 0000000000..80d612f889 --- /dev/null +++ b/_data/v1_0/support.yml @@ -0,0 +1,40 @@ +bigheader: "Support" +abstract: "Troubleshooting resources, frequently asked questions, and community support channels." 
+toc: +- title: Support + path: /v1.0/docs/troubleshooting/ + +- title: Troubleshooting + section: + - title: Web Interface + path: /v1.0/docs/user-guide/ui/ + - title: Troubleshooting Applications + path: /v1.0/docs/user-guide/application-troubleshooting/ + - title: Troubleshooting Clusters + path: /v1.0/docs/admin/cluster-troubleshooting/ + +- title: Frequently Asked Questions + section: + - title: User FAQ + path: https://github.com/kubernetes/kubernetes/wiki/User-FAQ/ + - title: Debugging FAQ + path: https://github.com/kubernetes/kubernetes/wiki/Debugging-FAQ/ + - title: Services FAQ + path: https://github.com/kubernetes/kubernetes/wiki/Services-FAQ/ + +- title: Other Resources + section: + - title: Known Issues + path: /v1.0/docs/user-guide/known-issues/ + - title: Kubernetes Issue Tracker on GitHub + path: https://github.com/kubernetes/kubernetes/issues/ + - title: Report a Security Vulnerability + path: /v1.0/docs/reporting-security-issues/ + - title: Release Notes + path: https://github.com/kubernetes/kubernetes/releases/ + - title: Release Roadmap + path: /v1.0/docs/roadmap/ + - title: Contributing to Kubernetes Documentation + path: /v1.0/editdocs/ + - title: Sitemap for v1.0 + path: /v1.0/pagelist/ diff --git a/_data/v1_2/globals.yml b/_data/v1_2/globals.yml new file mode 100644 index 0000000000..fd3be92a16 --- /dev/null +++ b/_data/v1_2/globals.yml @@ -0,0 +1,5 @@ +tocs: +- guides +- reference +- samples +- support \ No newline at end of file diff --git a/_data/v1_2/guides.yml b/_data/v1_2/guides.yml new file mode 100644 index 0000000000..b36d4a5101 --- /dev/null +++ b/_data/v1_2/guides.yml @@ -0,0 +1,179 @@ +bigheader: "Guides" +abstract: "How to get started, and acheive tasks, using Kubernetes" +toc: +- title: Guides + path: /v1.2/ + +- title: Quickstarts + section: + - title: What is Kubernetes? 
+ path: /v1.2/docs/whatisk8s/ + - title: TODO - 5-minute Quickstart + path: /v1.2/docs/hellonode/ + - title: Kubernetes 101 + path: /v1.2/docs/user-guide/walkthrough/ + - title: Kubernetes 201 + path: /v1.2/docs/user-guide/walkthrough/k8s201/ + +- title: Running Kubernetes + section: + - title: Picking the Right Solution + path: /v1.2/docs/getting-started-guides/ + - title: Running Kubernetes on Your Local Machine + section: + - title: Running Kubernetes Locally via Docker + path: /v1.2/docs/getting-started-guides/docker/ + - title: Running Kubernetes Locally via Vagrant + path: /v1.2/docs/getting-started-guides/vagrant/ + - title: Running Kubernetes Locally with No VM + path: /v1.2/docs/getting-started-guides/locally/ + - title: Running Kubernetes on Turn-key Cloud Solutions + section: + - title: Running Kubernetes on Google Container Engine + path: https://cloud.google.com/container-engine/docs/before-you-begin/ + - title: Running Kubernetes on Google Compute Engine + path: /v1.2/docs/getting-started-guides/gce/ + - title: Running Kubernetes on AWS EC2 + path: /v1.2/docs/getting-started-guides/aws/ + - title: Running Kubernetes on Azure + path: /v1.2/docs/getting-started-guides/coreos/azure/ + - title: Running Kubernetes on Custom Solutions + section: + - title: Getting Started From Scratch + path: /v1.2/docs/getting-started-guides/scratch/ + - title: Custom Cloud Solutions + section: + - title: AWS or GCE on CoreOS + path: /v1.2/docs/getting-started-guides/coreos/ + - title: AWS or Joyent on Ubuntu + path: /v1.2/docs/getting-started-guides/juju/ + - title: Rackspace on CoreOS + path: /v1.2/docs/getting-started-guides/rackspace/ + - title: On-Premise VMs + section: + - title: Vagrant or VMware + path: /v1.2/docs/getting-started-guides/coreos/ + - title: Cloudstack + path: /v1.2/docs/getting-started-guides/cloudstack/ + - title: VMWare + path: /v1.2/docs/getting-started-guides/vsphere/ + - title: Juju + path: /v1.2/docs/getting-started-guides/juju/ + - title: libvirt on CoreOS + path: /v1.2/docs/getting-started-guides/libvirt-coreos/ + - title: oVirt + path: /v1.2/docs/getting-started-guides/ovirt/ + - title: libvirt or KVM + path: /v1.2/docs/getting-started-guides/fedora/flannel_multi_node_cluster/ + - title: Bare Metal + section: + - title: Offline + path: /v1.2/docs/getting-started-guides/coreos/bare_metal_offline/ + - title: Fedora via Ansible + path: /v1.2/docs/getting-started-guides/fedora/fedora_ansible_config/ + - title: Fedora (Single Node) + path: /v1.2/docs/getting-started-guides/fedora/fedora_manual_config/ + - title: Fedora (Multi Node) + path: /v1.2/docs/getting-started-guides/fedora/flannel_multi_node_cluster/ + - title: Centos + path: /v1.2/docs/getting-started-guides/centos/centos_manual_config/ + - title: Ubuntu + path: /v1.2/docs/getting-started-guides/ubuntu/ + - title: Docker (Multi Node) + path: /v1.2/docs/getting-started-guides/docker-multinode/ + +- title: Administering Clusters + section: + - title: Kubernetes Cluster Admin Guide + path: /v1.2/docs/admin/ + - title: Using Multiple Clusters + path: /v1.2/docs/admin/multi-cluster/ + - title: Using Large Clusters + path: /v1.2/docs/admin/cluster-large/ + - title: Building High-Availability Clusters + path: /v1.2/docs/admin/high-availability/ + - title: Accessing Clusters + path: /v1.2/docs/user-guide/accessing-the-cluster/ + - title: Sharing a Cluster + path: /v1.2/docs/admin/namespaces/ + - title: Changing Cluster Size + path: 
https://github.com/kubernetes/kubernetes/wiki/User-FAQ#how-do-i-change-the-size-of-my-cluster/ + - title: Creating a Custom Cluster from Scratch + path: /v1.2/docs/getting-started-guides/scratch/ + - title: Authenticating Across Clusters with kubeconfig + path: /v1.2/docs/user-guide/kubeconfig-file/ + +- title: Using Nodes, Pods, and Containers + section: + - title: Assigning Pods to Nodes + path: /v1.2/docs/user-guide/node-selection/ + - title: Working with Containers + path: /v1.2/docs/user-guide/production-pods/ + - title: Creating Pods with the Downward API + path: /v1.2/docs/user-guide/downward-api/ + - title: Updating Live Pods + path: /v1.2/docs/user-guide/update-demo/ + - title: Running Commands in a Container with kubectl exec + path: /v1.2/docs/user-guide/getting-into-containers/ + +- title: Networking + section: + - title: Networking in Kubernetes + path: /v1.2/docs/admin/networking/ + - title: Setting Up and Configuring DNS + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/cluster-dns + - title: Connecting Applications + path: /v1.2/docs/user-guide/connecting-applications/ + - title: Creating Servers with External IPs + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/examples/simple-nginx.md + - title: Using DNS Pods and Services + path: /v1.2/docs/admin/dns/ + - title: Connect with Proxies + path: /v1.2/docs/user-guide/connecting-to-applications-proxy/ + - title: Connect with Port Forwarding + path: /v1.2/docs/user-guide/connecting-to-applications-port-forward/ + +- title: Configuring Kubernetes + section: + - title: Using Configuration Files + path: /v1.2/docs/user-guide/simple-yaml/ + - title: Configuring Containers + path: /v1.2/docs/user-guide/configuring-containers/ + - title: Using Environment Variables + path: /v1.2/docs/user-guide/environment-guide/ + - title: Managing Compute Resources + path: /v1.2/docs/user-guide/compute-resources/ + - title: Applying Resource Quotas and Limits + path: /v1.2/docs/admin/resourcequota/ + - title: Setting Pod CPU and Memory Limits + path: /v1.2/docs/admin/limitrange/ + - title: Configuring Garbage Collection + path: /v1.2/docs/admin/garbage-collection/ + - title: Configuring Kubernetes with Salt + path: /v1.2/docs/admin/salt/ + - title: Best Practices for Configuration + path: /v1.2/docs/user-guide/config-best-practices/ + +- title: Application Management and Deployment + section: + - title: "Managing Applications: Prerequisites" + path: /v1.2/docs/user-guide/prereqs/ + - title: Managing Deployments + path: /v1.2/docs/user-guide/managing-deployments/ + - title: Deploying Applications + path: /v1.2/docs/user-guide/deploying-applications/ + - title: Launching, Exposing, and Killing Applications + path: /v1.2/docs/user-guide/quick-start/ + +- title: Testing and Monitoring + section: + - title: Simulating Large Test Loads + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/k8petstore + - title: Checking Pod Health + path: /v1.2/docs/user-guide/liveness/ + - title: Using Explorer to Examine the Runtime Environment + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/explorer + - title: Resource Usage Monitoring + path: /v1.2/docs/user-guide/monitoring/ + - title: Logging + path: /v1.2/docs/user-guide/logging/ diff --git a/_data/v1_2/overrides.yml b/_data/v1_2/overrides.yml new file mode 100644 index 0000000000..10e04424ca --- /dev/null +++ b/_data/v1_2/overrides.yml @@ -0,0 +1,16 @@ +overrides: +- path: v1.2/docs/man +- path: v1.2/docs/proposals +- path: 
v1.2/docs/api-reference +- path: v1.2/docs/user-guide/kubectl +- path: v1.2/docs/admin/kube-apiserver.md +- path: v1.2/docs/admin/kube-controller-manager.md +- path: v1.2/docs/admin/kube-proxy.md +- path: v1.2/docs/admin/kube-scheduler.md +- path: v1.2/docs/admin/kubelet.md +- path: v1.0/docs/user-guide/kubectl +- path: v1.0/docs/admin/kube-apiserver.md +- path: v1.0/docs/admin/kube-controller-manager.md +- path: v1.0/docs/admin/kube-proxy.md +- path: v1.0/docs/admin/kube-scheduler.md +- path: v1.0/docs/admin/kubelet.md \ No newline at end of file diff --git a/_data/v1_2/reference.yml b/_data/v1_2/reference.yml new file mode 100644 index 0000000000..c1062cd972 --- /dev/null +++ b/_data/v1_2/reference.yml @@ -0,0 +1,184 @@ +bigheader: "Reference Documentation" +abstract: "Design docs, concept definitions, and references for APIs and CLIs." +toc: +- title: Reference Documentation + path: /v1.2/reference/ + +- title: Kubernetes API + section: + - title: Kubernetes API Overview + path: /v1.2/docs/api/ + - title: Kubernetes API Operations + path: http://kubernetes.io/v1.2/docs/api-reference/v1/operations.html + - title: Kubernetes API Definitions + path: http://kubernetes.io/v1.2/docs/api-reference/v1/definitions.html + +- title: Extensions API + section: + - title: Extensions API Operations + path: http://kubernetes.io/v1.2/docs/api-reference/extensions/v1beta1/operations.html + - title: Extensions API Definitions + path: http://kubernetes.io/v1.2/docs/api-reference/extensions/v1beta1/definitions.html + +- title: kubectl + section: + - title: kubectl Overview + path: /v1.2/docs/user-guide/kubectl-overview/ + - title: kubectl for Docker Users + path: /v1.2/docs/user-guide/docker-cli-to-kubectl/ + - title: kubectl Commands + section: + - title: kubectl + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl.md + - title: kubectl annotate + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_annotate.md + - title: kubectl api-versions + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_api-versions.md + - title: kubectl apply + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_apply.md + - title: kubectl attach + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_attach.md + - title: kubectl autoscale + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_autoscale.md + - title: kubectl cluster-info + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_cluster-info.md + - title: kubectl config + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config.md + - title: kubectl config set-cluster + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_set-cluster.md + - title: kubectl config set-context + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_set-context.md + - title: kubectl set-credentials + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_set-credentials.md + - title: kubectl config set + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_set.md + - title: kubectl config unset + path: 
https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_unset.md + - title: kubectl config use-context + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_use-context.md + - title: kubectl config view + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_config_view.md + - title: kubectl create + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_create.md + - title: kubectl delete + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_delete.md + - title: kubectl describe + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_describe.md + - title: kubectl edit + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_edit.md + - title: kubectl exec + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_exec.md + - title: kubectl expose + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_expose.md + - title: kubectl get + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_get.md + - title: kubectl label + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_label.md + - title: kubectl logs + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_logs.md + - title: kubectl patch + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_patch.md + - title: kubectl port-forward + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_port-forward.md + - title: kubectl proxy + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_proxy.md + - title: kubectl replace + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_replace.md + - title: kubectl rolling-update + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_rolling-update.md + - title: kubectl run + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_run.md + - title: kubectl scale + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_scale.md + - title: kubectl stop + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_stop.md + - title: kubectl version + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/user-guide/kubectl/kubectl_version.md + +- title: kube-apiserver + section: + - title: Overview + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/admin/kube-apiserver.md + - title: Authorization Plugins + path: /v1.2/docs/admin/authorization/ + - title: Authentication + path: /v1.2/docs/admin/authentication/ + - title: Accessing the API + path: /v1.2/docs/admin/accessing-the-api/ + - title: Admission Controllers + path: /v1.2/docs/admin/admission-controllers/ + - title: Managing Service Accounts + path: /v1.2/docs/admin/service-accounts-admin/ + +- title: kub-scheduler + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/admin/kube-scheduler.md + +- title: kubelet + path: 
https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/admin/kubelet.md + +- title: kube-proxy + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/admin/kube-proxy.md + +- title: JSONpath + path: /v1.2/docs/user-guide/jsonpath/ + +- title: etcd + path: /v1.2/docs/admin/etcd/ + +- title: Concept Definitions + section: + - title: Container Environment + path: /v1.2/docs/user-guide/container-environment/ + - title: Images + path: /v1.2/docs/user-guide/images/ + - title: Pods + path: /v1.2/docs/user-guide/pods/ + - title: Labels and Selectors + path: /v1.2/docs/user-guide/labels/ + - title: Replication Controller + path: /v1.2/docs/user-guide/replication-controller/ + - title: Services + path: /v1.2/docs/user-guide/services/ + - title: Volumes + path: /v1.2/docs/user-guide/volumes/ + - title: Persistent Volumes + path: /v1.2/docs/user-guide/persistent-volumes/ + - title: Secrets + path: /v1.2/docs/user-guide/secrets/ + - title: Names + path: /v1.2/docs/user-guide/identifiers/ + - title: Namespaces + path: /v1.2/docs/user-guide/namespaces/ + - title: Nodes + path: /v1.2/docs/admin/node/ + - title: Service Accounts + path: /v1.2/docs/user-guide/service-accounts/ + - title: Annotations + path: /v1.2/docs/user-guide/annotations/ + - title: Daemon Sets + path: /v1.2/docs/admin/daemons/ + - title: Deployments + path: /v1.2/docs/user-guide/deployments/ + - title: Ingress Resources + path: /v1.2/docs/user-guide/ingress/ + - title: Horizontal Pod Autoscaling + path: /v1.2/docs/user-guide/horizontal-pod-autoscaler/ + - title: Jobs + path: /v1.2/docs/user-guide/jobs/ + - title: Resource Quotas + path: /v1.2/docs/admin/resource-quota/ + +- title: Kubernetes Design Docs + section: + - title: Kubernetes Architecture + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/architecture.md + - title: Kubernetes Design Overview + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/ + - title: Security in Kubernetes + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/security.md + - title: Kubernetes Identity and Access Management + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/access.md + - title: Security Contexts + path: https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/security_context.md + - title: Kubernetes OpenVSwitch GRE/VxLAN networking + path: /v1.2/docs/admin/ovs-networking/ \ No newline at end of file diff --git a/_data/v1_2/samples.yml b/_data/v1_2/samples.yml new file mode 100644 index 0000000000..3ae4cf2f37 --- /dev/null +++ b/_data/v1_2/samples.yml @@ -0,0 +1,54 @@ +bigheader: "Samples" +abstract: "A collection of example applications that show how to use Kubernetes." 
+toc: +- title: Samples + path: /v1.2/samples/ + +- title: Clustered Application Samples + section: + - title: Apache Cassandra Database + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/cassandra + - title: Apache Spark + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/spark + - title: Apache Storm + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/storm + - title: Distributed Task Queue + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/celery-rabbitmq + - title: Hazelcast + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/hazelcast + - title: Meteor Applications + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/meteor/ + - title: Redis + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/redis/ + - title: RethinkDB + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/rethinkdb/ + - title: Elasticsearch/Kibana Logging Demonstration + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/logging-demo/ + - title: Elasticsearch + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/elasticsearch/ + - title: OpenShift Origin + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/openshift-origin/ + - title: Ceph + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/rbd/ + - title: MEAN stack on Google Cloud Platform + path: /v1.2/docs/getting-started-guides/meanstack/ + +- title: Persistent Volume Samples + section: + - title: WordPress on a Kubernetes Persistent Volume + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/mysql-wordpress-pd/ + - title: GlusterFS + path: /https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/glusterfs/ + - title: iSCSI + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/iscsi/ + - title: NFS + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/nfs/ + +- title: Multi-tier Application Samples + section: + - title: Guestbook - Go Server + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/guestbook-go/ + - title: GuestBook - PHP Server + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/guestbook/ + - title: MySQL - Phabricator Server + path: https://github.com/kubernetes/kubernetes/tree/release-1.1/examples/phabricator/ diff --git a/_data/v1_2/support.yml b/_data/v1_2/support.yml new file mode 100644 index 0000000000..4185295cc8 --- /dev/null +++ b/_data/v1_2/support.yml @@ -0,0 +1,40 @@ +bigheader: "Support" +abstract: "Troubleshooting resources, frequently asked questions, and community support channels." 
+toc: +- title: Support + path: /v1.2/docs/troubleshooting/ + +- title: Troubleshooting + section: + - title: Web Interface + path: /v1.2/docs/user-guide/ui/ + - title: Troubleshooting Applications + path: /v1.2/docs/user-guide/application-troubleshooting/ + - title: Troubleshooting Clusters + path: /v1.2/docs/admin/cluster-troubleshooting/ + +- title: Frequently Asked Questions + section: + - title: User FAQ + path: https://github.com/kubernetes/kubernetes/wiki/User-FAQ/ + - title: Debugging FAQ + path: https://github.com/kubernetes/kubernetes/wiki/Debugging-FAQ/ + - title: Services FAQ + path: https://github.com/kubernetes/kubernetes/wiki/Services-FAQ/ + +- title: Other Resources + section: + - title: Known Issues + path: /v1.2/docs/user-guide/known-issues/ + - title: Kubernetes Issue Tracker on GitHub + path: https://github.com/kubernetes/kubernetes/issues/ + - title: Report a Security Vulnerability + path: /v1.2/docs/reporting-security-issues/ + - title: Release Notes + path: https://github.com/kubernetes/kubernetes/releases/ + - title: Release Roadmap + path: /v1.2/docs/roadmap/ + - title: Contributing to Kubernetes Documentation + path: /v1.2/editdocs/ + - title: Sitemap for v1.2 + path: /v1.2/pagelist/ diff --git a/_includes/docs/docs/admin/accessing-the-api.md b/_includes/docs/docs/admin/accessing-the-api.md new file mode 100644 index 0000000000..9c00b5d51c --- /dev/null +++ b/_includes/docs/docs/admin/accessing-the-api.md @@ -0,0 +1,73 @@ + +This document describes what ports the Kubernetes apiserver +may serve on and how to reach them. The audience is +cluster administrators who want to customize their cluster +or understand the details. + +Most questions about accessing the cluster are covered +in [Accessing the cluster](/{{page.version}}/docs/user-guide/accessing-the-cluster). + + +## Ports and IPs Served On + +The Kubernetes API is served by the Kubernetes apiserver process. Typically, +there is one of these running on a single kubernetes-master node. + +By default the Kubernetes APIserver serves HTTP on 2 ports: + + 1. Localhost Port + - serves HTTP + - default is port 8080, change with `--insecure-port` flag. + - defaults IP is localhost, change with `--insecure-bind-address` flag. + - no authentication or authorization checks in HTTP + - protected by need to have host access + 2. Secure Port + - default is port 6443, change with `--secure-port` flag. + - default IP is first non-localhost network interface, change with `--bind-address` flag. + - serves HTTPS. Set cert with `--tls-cert-file` and key with `--tls-private-key-file` flag. + - uses token-file or client-certificate based [authentication](/{{page.version}}/docs/admin/authentication). + - uses policy-based [authorization](/{{page.version}}/docs/admin/authorization). + 3. Removed: ReadOnly Port + - For security reasons, this had to be removed. Use the [service account](/{{page.version}}/docs/user-guide/service-accounts) feature instead. + +## Proxies and Firewall rules + +Additionally, in some configurations there is a proxy (nginx) running +on the same machine as the apiserver process. The proxy serves HTTPS protected +by Basic Auth on port 443, and proxies to the apiserver on localhost:8080. In +these configurations the secure port is typically set to 6443. + +A firewall rule is typically configured to allow external HTTPS access to port 443. + +The above are defaults and reflect how Kubernetes is deployed to Google Compute Engine using +kube-up.sh. Other cloud providers may vary. 
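+ +As a concrete illustration of the flags above, a master set up along these lines might start the apiserver with something like the following (a minimal sketch only: the certificate paths and addresses are illustrative, and the many other flags a real apiserver needs are omitted): + +```shell +# Serve HTTPS on all interfaces and plain HTTP only on localhost. +kube-apiserver \ + --bind-address=0.0.0.0 \ + --secure-port=6443 \ + --tls-cert-file=/srv/kubernetes/server.cert \ + --tls-private-key-file=/srv/kubernetes/server.key \ + --insecure-bind-address=127.0.0.1 \ + --insecure-port=8080 +``` +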
+ +## Use Cases vs IP:Ports + +There are three differently configured serving ports because there are a +variety of use cases: + + 1. Clients outside of a Kubernetes cluster, such as a human running `kubectl` + on a desktop machine. Currently, these access the Localhost Port via a proxy (nginx) + running on the `kubernetes-master` machine. The proxy can use cert-based authentication + or token-based authentication. + 2. Processes running in Containers on Kubernetes that need to read from + the apiserver. Currently, these can use a [service account](/{{page.version}}/docs/user-guide/service-accounts). + 3. Scheduler and Controller-manager processes, which need to do read-write + API operations. Currently, these have to run on the same host as the + apiserver and use the Localhost Port. In the future, these will be + switched to using service accounts to avoid the need to be co-located. + 4. Kubelets, which need to do read-write API operations and are necessarily + on different machines than the apiserver. Kubelets use the Secure Port + to get their pods, to find the services that a pod can see, and to + write events. Credentials are distributed to kubelets at cluster + setup time. Kubelet and kube-proxy can use cert-based authentication or token-based + authentication. + +## Expected changes + + - Policy will limit the actions kubelets can do via the authed port. + - Scheduler and Controller-manager will use the Secure Port too. They + will then be able to run on different machines than the apiserver. + + diff --git a/_includes/docs/docs/admin/admission-controllers.md b/_includes/docs/docs/admin/admission-controllers.md new file mode 100644 index 0000000000..460b155697 --- /dev/null +++ b/_includes/docs/docs/admin/admission-controllers.md @@ -0,0 +1,131 @@ + +* TOC +{:toc} + +## What are they? + +An admission control plug-in is a piece of code that intercepts requests to the Kubernetes +API server prior to persistence of the object, but after the request is authenticated +and authorized. The plug-in code is in the API server process +and must be compiled into the binary in order to be used at this time. + +Each admission control plug-in is run in sequence before a request is accepted into the cluster. If +any of the plug-ins in the sequence rejects the request, the entire request is rejected immediately +and an error is returned to the end-user. + +Admission control plug-ins may mutate the incoming object in some cases to apply system-configured +defaults. In addition, admission control plug-ins may mutate related resources as part of request +processing to do things like increment quota usage. + +## Why do I need them? + +Many advanced features in Kubernetes require an admission control plug-in to be enabled in order +to properly support the feature. As a result, a Kubernetes API server that is not properly +configured with the right set of admission control plug-ins is an incomplete server and will not +support all the features you expect. + +## How do I turn on an admission control plug-in? + +The Kubernetes API server supports a flag, `admission-control`, that takes a comma-delimited, +ordered list of admission control choices to invoke prior to modifying objects in the cluster. + +## What does each plug-in do? + +### AlwaysAdmit + +Use this plug-in by itself to pass through all requests. + +### AlwaysDeny + +Rejects all requests. Used for testing.
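+ +For example, while bringing up a test cluster you might start the apiserver with just one of these two trivial plug-ins to confirm that the admission-control chain is wired up (a sketch; every other apiserver flag is omitted here): + +```shell +# Pass every request through (effectively no admission control): +kube-apiserver --admission-control=AlwaysAdmit +# Reject every request; useful only for testing: +kube-apiserver --admission-control=AlwaysDeny +``` +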
+ +### DenyExecOnPrivileged (deprecated) + +This plug-in will intercept all requests to exec a command in a pod if that pod has a privileged container. + +If your cluster supports privileged containers, and you want to restrict the ability of end-users to exec +commands in those containers, we strongly encourage enabling this plug-in. + +This functionality has been merged into [DenyEscalatingExec](#denyescalatingexec). + +### DenyEscalatingExec + +This plug-in will deny exec and attach commands to pods that run with escalated privileges that +allow host access. This includes pods that run as privileged, have access to the host IPC namespace, and +have access to the host PID namespace. + +If your cluster supports containers that run with escalated privileges, and you want to +restrict the ability of end-users to exec commands in those containers, we strongly encourage +enabling this plug-in. + +### ServiceAccount + +This plug-in implements automation for [serviceAccounts](/{{page.version}}/docs/user-guide/service-accounts). +We strongly recommend using this plug-in if you intend to make use of Kubernetes `ServiceAccount` objects. + +### SecurityContextDeny + +This plug-in will deny any pod with a [SecurityContext](/{{page.version}}/docs/user-guide/security-context) that defines options that were not available on the `Container`. + +### ResourceQuota + +This plug-in will observe the incoming request and ensure that it does not violate any of the constraints +enumerated in the `ResourceQuota` object in a `Namespace`. If you are using `ResourceQuota` +objects in your Kubernetes deployment, you MUST use this plug-in to enforce quota constraints. + +See the [resourceQuota design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_resource_quota.md) and the [example of Resource Quota](/{{page.version}}/docs/admin/resourcequota/) for more details. + +It is strongly encouraged that this plug-in is configured last in the sequence of admission control plug-ins. This is +so that quota is not prematurely incremented only for the request to be rejected later in admission control. + +### LimitRanger + +This plug-in will observe the incoming request and ensure that it does not violate any of the constraints +enumerated in the `LimitRange` object in a `Namespace`. If you are using `LimitRange` objects in +your Kubernetes deployment, you MUST use this plug-in to enforce those constraints. LimitRanger can also +be used to apply default resource requests to Pods that don't specify any; currently, the default LimitRanger +applies a 0.1 CPU requirement to all Pods in the `default` namespace. + +See the [limitRange design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_limit_range.md) and the [example of Limit Range](/{{page.version}}/docs/admin/limitrange/) for more details. + +### InitialResources (experimental) + +This plug-in observes pod creation requests. If a container omits compute resource requests and limits, +then the plug-in auto-populates a compute resource request based on historical usage of containers running the same image. +If there is not enough data to make a decision the Request is left unchanged. +When the plug-in sets a compute resource request, it annotates the pod with information on what compute resources it auto-populated. + +See the [InitialResouces proposal](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/proposals/initial-resources.md) for more details. 
+ +### NamespaceExists (deprecated) + +This plug-in will observe all incoming requests that attempt to create a resource in a Kubernetes `Namespace` +and reject the request if the `Namespace` was not previously created. We strongly recommend running +this plug-in to ensure integrity of your data. + +The functionality of this admission controller has been merged into `NamespaceLifecycle` + +### NamespaceAutoProvision (deprecated) + +This plug-in will observe all incoming requests that attempt to create a resource in a Kubernetes `Namespace` +and create a new `Namespace` if one did not already exist previously. + +We strongly recommend `NamespaceLifecycle` over `NamespaceAutoProvision`. + +### NamespaceLifecycle + +This plug-in enforces that a `Namespace` that is undergoing termination cannot have new objects created in it, +and ensures that requests in a non-existant `Namespace` are rejected. + +A `Namespace` deletion kicks off a sequence of operations that remove all objects (pods, services, etc.) in that +namespace. In order to enforce integrity of that process, we strongly recommend running this plug-in. + +## Is there a recommended set of plug-ins to use? + +Yes. + +For Kubernetes 1.0, we strongly recommend running the following set of admission control plug-ins (order matters): + +```shell +--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +``` diff --git a/_includes/docs/docs/admin/authentication.md b/_includes/docs/docs/admin/authentication.md new file mode 100644 index 0000000000..fee71dbb18 --- /dev/null +++ b/_includes/docs/docs/admin/authentication.md @@ -0,0 +1,126 @@ + +Kubernetes uses client certificates, tokens, or http basic auth to authenticate users for API calls. + +**Client certificate authentication** is enabled by passing the `--client-ca-file=SOMEFILE` +option to apiserver. The referenced file must contain one or more certificates authorities +to use to validate client certificates presented to the apiserver. If a client certificate +is presented and verified, the common name of the subject is used as the user name for the +request. + +**Token File** is enabled by passing the `--token-auth-file=SOMEFILE` option +to apiserver. Currently, tokens last indefinitely, and the token list cannot +be changed without restarting apiserver. + +The token file format is implemented in `plugin/pkg/auth/authenticator/token/tokenfile/...` +and is a csv file with 3 columns: token, user name, user uid. + +When using token authentication from an http client the apiserver expects an `Authorization` +header with a value of `Bearer SOMETOKEN`. + +**OpenID Connect ID Token** is enabled by passing the following options to the apiserver: +- `--oidc-issuer-url` (required) tells the apiserver where to connect to the OpenID provider. Only HTTPS scheme will be accepted. +- `--oidc-client-id` (required) is used by apiserver to verify the audience of the token. +A valid [ID token](http://openid.net/specs/openid-connect-core-1_0/#IDToken) MUST have this +client-id in its `aud` claims. +- `--oidc-ca-file` (optional) is used by apiserver to establish and verify the secure connection +to the OpenID provider. +- `--oidc-username-claim` (optional, experimental) specifies which OpenID claim to use as the user name. By default, `sub` +will be used, which should be unique and immutable under the issuer's domain. Cluster administrator can +choose other claims such as `email` to use as the user name, but the uniqueness and immutability is not guaranteed. 
+ +Please note that this flag is still experimental until we settle more on how to handle the mapping of the OpenID user to the Kubernetes user. Thus further changes are possible. + +Currently, the ID token will be obtained by some third-party app. This means the app and apiserver +MUST share the `--oidc-client-id`. + +Like **Token File**, when using token authentication from an http client the apiserver expects +an `Authorization` header with a value of `Bearer SOMETOKEN`. + +**Basic authentication** is enabled by passing the `--basic-auth-file=SOMEFILE` +option to apiserver. Currently, the basic auth credentials last indefinitely, +and the password cannot be changed without restarting apiserver. Note that basic +authentication is currently supported for convenience while we finish making the +more secure modes described above easier to use. + +The basic auth file format is implemented in `plugin/pkg/auth/authenticator/password/passwordfile/...` +and is a csv file with 3 columns: password, user name, user id. + +When using basic authentication from an http client, the apiserver expects an `Authorization` header +with a value of `Basic BASE64ENCODED(USER:PASSWORD)`. + +**Keystone authentication** is enabled by passing the `--experimental-keystone-url=` +option to the apiserver during startup. The plugin is implemented in +`plugin/pkg/auth/authenticator/request/keystone/keystone.go`. +For details on how to use keystone to manage projects and users, refer to the +[Keystone documentation](http://docs.openstack.org/developer/keystone/). Please note that +this plugin is still experimental which means it is subject to changes. +Please refer to the [discussion](https://github.com/kubernetes/kubernetes/pull/11798#issuecomment-129655212) +and the [blueprint](https://github.com/kubernetes/kubernetes/issues/11626) for more details + +## Plugin Development + +We plan for the Kubernetes API server to issue tokens +after the user has been (re)authenticated by a *bedrock* authentication +provider external to Kubernetes. We plan to make it easy to develop modules +that interface between Kubernetes and a bedrock authentication provider (e.g. +github.com, google.com, enterprise directory, kerberos, etc.) + +## APPENDIX + +### Creating Certificates + +When using client certificate authentication, you can generate certificates manually or +using an existing deployment script. + +**Deployment script** is implemented at +`cluster/saltbase/salt/generate-cert/make-ca-cert.sh`. +Execute this script with two parameters. First is the IP address of apiserver, the second is +a list of subject alternate names in the form `IP: or DNS:`. +The script will generate three files:ca.crt, server.crt and server.key. +Finally, add these parameters +`--client-ca-file=/srv/kubernetes/ca.crt` +`--tls-cert-file=/srv/kubernetes/server.cert` +`--tls-private-key-file=/srv/kubernetes/server.key` +into apiserver start parameters. + +**easyrsa** can be used to manually generate certificates for your cluster. + +1. Download, unpack, and initialize the patched version of easyrsa3. + + curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz + tar xzf easy-rsa.tar.gz + cd easy-rsa-master/easyrsa3 + ./easyrsa init-pki +1. Generate a CA. (`--batch` set automatic mode. `--req-cn` default CN to use.) + + ./easyrsa --batch "--req-cn=${MASTER_IP}@`date +%s`" build-ca nopass +1. Generate server certificate and key. 
+ (build-server-full [filename]: Generate a keypair and sign locally for a client or server) + + ./easyrsa --subject-alt-name="IP:${MASTER_IP}" build-server-full kubernetes-master nopass +1. Copy `pki/ca.crt`, `pki/issued/kubernetes-master.crt`, and + `pki/private/kubernetes-master.key` to your directory. +1. Remember to fill in the parameters + `--client-ca-file=/yourdirectory/ca.crt` + `--tls-cert-file=/yourdirectory/server.cert` + `--tls-private-key-file=/yourdirectory/server.key` + and add these into the apiserver start parameters. + +**openssl** can also be used to manually generate certificates for your cluster. + +1. Generate a 2048-bit ca.key: + `openssl genrsa -out ca.key 2048` +1. Based on ca.key, generate a ca.crt (`-days` sets how long the certificate remains valid): + `openssl req -x509 -new -nodes -key ca.key -subj "/CN=${MASTER_IP}" -days 10000 -out ca.crt` +1. Generate a 2048-bit server.key: + `openssl genrsa -out server.key 2048` +1. Based on server.key, generate a server.csr: + `openssl req -new -key server.key -subj "/CN=${MASTER_IP}" -out server.csr` +1. Based on ca.key, ca.crt and server.csr, generate the server.crt: + `openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt + -days 10000` +1. View the certificate: + `openssl x509 -noout -text -in ./server.crt` + Finally, do not forget to fill in the same parameters and add them to the apiserver start parameters. + + diff --git a/_includes/docs/docs/admin/authorization.md b/_includes/docs/docs/admin/authorization.md new file mode 100644 index 0000000000..ef61e03caf --- /dev/null +++ b/_includes/docs/docs/admin/authorization.md @@ -0,0 +1,129 @@ + +In Kubernetes, authorization happens as a separate step from authentication. +See the [authentication documentation](/{{page.version}}/docs/admin/authentication) for an +overview of authentication. + +Authorization applies to all HTTP accesses on the main (secure) apiserver port. + +The authorization check for any request compares attributes of the context of +the request (such as user, resource, and namespace) with access +policies. An API call must be allowed by some policy in order to proceed. + +The following implementations are available, and are selected by flag: + + - `--authorization-mode=AlwaysDeny` + - `--authorization-mode=AlwaysAllow` + - `--authorization-mode=ABAC` + +`AlwaysDeny` blocks all requests (used in tests). +`AlwaysAllow` allows all requests; use it if you don't need authorization. +`ABAC` allows for user-configured authorization policy. ABAC stands for Attribute-Based Access Control. + +## ABAC Mode + +### Request Attributes + +A request has 5 attributes that can be considered for authorization: + + - user (the user-string which a user was authenticated as). + - group (the list of group names the authenticated user is a member of). + - whether the request is readonly (GETs are readonly). + - what resource is being accessed. + - applies only to the API endpoints, such as + `/api/v1/namespaces/default/pods`. For miscellaneous endpoints, like `/version`, the + resource is the empty string. + - the namespace of the object being accessed, or the empty string if the + endpoint does not support namespaced objects. + +We anticipate adding more attributes to allow finer-grained access control and +to assist in policy management. + +### Policy File Format + +For mode `ABAC`, also specify `--authorization-policy-file=SOME_FILENAME`. + +The file format is [one JSON object per line](http://jsonlines.org/).
There should be no enclosing list or map, just +one map per line. + +Each line is a "policy object". A policy object is a map with the following properties: + + - `user`, type string; the user-string from `--token-auth-file`. If you specify `user`, it must match the username of the authenticated user. + - `group`, type string; if you specify `group`, it must match one of the groups of the authenticated user. + - `readonly`, type boolean, when true, means that the policy only applies to GET + operations. + - `resource`, type string; a resource from a URL, such as `pods`. + - `namespace`, type string; a namespace string. + +An unset property is the same as a property set to the zero value for its type (e.g. empty string, 0, false). +However, unset should be preferred for readability. + +In the future, policies may be expressed in a JSON format, and managed via a REST +interface. + +### Authorization Algorithm + +A request has attributes which correspond to the properties of a policy object. + +When a request is received, the attributes are determined. Unknown attributes +are set to the zero value of their type (e.g. empty string, 0, false). + +An unset property will match any value of the corresponding +attribute. An unset attribute will match any value of the corresponding property. + +The tuple of attributes is checked for a match against every policy in the policy file. +If at least one line matches the request attributes, then the request is authorized (but may fail later validation). + +To permit any user to do something, write a policy with the user property unset. +A policy with an unset namespace applies regardless of namespace. + +### Examples + + 1. Alice can do anything: `{"user":"alice"}` + 2. Kubelet can read any pods: `{"user":"kubelet", "resource": "pods", "readonly": true}` + 3. Kubelet can read and write events: `{"user":"kubelet", "resource": "events"}` + 4. Bob can just read pods in namespace "projectCaribou": `{"user":"bob", "resource": "pods", "readonly": true, "namespace": "projectCaribou"}` + +[Complete file example](http://releases.k8s.io/{{page.githubbranch}}/pkg/auth/authorizer/abac/example_policy_file.jsonl) + +### A quick note on service accounts + +A service account automatically generates a user. The user's name is generated according to the naming convention: + +```shell +system:serviceaccount:<namespace>:<serviceaccountname> +``` +Creating a new namespace also causes a new service account of this form to be created: + +```shell +system:serviceaccount:<namespace>:default +``` + +For example, if you wanted to grant the default service account in the kube-system namespace full privilege to the API, you would add this line to your policy file: + +```json +{"user":"system:serviceaccount:kube-system:default"} +``` + +The apiserver will need to be restarted to pick up the new policy lines. + +## Plugin Development + +Other implementations can be developed fairly easily. +The apiserver calls the Authorizer interface: + +```go +type Authorizer interface { + Authorize(a Attributes) error +} +``` + +to determine whether or not to allow each API action. + +An authorization plugin is a module that implements this interface. +Authorization plugin code goes in `pkg/auth/authorizer/$MODULENAME`. + +An authorization module can be completely implemented in Go, or can call out +to a remote authorization service. Authorization modules can implement +their own caching to reduce the cost of repeated authorization calls with the +same or similar arguments.
Developers should then consider the interaction between +caching and revocation of permissions. \ No newline at end of file diff --git a/_includes/docs/docs/admin/cluster-components.md b/_includes/docs/docs/admin/cluster-components.md new file mode 100644 index 0000000000..a7331432cf --- /dev/null +++ b/_includes/docs/docs/admin/cluster-components.md @@ -0,0 +1,116 @@ + +This document outlines the various binary components that need to run to +deliver a functioning Kubernetes cluster. + +## Master Components + +Master components are those that provide the cluster's control plane. For +example, master components are responsible for making global decisions about the +cluster (e.g., scheduling), and detecting and responding to cluster events +(e.g., starting up a new pod when a replication controller's 'replicas' field is +unsatisfied). + +Master components could in theory be run on any node in the cluster. However, +for simplicity, current set up scripts typically start all master components on +the same VM, and does not run user containers on this VM. See +[high-availability.md](/{{page.version}}/docs/admin/high-availability) for an example multi-master-VM setup. + +Even in the future, when Kubernetes is fully self-hosting, it will probably be +wise to only allow master components to schedule on a subset of nodes, to limit +co-running with user-run pods, reducing the possible scope of a +node-compromising security exploit. + +### kube-apiserver + +[kube-apiserver](/{{page.version}}/docs/admin/kube-apiserver) exposes the Kubernetes API; it is the front-end for the +Kubernetes control plane. It is designed to scale horizontally (i.e., one scales +it by running more of them-- [high-availability.md](/{{page.version}}/docs/admin/high-availability)). + +### etcd + +[etcd](/{{page.version}}/docs/admin/etcd) is used as Kubernetes' backing store. All cluster data is stored here. +Proper administration of a Kubernetes cluster includes a backup plan for etcd's +data. + +### kube-controller-manager + +[kube-controller-manager](/{{page.version}}/docs/admin/kube-controller-manager) is a binary that runs controllers, which are the +background threads that handle routine tasks in the cluster. Logically, each +controller is a separate process, but to reduce the number of moving pieces in +the system, they are all compiled into a single binary and run in a single +process. + +These controllers include: + +* Node Controller + * Responsible for noticing & responding when nodes go down. +* Replication Controller + * Responsible for maintaining the correct number of pods for every replication + controller object in the system. +* Endpoints Controller + * Populates the Endpoints object (i.e., join Services & Pods). +* Service Account & Token Controllers + * Create default accounts and API access tokens for new namespaces. +* ... and others. + +### kube-scheduler + +[kube-scheduler](/{{page.version}}/docs/admin/kube-scheduler) watches newly created pods that have no node assigned, and +selects a node for them to run on. + +### addons + +Addons are pods and services that implement cluster features. They don't run on +the master VM, but currently the default setup scripts that make the API calls +to create these pods and services does run on the master VM. See: +[kube-master-addons](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/kube-master-addons/kube-master-addons.sh) + +Addon objects are created in the "kube-system" namespace. 
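+ +Since the addon objects all live in that namespace, a quick way to see which addons a particular cluster is running is to list them there (this assumes `kubectl` is already configured to talk to your cluster): + +```shell +# Addon pods, services, and replication controllers are created in kube-system. +kubectl get pods,services,replicationcontrollers --namespace=kube-system +``` +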
+ +Example addons are: +* [DNS](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/) provides cluster local DNS. +* [kube-ui](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/kube-ui/) provides a graphical UI for the + cluster. +* [fluentd-elasticsearch](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/fluentd-elasticsearch/) provides + log storage. Also see the [gcp version](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/fluentd-gcp/). +* [cluster-monitoring](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/) provides + monitoring for the cluster. + +## Node components + +Node components run on every node, maintaining running pods and providing them +the Kubernetes runtime environment. + +### kubelet + +[kubelet](/{{page.version}}/docs/admin/kubelet) is the primary node agent. It: +* Watches for pods that have been assigned to its node (either by apiserver + or via local configuration file) and: + * Mounts the pod's required volumes + * Downloads the pod's secrets + * Run the pod's containers via docker (or, experimentally, rkt). + * Periodically executes any requested container liveness probes. + * Reports the status of the pod back to the rest of the system, by creating a + "mirror pod" if necessary. +* Reports the status of the node back to the rest of the system. + +### kube-proxy + +[kube-proxy](/{{page.version}}/docs/admin/kube-proxy) enables the Kubernetes service abstraction by maintaining +network rules on the host and performing connection forwarding. + +### docker + +`docker` is of course used for actually running containers. + +### rkt + +`rkt` is supported experimentally as an alternative to docker. + +### supervisord + +`supervisord` is a lightweight process babysitting system for keeping kubelet and docker +running. + + + diff --git a/_includes/docs/docs/admin/cluster-large.md b/_includes/docs/docs/admin/cluster-large.md new file mode 100644 index 0000000000..fbdf48e6bf --- /dev/null +++ b/_includes/docs/docs/admin/cluster-large.md @@ -0,0 +1,67 @@ + + +## Support + +At v1.0, Kubernetes supports clusters up to 100 nodes with 30 pods per node and 1-2 containers per pod. + +* TOC +{:toc} + +## Setup + +A cluster is a set of nodes (physical or virtual machines) running Kubernetes agents, managed by a "master" (the cluster-level control plane). + +Normally the number of nodes in a cluster is controlled by the the value `NUM_MINIONS` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/{{page.githubbranch}}/cluster/gce/config-default.sh)). + +Simply changing that value to something very large, however, may cause the setup script to fail for many cloud providers. A GCE deployment, for example, will run in to quota issues and fail to bring the cluster up. + +When setting up a large Kubernetes cluster, the following issues must be considered. + +### Quota Issues + +To avoid running into cloud provider quota issues, when creating a cluster with many nodes, consider: + +* Increase the quota for things like CPU, IPs, etc. 
+ * In [GCE, for example,](https://cloud.google.com/compute/docs/resource-quotas) you'll want to increase the quota for: + * CPUs + * VM instances + * Total persistent disk reserved + * In-use IP addresses + * Firewall Rules + * Forwarding rules + * Routes + * Target pools +* Gating the setup script so that it brings up new node VMs in smaller batches with waits in between, because some cloud providers rate limit the creation of VMs. + +### Addon Resources + +To prevent memory leaks or other resource issues in [cluster addons](https://releases.k8s.io/{{page.githubbranch}}/cluster/addons) from consuming all the resources available on a node, Kubernetes sets resource limits on addon containers to limit the CPU and Memory resources they can consume (See PR [#10653](http://pr.k8s.io/10653/files) and [#10778](http://pr.k8s.io/10778/files)). + +For example: + +```yaml +containers: + - image: gcr.io/google_containers/heapster:v0.15.0 + name: heapster + resources: + limits: + cpu: 100m + memory: 200Mi +``` + +These limits, however, are based on data collected from addons running on 4-node clusters (see [#10335](http://issue.k8s.io/10335#issuecomment-117861225)). The addons consume a lot more resources when running on large deployment clusters (see [#5880](http://issue.k8s.io/5880#issuecomment-113984085)). So, if a large cluster is deployed without adjusting these values, the addons may continuously get killed because they keep hitting the limits. + +To avoid running into cluster addon resource issues, when creating a cluster with many nodes, consider the following: + +- Scale memory and CPU limits for each of the following addons, if used, along with the size of cluster (there is one replica of each handling the entire cluster so memory and CPU usage tends to grow proportionally with size/load on cluster): + - Heapster ([GCM/GCL backed](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/google/heapster-controller.yaml), [InfluxDB backed](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml), [InfluxDB/GCL backed](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml), [standalone](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml)) + * [InfluxDB and Grafana](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml) + * [skydns, kube2sky, and dns etcd](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/skydns-rc.yaml.in) + * [Kibana](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml) +* Scale number of replicas for the following addons, if used, along with the size of cluster (there are multiple replicas of each so increasing replicas should help handle increased load, but, since load per replica also increases slightly, also consider increasing CPU/memory limits): + * [elasticsearch](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/fluentd-elasticsearch/es-controller.yaml) +* Increase memory and CPU limits slightly for each of the following addons, if used, along with the size of cluster (there is one replica per node but CPU/memory usage increases slightly along with cluster load/size as well): + * [FluentD with ElasticSearch Plugin](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml) + * 
[FluentD with GCP Plugin](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml)
+
+For directions on how to detect if addon containers are hitting resource limits, see the [Troubleshooting section of Compute Resources](/{{page.version}}/docs/user-guide/compute-resources/#troubleshooting).
\ No newline at end of file
diff --git a/_includes/docs/docs/admin/cluster-management.md b/_includes/docs/docs/admin/cluster-management.md
new file mode 100644
index 0000000000..e1cd8973f7
--- /dev/null
+++ b/_includes/docs/docs/admin/cluster-management.md
@@ -0,0 +1,185 @@
+
+This document describes several topics related to the lifecycle of a cluster: creating a new cluster,
+upgrading your cluster's master and worker nodes, performing node maintenance (e.g. kernel upgrades),
+and upgrading the Kubernetes API version of a running cluster.
+
+## Creating and configuring a Cluster
+
+To install Kubernetes on a set of machines, consult one of the existing [Getting Started guides](/{{page.version}}/docs/getting-started-guides/) depending on your environment.
+
+## Upgrading a cluster
+
+The current state of cluster upgrades is provider dependent.
+
+### Master Upgrades
+
+Both Google Container Engine (GKE) and
+Compute Engine Open Source (GCE-OSS) support master upgrades via a [Managed Instance Group](https://cloud.google.com/compute/docs/instance-groups/).
+Managed Instance Group upgrades sequentially delete and recreate each virtual machine, while maintaining the same
+Persistent Disk (PD) to ensure that data is retained across the upgrade.
+
+In contrast, the `kube-push.sh` process used on [other platforms](#other-platforms) attempts to upgrade the binaries in
+place, without recreating the virtual machines.
+
+### Node Upgrades
+
+Node upgrades for GKE and GCE-OSS again use a Managed Instance Group; each node is sequentially destroyed and then recreated with new software. Any Pods that are running
+on that node need to be controlled by a Replication Controller, or manually re-created after the rollout.
+
+For other platforms, `kube-push.sh` is again used, performing an in-place binary upgrade on existing machines.
+
+### Upgrading Google Container Engine (GKE)
+
+Google Container Engine automatically updates master components (e.g. `kube-apiserver`, `kube-scheduler`) to the latest
+version. It also handles upgrading the operating system and other components that the master runs on.
+
+The node upgrade process is user-initiated and is described in the [GKE documentation](https://cloud.google.com/container-engine/docs/clusters/upgrade).
+
+### Upgrading open source Google Compute Engine clusters
+
+Upgrades on open source Google Compute Engine (GCE) clusters are controlled by the `cluster/gce/upgrade.sh` script.
+
+Get its usage by running `cluster/gce/upgrade.sh -h`.
+
+For example, to upgrade just your master to a specific version (v1.0.2):
+
+```shell
+cluster/gce/upgrade.sh -M v1.0.2
+```
+
+Alternatively, to upgrade your entire cluster to the latest stable release:
+
+```shell
+cluster/gce/upgrade.sh release/stable
+```
+
+### Other platforms
+
+The `cluster/kube-push.sh` script will do a rudimentary update. This process is still quite experimental; we
+recommend testing the upgrade on an experimental cluster before performing the update on a production cluster.
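+
+As a quick sanity check (a minimal sketch; the exact upgrade invocation depends on your platform, and output formats vary by release), you can compare what the cluster reports before and after running the upgrade:
+
+```shell
+# Check the apiserver (and client) version before and after the upgrade.
+kubectl version
+# Confirm that all nodes re-registered and are Ready after a node upgrade.
+kubectl get nodes
+```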
+
+## Resizing a cluster
+
+If your cluster runs short on resources, you can easily add more machines to it if your cluster is running in [Node self-registration mode](/{{page.version}}/docs/admin/node/#self-registration-of-nodes).
+If you're using GCE or GKE, you do this by resizing the Instance Group that manages your Nodes. You can modify the number of instances on the
+`Compute > Compute Engine > Instance groups > your group > Edit group` [Google Cloud Console page](https://console.developers.google.com) or by using the gcloud CLI:
+
+```shell
+gcloud compute instance-groups managed --zone compute-zone resize my-cluster-minion-group --new-size 42
+```
+
+The Instance Group will take care of putting the appropriate image on the new machines and starting them, while the Kubelet will register its Node with the API server to make it available for scheduling. If you scale the instance group down, the system will randomly choose Nodes to kill.
+
+In other environments you may need to configure the machine yourself and tell the Kubelet on which machine the API server is running.
+
+
+### Horizontal auto-scaling of nodes (GCE)
+
+If you are using GCE, you can configure your cluster so that the number of nodes will be automatically scaled based on their CPU and memory utilization.
+Before setting up the cluster with `kube-up.sh`, you can set the `KUBE_ENABLE_NODE_AUTOSCALE` environment variable to `true` and export it.
+The script will create an autoscaler for the instance group managing your nodes.
+
+The autoscaler will try to maintain the average CPU and memory utilization of nodes within the cluster close to the target value.
+The target value can be configured with the `KUBE_TARGET_NODE_UTILIZATION` environment variable (default: 0.7) for `kube-up.sh` when creating the cluster.
+Node utilization is the node's total CPU/memory usage (OS + k8s + user load) divided by the node's capacity.
+If the desired numbers of nodes in the cluster resulting from CPU utilization and memory utilization are different,
+the autoscaler will choose the bigger number.
+The number of nodes in the cluster set by the autoscaler will be limited from `KUBE_AUTOSCALER_MIN_NODES` (default: 1)
+to `KUBE_AUTOSCALER_MAX_NODES` (default: the initial number of nodes in the cluster).
+
+The autoscaler is implemented as a Compute Engine Autoscaler.
+The initial values of the autoscaler parameters set by `kube-up.sh` and some more advanced options can be tweaked on the
+`Compute > Compute Engine > Instance groups > your group > Edit group` [Google Cloud Console page](https://console.developers.google.com)
+or using the gcloud CLI:
+
+```shell
+gcloud preview autoscaler --zone compute-zone
+```
+
+Note that autoscaling will work properly only if node metrics are accessible in Google Cloud Monitoring. To make the metrics accessible, you need to create your cluster with `KUBE_ENABLE_CLUSTER_MONITORING` equal to `google` or `googleinfluxdb` (`googleinfluxdb` is the default value).
+
+## Maintenance on a Node
+
+If you need to reboot a node (such as for a kernel upgrade, libc upgrade, hardware repair, etc.), and the downtime is
+brief, then when the Kubelet restarts, it will attempt to restart the pods scheduled to it. If the reboot takes longer,
+then the node controller will terminate the pods that are bound to the unavailable node. If there is a corresponding
+replication controller, then a new copy of the pod will be started on a different node. So, in the case where all
+pods are replicated, upgrades can be done without special coordination, assuming that not all nodes will go down at the same time.
+
+If you want more control over the upgrading process, you may use the following workflow:
+
+Mark the node to be rebooted as unschedulable:
+
+```shell
+kubectl replace nodes $NODENAME --patch='{"apiVersion": "v1", "spec": {"unschedulable": true}}'
+```
+
+This keeps new pods from landing on the node while you are trying to get them off.
+
+Get the pods off the machine, via any of the following strategies:
+  * Wait for finite-duration pods to complete.
+  * Delete pods with:
+
+```shell
+kubectl delete pods $PODNAME
+```
+
+For pods with a replication controller, the pod will eventually be replaced by a new pod which will be scheduled to a new node. Additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod.
+
+For pods with no replication controller, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it.
+
+Perform maintenance work on the node.
+
+Make the node schedulable again:
+
+```shell
+kubectl replace nodes $NODENAME --patch='{"apiVersion": "v1", "spec": {"unschedulable": false}}'
+```
+
+If you deleted the node's VM instance and created a new one, then a new schedulable node resource will
+be created automatically when you create a new VM instance (if you're using a cloud provider that supports
+node discovery; currently this is only Google Compute Engine, not including CoreOS on Google Compute Engine using kube-register). See [Node](/{{page.version}}/docs/admin/node) for more details.
+
+## Advanced Topics
+
+### Upgrading to a different API version
+
+When a new API version is released, you may need to upgrade a cluster to support the new API version (e.g. switching from 'v1' to 'v2' when 'v2' is launched).
+
+This is an infrequent event, but it requires careful management. There is a sequence of steps to upgrade to a new API version.
+
+ 1. Turn on the new API version.
+ 1. Upgrade the cluster's storage to use the new version.
+ 1. Upgrade all config files. Identify users of the old API version endpoints.
+ 1. Update existing objects in the storage to the new version by running `cluster/update-storage-objects.sh`.
+ 1. Turn off the old API version.
+
+### Turn on or off an API version for your cluster
+
+Specific API versions can be turned on or off with the API server's `--runtime-config` flag. For example: to turn off the v1 API, pass `--runtime-config=api/v1=false`.
+`--runtime-config` also supports two special keys, `api/all` and `api/legacy`, to control all and legacy APIs respectively.
+For example, to turn off all API versions except v1, pass `--runtime-config=api/all=false,api/v1=true`.
+For the purposes of these flags, _legacy_ APIs are those APIs which have been explicitly deprecated (e.g. `v1beta3`).
+
+### Switching your cluster's storage API version
+
+The objects that are stored to disk for a cluster's internal representation of the Kubernetes resources active in the cluster are written using a particular version of the API.
+When the supported API changes, these objects may need to be rewritten in the newer API. Failure to do this will eventually result in resources that are no longer decodable or usable
+by the Kubernetes API server.
+
+The `KUBE_API_VERSIONS` environment variable for the `kube-apiserver` binary controls the API versions that are supported in the cluster.
The first version in the list is used as the cluster's storage version. Hence, to set a specific version as the storage version, bring it to the front of list of versions in the value of `KUBE_API_VERSIONS`. You need to restart the `kube-apiserver` binary +for changes to this variable to take effect. + +### Switching your config files to a new API version + +You can use the `kube-version-change` utility to convert config files between different API versions. + +```shell +$ hack/build-go.sh cmd/kube-version-change +$ _output/local/go/bin/kube-version-change -i myPod.v1beta3.yaml -o myPod.v1.yaml +``` diff --git a/_includes/docs/docs/admin/cluster-troubleshooting.md b/_includes/docs/docs/admin/cluster-troubleshooting.md new file mode 100644 index 0000000000..ab7a546804 --- /dev/null +++ b/_includes/docs/docs/admin/cluster-troubleshooting.md @@ -0,0 +1,110 @@ + +This doc is about cluster troubleshooting; we assume you have already ruled out your application as the root cause of the +problem you are experiencing. See +the [application troubleshooting guide](/{{page.version}}/docs/user-guide/application-troubleshooting) for tips on application debugging. +You may also visit [troubleshooting document](/{{page.version}}/docs/troubleshooting/) for more information. + +## Listing your cluster + +The first thing to debug in your cluster is if your nodes are all registered correctly. + +Run + +```shell +kubectl get nodes +``` + +And verify that all of the nodes you expect to see are present and that they are all in the `Ready` state. + +## Looking at logs + +For now, digging deeper into the cluster requires logging into the relevant machines. Here are the locations +of the relevant log files. (note that on systemd-based systems, you may need to use `journalctl` instead) + +### Master + + * /var/log/kube-apiserver.log - API Server, responsible for serving the API + * /var/log/kube-scheduler.log - Scheduler, responsible for making scheduling decisions + * /var/log/kube-controller-manager.log - Controller that manages replication controllers + +### Worker Nodes + + * /var/log/kubelet.log - Kubelet, responsible for running containers on the node + * /var/log/kube-proxy.log - Kube Proxy, responsible for service load balancing + +## A general overview of cluster failure modes + +This is an incomplete list of things that could go wrong, and how to adjust your cluster setup to mitigate the problems. + +Root causes: + + - VM(s) shutdown + - Network partition within cluster, or between cluster and users + - Crashes in Kubernetes software + - Data loss or unavailability of persistent storage (e.g. GCE PD or AWS EBS volume) + - Operator error, e.g. 
misconfigured Kubernetes software or application software
+
+Specific scenarios:
+
+ - Apiserver VM shutdown or apiserver crashing
+   - Results
+     - unable to stop, update, or start new pods, services, replication controllers
+     - existing pods and services should continue to work normally, unless they depend on the Kubernetes API
+ - Apiserver backing storage lost
+   - Results
+     - apiserver should fail to come up
+     - kubelets will not be able to reach it but will continue to run the same pods and provide the same service proxying
+     - manual recovery or recreation of apiserver state necessary before apiserver is restarted
+ - Supporting services (node controller, replication controller manager, scheduler, etc.) VM shutdown or crashes
+   - currently those are colocated with the apiserver, and their unavailability has similar consequences to apiserver unavailability
+   - in the future, these will be replicated as well and may not be co-located
+   - they do not have their own persistent state
+ - Individual node (VM or physical machine) shuts down
+   - Results
+     - pods on that Node stop running
+ - Network partition
+   - Results
+     - partition A thinks the nodes in partition B are down; partition B thinks the apiserver is down. (Assuming the master VM ends up in partition A.)
+ - Kubelet software fault
+   - Results
+     - crashing kubelet cannot start new pods on the node
+     - kubelet might or might not delete the pods
+     - node marked unhealthy
+     - replication controllers start new pods elsewhere
+ - Cluster operator error
+   - Results
+     - loss of pods, services, etc.
+     - loss of apiserver backing store
+     - users unable to read API
+     - etc.
+
+Mitigations:
+
+- Action: Use IaaS provider's automatic VM restarting feature for IaaS VMs
+  - Mitigates: Apiserver VM shutdown or apiserver crashing
+  - Mitigates: Supporting services VM shutdown or crashes
+
+- Action: Use IaaS provider's reliable storage (e.g. GCE PD or AWS EBS volume) for VMs with apiserver+etcd
+  - Mitigates: Apiserver backing storage lost
+
+- Action: Use (experimental) [high-availability](/{{page.version}}/docs/admin/high-availability) configuration
+  - Mitigates: Master VM shutdown or master components (scheduler, API server, controller-manager) crashing
+    - Will tolerate one or more simultaneous node or component failures
+  - Mitigates: Apiserver backing storage (i.e., etcd's data directory) lost
+    - Assuming you used clustered etcd.
+
+- Action: Snapshot apiserver PDs/EBS-volumes periodically
+  - Mitigates: Apiserver backing storage lost
+  - Mitigates: Some cases of operator error
+  - Mitigates: Some cases of Kubernetes software fault
+
+- Action: Use replication controllers and services in front of pods (see the sketch after this list)
+  - Mitigates: Node shutdown
+  - Mitigates: Kubelet software fault
+
+- Action: Design applications (containers) to tolerate unexpected restarts
+  - Mitigates: Node shutdown
+  - Mitigates: Kubelet software fault
+
+- Action: Use [multiple independent clusters](/{{page.version}}/docs/admin/multi-cluster) (and avoid making risky changes to all clusters at once)
+  - Mitigates: Everything listed above.
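+
+As a minimal sketch of the "replication controllers and services in front of pods" mitigation (the name and image below are illustrative, not taken from a specific guide), a replication controller keeps a desired number of replicas running, so pods lost to a node shutdown are recreated elsewhere:
+
+```yaml
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: frontend-example        # illustrative name
+spec:
+  replicas: 3                   # pods are recreated on surviving nodes if a node is lost
+  selector:
+    app: frontend-example
+  template:
+    metadata:
+      labels:
+        app: frontend-example
+    spec:
+      containers:
+      - name: frontend
+        image: nginx            # illustrative image
+        ports:
+        - containerPort: 80
+```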
\ No newline at end of file
diff --git a/_includes/docs/docs/admin/daemon.yaml b/_includes/docs/docs/admin/daemon.yaml
new file mode 100644
index 0000000000..c5cd14a592
--- /dev/null
+++ b/_includes/docs/docs/admin/daemon.yaml
@@ -0,0 +1,18 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: prometheus-node-exporter
+spec:
+  template:
+    metadata:
+      name: prometheus-node-exporter
+      labels:
+        daemon: prom-node-exp
+    spec:
+      containers:
+      - name: c
+        image: prom/prometheus
+        ports:
+        - containerPort: 9090
+          hostPort: 9090
+          name: serverport
diff --git a/_includes/docs/docs/admin/daemons.md b/_includes/docs/docs/admin/daemons.md
new file mode 100644
index 0000000000..242ef85502
--- /dev/null
+++ b/_includes/docs/docs/admin/daemons.md
@@ -0,0 +1,167 @@
+
+* TOC
+{:toc}
+
+## What is a _Daemon Set_?
+
+A _Daemon Set_ ensures that all (or some) nodes run a copy of a pod. As nodes are added to the
+cluster, pods are added to them. As nodes are removed from the cluster, those pods are garbage
+collected. Deleting a Daemon Set will clean up the pods it created.
+
+Some typical uses of a Daemon Set are:
+
+- running a cluster storage daemon, such as `glusterd` or `ceph`, on each node.
+- running a logs collection daemon on every node, such as `fluentd` or `logstash`.
+- running a node monitoring daemon on every node, such as [Prometheus Node Exporter](
+  https://github.com/prometheus/node_exporter), `collectd`, New Relic agent, or Ganglia `gmond`.
+
+In a simple case, one Daemon Set, covering all nodes, would be used for each type of daemon.
+A more complex setup might use multiple DaemonSets for a single type of daemon,
+but with different flags and/or different memory and CPU requests for different hardware types.
+
+## Writing a DaemonSet Spec
+
+### Required Fields
+
+As with all other Kubernetes config, a DaemonSet needs `apiVersion`, `kind`, and `metadata` fields. For
+general information about working with config files, see [here](/{{page.version}}/docs/user-guide/simple-yaml),
+[here](/{{page.version}}/docs/user-guide/configuring-containers), and [here](/{{page.version}}/docs/user-guide/working-with-resources).
+
+A DaemonSet also needs a [`.spec`](/{{page.version}}/docs/devel/api-conventions/#spec-and-status) section.
+
+### Pod Template
+
+The `.spec.template` is the only required field of the `.spec`.
+
+The `.spec.template` is a [pod template](/{{page.version}}/docs/user-guide/replication-controller/#pod-template).
+It has exactly the same schema as a [pod](/{{page.version}}/docs/user-guide/pods), except
+it is nested and does not have an `apiVersion` or `kind`.
+
+In addition to required fields for a pod, a pod template in a DaemonSet has to specify appropriate
+labels (see [pod selector](#pod-selector)).
+
+A pod template in a DaemonSet must have a [`RestartPolicy`](/{{page.version}}/docs/user-guide/pod-states)
+ equal to `Always`, or be unspecified, which defaults to `Always`.
+
+### Pod Selector
+
+The `.spec.selector` field is a pod selector. It works the same as the `.spec.selector` of
+a [ReplicationController](/{{page.version}}/docs/user-guide/replication-controller) or
+[Job](/{{page.version}}/docs/user-guide/jobs).
+
+If the `.spec.selector` is specified, it must equal the `.spec.template.metadata.labels`. If not
+specified, they default to being equal. Config with these two unequal will be rejected by the API.
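+
+As a minimal sketch (reusing the schema of the `daemon.yaml` example above; the name, label value, and image are illustrative), the template labels that a `.spec.selector`, if specified, would have to repeat look like this:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: example-daemon             # illustrative name
+spec:
+  template:
+    metadata:
+      labels:
+        daemon: example-daemon     # .spec.selector, if specified, must equal these labels
+    spec:
+      containers:
+      - name: agent
+        image: prom/prometheus     # illustrative image, as in daemon.yaml above
+        ports:
+        - containerPort: 9090
+```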
+
+Also, you should not normally create any pods whose labels match this selector, either directly, via
+another DaemonSet, or via another controller such as a ReplicationController. Otherwise, the DaemonSet
+controller will think that those pods were created by it. Kubernetes will not stop you from doing
+this. One case where you might want to do this is to manually create a pod with a different value on
+a node for testing.
+
+### Running Pods on Only Some Nodes
+
+If you specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will
+create pods on nodes which match that [node
+selector](/{{page.version}}/docs/user-guide/node-selection/).
+
+If you do not specify a `.spec.template.spec.nodeSelector`, then the DaemonSet controller will
+create pods on all nodes.
+
+## How Daemon Pods are Scheduled
+
+Normally, the machine that a pod runs on is selected by the Kubernetes scheduler. However, pods
+created by the Daemon controller have the machine already selected (`.spec.nodeName` is specified
+when the pod is created, so it is ignored by the scheduler). Therefore:
+
+ - the [`unschedulable`](/{{page.version}}/docs/admin/node/#manual-node-administration) field of a node is not respected
+   by the daemon set controller.
+ - the daemon set controller can create pods even when the scheduler has not been started, which can help with cluster
+   bootstrapping.
+
+## Communicating with DaemonSet Pods
+
+Some possible patterns for communicating with pods in a DaemonSet are:
+
+- **Push**: Pods in the Daemon Set are configured to send updates to another service, such
+  as a stats database. They do not have clients.
+- **NodeIP and Known Port**: Pods in the Daemon Set use a `hostPort`, so that the pods are reachable
+  via the node IPs. Clients know the list of node IPs somehow, and know the port by convention.
+- **DNS**: Create a [headless service](/{{page.version}}/docs/user-guide/services/#headless-services) with the same pod selector,
+  and then discover DaemonSets using the `endpoints` resource or retrieve multiple A records from
+  DNS.
+- **Service**: Create a service with the same pod selector, and use the service to reach a
+  daemon on a random node. (No way to reach a specific node.)
+
+## Updating a DaemonSet
+
+If node labels are changed, the DaemonSet will promptly add pods to newly matching nodes and delete
+pods from newly not-matching nodes.
+
+You can modify the pods that a DaemonSet creates. However, pods do not allow all
+fields to be updated. Also, the DaemonSet controller will use the original template the next
+time a node (even with the same name) is created.
+
+
+You can delete a DaemonSet. If you specify `--cascade=false` with `kubectl`, then the pods
+will be left on the nodes. You can then create a new DaemonSet with a different template.
+The new DaemonSet with the different template will recognize all the existing pods as having
+matching labels. It will not modify or delete them despite a mismatch in the pod template.
+You will need to force new pod creation by deleting the pod or deleting the node.
+
+You cannot update a DaemonSet.
+
+Support for updating DaemonSets and controlled updating of nodes is planned.
+
+## Alternatives to Daemon Set
+
+### Init Scripts
+
+It is certainly possible to run daemon processes by directly starting them on a node (e.g. using
+`init`, `upstartd`, or `systemd`). This is perfectly fine. However, there are several advantages to
+running such processes via a DaemonSet:
+
+- Ability to monitor and manage logs for daemons in the same way as applications.
+- Same config language and tools (e.g. pod templates, `kubectl`) for daemons and applications.
+- Future versions of Kubernetes will likely support integration between DaemonSet-created
+  pods and node upgrade workflows.
+- Running daemons in containers with resource limits increases isolation between daemons and app
+  containers. However, this can also be accomplished by running the daemons in a container but not in a pod
+  (e.g. started directly via Docker).
+
+### Bare Pods
+
+It is possible to create pods directly which specify a particular node to run on. However,
+a Daemon Set replaces pods that are deleted or terminated for any reason, such as in the case of
+node failure or disruptive node maintenance, such as a kernel upgrade. For this reason, you should
+use a Daemon Set rather than creating individual pods.
+
+### Static Pods
+
+It is possible to create pods by writing a file to a certain directory watched by Kubelet. These
+are called [static pods](/{{page.version}}/docs/admin/static-pods).
+Unlike DaemonSet, static pods cannot be managed with kubectl
+or other Kubernetes API clients. Static pods do not depend on the apiserver, making them useful
+in cluster bootstrapping cases. Also, static pods may be deprecated in the future.
+
+### Replication Controller
+
+Daemon Sets are similar to [Replication Controllers](/{{page.version}}/docs/user-guide/replication-controller) in that
+they both create pods, and those pods have processes which are not expected to terminate (e.g. web servers,
+storage servers).
+
+Use a replication controller for stateless services, like frontends, where scaling up and down the
+number of replicas and rolling out updates are more important than controlling exactly which host
+the pod runs on. Use a Daemon Controller when it is important that a copy of a pod always run on
+all or certain hosts, and when it needs to start before other pods.
+
+## Caveats
+
+DaemonSet objects are in the [`extensions` API Group](/{{page.version}}/docs/api/#api-groups).
+DaemonSet is not enabled by default. Enable it by setting
+`--runtime-config=extensions/v1beta1/daemonsets=true` on the API server. This can be
+achieved by exporting `ENABLE_DAEMONSETS=true` before running the kube-up.sh script
+on GCE.
+
+DaemonSet objects effectively have [API version `v1alpha1`](/{{page.version}}/docs/api/#api-versioning).
+ Alpha objects may change or even be discontinued in future software releases.
+However, due to a known issue, they will appear as API version `v1beta1` if enabled.
\ No newline at end of file
diff --git a/_includes/docs/docs/admin/dns.md b/_includes/docs/docs/admin/dns.md
new file mode 100644
index 0000000000..342e2f36c5
--- /dev/null
+++ b/_includes/docs/docs/admin/dns.md
@@ -0,0 +1,37 @@
+
+As of Kubernetes 0.8, DNS is offered as a [cluster add-on](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/README.md).
+If enabled, a DNS Pod and Service will be scheduled on the cluster, and the kubelets will be
+configured to tell individual containers to use the DNS Service's IP to resolve DNS names.
+
+Every Service defined in the cluster (including the DNS server itself) will be
+assigned a DNS name. By default, a client Pod's DNS search list will
+include the Pod's own namespace and the cluster's default domain.
This is best +illustrated by example: + +Assume a Service named `foo` in the Kubernetes namespace `bar`. A Pod running +in namespace `bar` can look up this service by simply doing a DNS query for +`foo`. A Pod running in namespace `quux` can look up this service by doing a +DNS query for `foo.bar`. + +The cluster DNS server ([SkyDNS](https://github.com/skynetservices/skydns)) +supports forward lookups (A records) and service lookups (SRV records). + +## How it Works + +The running DNS pod holds 3 containers - skydns, etcd (a private instance which skydns uses), +and a Kubernetes-to-skydns bridge called kube2sky. The kube2sky process +watches the Kubernetes master for changes in Services, and then writes the +information to etcd, which skydns reads. This etcd instance is not linked to +any other etcd clusters that might exist, including the Kubernetes master. + +## Issues + +The skydns service is reachable directly from Kubernetes nodes (outside +of any container) and DNS resolution works if the skydns service is targeted +explicitly. However, nodes are not configured to use the cluster DNS service or +to search the cluster's DNS domain by default. This may be resolved at a later +time. + +## For more information + +See [the docs for the DNS cluster addon](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/README.md). \ No newline at end of file diff --git a/_includes/docs/docs/admin/etcd.md b/_includes/docs/docs/admin/etcd.md new file mode 100644 index 0000000000..5aef2a046d --- /dev/null +++ b/_includes/docs/docs/admin/etcd.md @@ -0,0 +1,44 @@ + +[etcd](https://coreos.com/etcd/docs/2.0.12/) is a highly-available key value +store which Kubernetes uses for persistent storage of all of its REST API +objects. + +## Configuration: high-level goals + +Access Control: give *only* kube-apiserver read/write access to etcd. You do not +want apiserver's etcd exposed to every node in your cluster (or worse, to the +internet at large), because access to etcd is equivalent to root in your +cluster. + +Data Reliability: for reasonable safety, either etcd needs to be run as a +[cluster](/{{page.version}}/docs/admin/high-availability/#clustering-etcd) (multiple machines each running +etcd) or etcd's data directory should be located on durable storage (e.g., GCE's +persistent disk). In either case, if high availability is required--as it might +be in a production cluster--the data directory ought to be [backed up +periodically](https://coreos.com/etcd/docs/2.0.12/admin_guide/#disaster-recovery), +to reduce downtime in case of corruption. + +## Default configuration + +The default setup scripts use kubelet's file-based static pods feature to run etcd in a +[pod](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/etcd/etcd.manifest). This manifest should only +be run on master VMs. The default location that kubelet scans for manifests is +`/etc/kubernetes/manifests/`. + +## Kubernetes's usage of etcd + +By default, Kubernetes objects are stored under the `/registry` key in etcd. +This path can be prefixed by using the [kube-apiserver](/{{page.version}}/docs/admin/kube-apiserver) flag +`--etcd-prefix="/foo"`. + +`etcd` is the only place that Kubernetes keeps state. + +## Troubleshooting + +To test whether `etcd` is running correctly, you can try writing a value to a +test key. 
On your master VM (or somewhere with firewalls configured such that
+you can talk to your cluster's etcd), try:
+
+```shell
+curl -fs -X PUT "http://${host}:${port}/v2/keys/_test"
+```
diff --git a/_includes/docs/docs/admin/garbage-collection.md b/_includes/docs/docs/admin/garbage-collection.md
new file mode 100644
index 0000000000..d097bab32f
--- /dev/null
+++ b/_includes/docs/docs/admin/garbage-collection.md
@@ -0,0 +1,59 @@
+
+The kubelet manages garbage collection automatically, mainly of unreferenced
+images and dead containers. The kubelet applies container garbage collection every minute
+and image garbage collection every 5 minutes.
+Note that we generally don't recommend using an external garbage collection tool, since it could
+potentially break the behavior of the kubelet if it removes containers that the kubelet
+relies on as tombstones. That said, garbage collectors that only aim to deal
+with Docker's resource-leak issues are appreciated.
+
+### Image Collection
+
+Kubernetes manages the lifecycle of all images through the imageManager, with the cooperation
+of cAdvisor.
+The image garbage collection policy takes two factors into consideration:
+`HighThresholdPercent` and `LowThresholdPercent`. Disk usage above the high threshold
+will trigger garbage collection, which attempts to delete unused images until the low
+threshold is met. Least recently used images are deleted first.
+
+### Container Collection
+
+The container garbage collection policy considers three user-defined variables.
+`MinAge` is the minimum age at which a container can be garbage collected,
+zero for no limit. `MaxPerPodContainer` is the maximum number of dead containers any single
+pod (UID, container name) pair is allowed to have, less than zero for no limit.
+`MaxContainers` is the maximum number of total dead containers, less than zero for no limit as well.
+
+The kubelet removes containers which are unidentified or which fall outside the bounds set by the
+three flags mentioned above. Generally, the oldest containers are removed first. Since both
+`MaxPerPodContainer` and `MaxContainers` are taken into consideration, the two can conflict --
+keeping the maximum number of containers per pod can exceed the global limit on dead containers.
+In this case, `MaxPerPodContainer` is relaxed a little bit: in the worst case, it is first downgraded
+to 1 container per pod, and then the oldest containers are evicted for the greater good.
+
+When the kubelet removes a dead container, all the files inside the container are cleaned up as well.
+Note that containers which are not managed by the kubelet are skipped.
+
+### User Configuration
+
+Users can tune the following flags to adjust image garbage collection:
+
+1. `image-gc-high-threshold`, the percent of disk usage which triggers image garbage collection.
+Default is 90%.
+2. `image-gc-low-threshold`, the percent of disk usage that image garbage collection attempts
+to free space down to. Default is 80%.
+
+The container garbage collection policy can also be customized, via the following three flags (combined in the sketch after this list):
+
+1. `minimum-container-ttl-duration`, minimum age for a finished container before it is
+garbage collected. Default is 1 minute.
+2. `maximum-dead-containers-per-container`, maximum number of old instances to retain
+per container. Default is 2.
+3. `maximum-dead-containers`, maximum number of old instances of containers to retain globally.
+Default is 100.
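+
+As a rough sketch (the values here are illustrative, not recommendations, and other required kubelet flags are omitted), these settings are passed directly on the kubelet command line:
+
+```shell
+# Illustrative values only; tune for your own image churn and disk size.
+kubelet \
+  --image-gc-high-threshold=85 \
+  --image-gc-low-threshold=75 \
+  --minimum-container-ttl-duration=2m \
+  --maximum-dead-containers-per-container=2 \
+  --maximum-dead-containers=240
+```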
+
+Note that when you customize the flag configuration, we highly recommend a value for `maximum-dead-containers-per-container`
+large enough to retain at least 2 dead containers per expected container. A generous value for
+`maximum-dead-containers` is also important for a similar reason.
+See [this issue](https://github.com/kubernetes/kubernetes/issues/13287) for more details.
diff --git a/_includes/docs/docs/admin/high-availability.md b/_includes/docs/docs/admin/high-availability.md
new file mode 100644
index 0000000000..ece1d1bfae
--- /dev/null
+++ b/_includes/docs/docs/admin/high-availability.md
@@ -0,0 +1,225 @@
+
+This document describes how to build a high-availability (HA) Kubernetes cluster. This is a fairly advanced topic.
+Users who merely want to experiment with Kubernetes are encouraged to use configurations that are simpler to set up such as
+the simple [Docker based single node cluster instructions](/{{page.version}}/docs/getting-started-guides/docker),
+or try [Google Container Engine](https://cloud.google.com/container-engine/) for hosted Kubernetes.
+
+Also, at this time high availability support for Kubernetes is not continuously tested in our end-to-end (e2e) testing. We will
+be working to add this continuous testing, but for now the single-node master installations are more heavily tested.
+
+* TOC
+{:toc}
+
+## Overview
+
+Setting up a truly reliable, highly available distributed system requires a number of steps; it is akin to
+wearing underwear, pants, a belt, suspenders, another pair of underwear, and another pair of pants. We go into each
+of these steps in detail, but a summary is given here to help guide and orient the user.
+
+The steps involved are as follows:
+
+ * [Creating the reliable constituent nodes that collectively form our HA master implementation.](#reliable-nodes)
+ * [Setting up a redundant, reliable storage layer with clustered etcd.](#establishing-a-redundant-reliable-data-storage-layer)
+ * [Starting replicated, load balanced Kubernetes API servers](#replicated-api-servers)
+ * [Setting up master-elected Kubernetes scheduler and controller-manager daemons](#master-elected-components)
+
+Here's what the system should look like when it's finished:
+![High availability Kubernetes diagram](/images/docs/ha.svg)
+
+Ready? Let's get started.
+
+## Initial set-up
+
+The remainder of this guide assumes that you are setting up a 3-node clustered master, where each machine is running some flavor of Linux.
+Examples in the guide are given for Debian distributions, but they should be easily adaptable to other distributions.
+Likewise, this set up should work whether you are running in a public or private cloud provider, or if you are running
+on bare metal.
+
+The easiest way to implement an HA Kubernetes cluster is to start with an existing single-master cluster. The
+instructions at [https://get.k8s.io](https://get.k8s.io)
+describe easy installation for single-master clusters on a variety of platforms.
+
+## Reliable nodes
+
+On each master node, we are going to run a number of processes that implement the Kubernetes API. The first step in making these reliable is
+to make sure that each automatically restarts when it fails. To achieve this, we need to install a process watcher. We choose to use
+the `kubelet` that we run on each of the worker nodes. This is convenient, since we can use containers to distribute our binaries,
+establish resource limits, and introspect the resource usage of each daemon.
Of course, we also need something to monitor the kubelet +itself (insert who watches the watcher jokes here). For Debian systems, we choose monit, but there are a number of alternate +choices. For example, on systemd-based systems (e.g. RHEL, CentOS), you can run 'systemctl enable kubelet'. + +If you are extending from a standard Kubernetes installation, the `kubelet` binary should already be present on your system. You can run +`which kubelet` to determine if the binary is in fact installed. If it is not installed, +you should install the [kubelet binary](https://storage.googleapis.com/kubernetes-release/release/v0.19.3/bin/linux/amd64/kubelet), the +[kubelet init file](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/kubelet/initd) and [high-availability/default-kubelet](/{{page.version}}/docs/admin/high-availability/default-kubelet) +scripts. + +If you are using monit, you should also install the monit daemon (`apt-get install monit`) and the [high-availability/monit-kubelet](/{{page.version}}/docs/admin/high-availability/monit-kubelet) and +[high-availability/monit-docker](/{{page.version}}/docs/admin/high-availability/monit-docker) configs. + +On systemd systems you `systemctl enable kubelet` and `systemctl enable docker`. + + +## Establishing a redundant, reliable data storage layer + +The central foundation of a highly available solution is a redundant, reliable storage layer. The number one rule of high-availability is +to protect the data. Whatever else happens, whatever catches on fire, if you have the data, you can rebuild. If you lose the data, you're +done. + +Clustered etcd already replicates your storage to all master instances in your cluster. This means that to lose data, all three nodes would need +to have their physical (or virtual) disks fail at the same time. The probability that this occurs is relatively low, so for many people +running a replicated etcd cluster is likely reliable enough. You can add additional reliability by increasing the +size of the cluster from three to five nodes. If that is still insufficient, you can add +[even more redundancy to your storage layer](#even-more-reliable-storage). + +### Clustering etcd + +The full details of clustering etcd are beyond the scope of this document, lots of details are given on the +[etcd clustering page](https://github.com/coreos/etcd/blob/master/Documentation/clustering.md). This example walks through +a simple cluster set up, using etcd's built in discovery to build our cluster. + +First, hit the etcd discovery service to create a new token: + +```shell +curl https://discovery.etcd.io/new?size=3 +``` + +On each node, copy the [etcd.yaml](/{{page.version}}/docs/admin/high-availability/etcd.yaml) file into `/etc/kubernetes/manifests/etcd.yaml` + +The kubelet on each node actively monitors the contents of that directory, and it will create an instance of the `etcd` +server from the definition of the pod specified in `etcd.yaml`. + +Note that in `etcd.yaml` you should substitute the token URL you got above for `${DISCOVERY_TOKEN}` on all three machines, +and you should substitute a different name (e.g. `node-1`) for ${NODE_NAME} and the correct IP address +for `${NODE_IP}` on each machine. + + +#### Validating your cluster + +Once you copy this into all three nodes, you should have a clustered etcd set up. 
You can validate with
+
+```shell
+etcdctl member list
+```
+
+and
+
+```shell
+etcdctl cluster-health
+```
+
+You can also validate that this is working with `etcdctl set foo bar` on one node, and `etcdctl get foo`
+on a different node.
+
+### Even more reliable storage
+
+Of course, if you are interested in increased data reliability, there are further options which make the place where etcd
+stores its data even more reliable than regular disks (belts *and* suspenders, ftw!).
+
+If you use a cloud provider, then they usually provide this
+for you, for example [Persistent Disk](https://cloud.google.com/compute/docs/disks/persistent-disks) on the Google Cloud Platform. This
+is block-device persistent storage that can be mounted onto your virtual machine. Other cloud providers provide similar solutions.
+
+If you are running on physical machines, you can also use network attached redundant storage using an iSCSI or NFS interface.
+Alternatively, you can run a clustered file system like Gluster or Ceph. Finally, you can also run a RAID array on each physical machine.
+
+Regardless of how you choose to implement it, if you choose to use one of these options, you should make sure that your storage is mounted
+to each machine. If your storage is shared between the three masters in your cluster, you should create a different directory on the storage
+for each node. Throughout these instructions, we assume that this storage is mounted to your machine in `/var/etcd/data`.
+
+
+## Replicated API Servers
+
+Once you have replicated etcd set up correctly, we will also install the apiserver using the kubelet.
+
+### Installing configuration files
+
+First you need to create the initial log file, so that Docker mounts a file instead of a directory:
+
+```shell
+touch /var/log/kube-apiserver.log
+```
+
+Next, you need to create a `/srv/kubernetes/` directory on each node. This directory includes:
+
+ * basic_auth.csv - basic auth user and password
+ * ca.crt - Certificate Authority cert
+ * known_tokens.csv - tokens that entities (e.g. the kubelet) can use to talk to the apiserver
+ * kubecfg.crt - Client certificate, public key
+ * kubecfg.key - Client certificate, private key
+ * server.cert - Server certificate, public key
+ * server.key - Server certificate, private key
+
+The easiest way to create this directory may be to copy it from the master node of a working cluster, or you can generate these files manually.
+
+### Starting the API Server
+
+Once these files exist, copy the [kube-apiserver.yaml](/{{page.version}}/docs/admin/high-availability/kube-apiserver.yaml) into `/etc/kubernetes/manifests/` on each master node.
+
+The kubelet monitors this directory, and will automatically create an instance of the `kube-apiserver` container using the pod definition specified
+in the file.
+
+### Load balancing
+
+At this point, you should have 3 apiservers all working correctly. If you set up a network load balancer, you should
+be able to access your cluster via that load balancer, and see traffic balancing between the apiserver instances. Setting
+up a load balancer will depend on the specifics of your platform; for example, instructions for the Google Cloud
+Platform can be found [here](https://cloud.google.com/compute/docs/load-balancing/).
+
+Note, if you are using authentication, you may need to regenerate your certificate to include the IP address of the balancer,
+in addition to the IP addresses of the individual nodes.
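+
+As a quick, hedged check that the balanced endpoint works (the variable names are illustrative, and this assumes the basic-auth credentials from `basic_auth.csv` above), you can query the apiserver's health endpoint through the load balancer from outside the cluster:
+
+```shell
+# Expect an "ok" response from whichever apiserver the balancer picks.
+curl -k -u ${USER}:${PASSWORD} https://${LOAD_BALANCER_IP}/healthz
+```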
+
+For pods that you deploy into the cluster, the `kubernetes` service/dns name should provide a load balanced endpoint for the master automatically.
+
+For external users of the API (e.g. the `kubectl` command line interface, continuous build pipelines, or other clients) you will want to configure
+them to talk to the external load balancer's IP address.
+
+## Master elected components
+
+So far we have set up state storage, and we have set up the API server, but we haven't run anything that actually modifies
+cluster state, such as the controller manager and scheduler. To achieve this reliably, we only want to have one actor modifying state at a time, but we want replicated
+instances of these actors, in case a machine dies. To achieve this, we are going to use a lease-lock in etcd to perform
+master election. On each of the three apiserver nodes, we run a small utility application named `podmaster`. Its job is to implement a master
+election protocol using etcd "compare and swap". If the apiserver node wins the election, it starts the master component it is managing (e.g. the scheduler); if it
+loses the election, it ensures that any master components running on the node (e.g. the scheduler) are stopped.
+
+In the future, we expect to more tightly integrate this lease-locking into the scheduler and controller-manager binaries directly, as described in the [high availability design proposal](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/proposals/high-availability.md).
+
+### Installing configuration files
+
+First, create empty log files on each node, so that Docker will mount the files rather than create new directories:
+
+```shell
+touch /var/log/kube-scheduler.log
+touch /var/log/kube-controller-manager.log
+```
+
+Next, set up the descriptions of the scheduler and controller manager pods on each node
+by copying [kube-scheduler.yaml](/{{page.version}}/docs/admin/high-availability/kube-scheduler.yaml) and [kube-controller-manager.yaml](/{{page.version}}/docs/admin/high-availability/kube-controller-manager.yaml) into the `/srv/kubernetes/` directory.
+
+### Running the podmaster
+
+Now that the configuration files are in place, copy the [podmaster.yaml](/{{page.version}}/docs/admin/high-availability/podmaster.yaml) config file into `/etc/kubernetes/manifests/`.
+
+As before, the kubelet on the node monitors this directory, and will start an instance of the podmaster using the pod specification provided in `podmaster.yaml`.
+
+Now you will have one instance of the scheduler process running on a single master node, and likewise one
+controller-manager process running on a single (possibly different) master node. If either of these processes fails,
+the kubelet will restart them. If any of these nodes fail, the process will move to a different instance of a master
+node.
+
+## Conclusion
+
+At this point, you are done (yeah!) with the master components, but you still need to add worker nodes (boo!).
+
+If you have an existing cluster, this is as simple as reconfiguring your kubelets to talk to the load-balanced endpoint, and
+restarting the kubelets on each node.
+
+If you are turning up a fresh cluster, you will need to install the kubelet and kube-proxy on each worker node, and
+set the `--apiserver` flag to your replicated endpoint.
+
+## Vagrant up!
+
+We indeed have an initial proof of concept tester for this, which is available [here](https://releases.k8s.io/{{page.githubbranch}}/examples/high-availability).
+ +It implements the major concepts (with a few minor reductions for simplicity), of the podmaster HA implementation alongside a quick smoke test using k8petstore. diff --git a/_includes/docs/docs/admin/high-availability/default-kubelet b/_includes/docs/docs/admin/high-availability/default-kubelet new file mode 100644 index 0000000000..41ee530151 --- /dev/null +++ b/_includes/docs/docs/admin/high-availability/default-kubelet @@ -0,0 +1,8 @@ +# This should be the IP address of the load balancer for all masters +MASTER_IP= +# This should be the internal service IP address reserved for DNS +DNS_IP= + +DAEMON_ARGS="$DAEMON_ARGS --api-servers=https://${MASTER_IP} --enable-debugging-handlers=true --cloud-provider= +gce --config=/etc/kubernetes/manifests --allow-privileged=False --v=2 --cluster-dns=${DNS_IP} --cluster-domain=c +luster.local --configure-cbr0=true --cgroup-root=/ --system-container=/system " diff --git a/_includes/docs/docs/admin/high-availability/etcd.yaml b/_includes/docs/docs/admin/high-availability/etcd.yaml new file mode 100644 index 0000000000..fc9fe67e75 --- /dev/null +++ b/_includes/docs/docs/admin/high-availability/etcd.yaml @@ -0,0 +1,87 @@ +apiVersion: v1 +kind: Pod +metadata: + name: etcd-server +spec: + hostNetwork: true + containers: + - image: gcr.io/google_containers/etcd:2.0.9 + name: etcd-container + command: + - /usr/local/bin/etcd + - --name + - ${NODE_NAME} + - --initial-advertise-peer-urls + - http://${NODE_IP}:2380 + - --listen-peer-urls + - http://${NODE_IP}:2380 + - --advertise-client-urls + - http://${NODE_IP}:4001 + - --listen-client-urls + - http://127.0.0.1:4001 + - --data-dir + - /var/etcd/data + - --discovery + - ${DISCOVERY_TOKEN} + ports: + - containerPort: 2380 + hostPort: 2380 + name: serverport + - containerPort: 4001 + hostPort: 4001 + name: clientport + volumeMounts: + - mountPath: /var/etcd + name: varetcd + - mountPath: /etc/ssl + name: etcssl + readOnly: true + - mountPath: /usr/share/ssl + name: usrsharessl + readOnly: true + - mountPath: /var/ssl + name: varssl + readOnly: true + - mountPath: /usr/ssl + name: usrssl + readOnly: true + - mountPath: /usr/lib/ssl + name: usrlibssl + readOnly: true + - mountPath: /usr/local/openssl + name: usrlocalopenssl + readOnly: true + - mountPath: /etc/openssl + name: etcopenssl + readOnly: true + - mountPath: /etc/pki/tls + name: etcpkitls + readOnly: true + volumes: + - hostPath: + path: /var/etcd/data + name: varetcd + - hostPath: + path: /etc/ssl + name: etcssl + - hostPath: + path: /usr/share/ssl + name: usrsharessl + - hostPath: + path: /var/ssl + name: varssl + - hostPath: + path: /usr/ssl + name: usrssl + - hostPath: + path: /usr/lib/ssl + name: usrlibssl + - hostPath: + path: /usr/local/openssl + name: usrlocalopenssl + - hostPath: + path: /etc/openssl + name: etcopenssl + - hostPath: + path: /etc/pki/tls + name: etcpkitls diff --git a/_includes/docs/docs/admin/high-availability/kube-apiserver.yaml b/_includes/docs/docs/admin/high-availability/kube-apiserver.yaml new file mode 100644 index 0000000000..33d5cff5cd --- /dev/null +++ b/_includes/docs/docs/admin/high-availability/kube-apiserver.yaml @@ -0,0 +1,90 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-apiserver +spec: + hostNetwork: true + containers: + - name: kube-apiserver + image: gcr.io/google_containers/kube-apiserver:9680e782e08a1a1c94c656190011bd02 + command: + - /bin/sh + - -c + - /usr/local/bin/kube-apiserver --address=127.0.0.1 --etcd-servers=http://127.0.0.1:4001 + --cloud-provider=gce 
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota + --service-cluster-ip-range=10.0.0.0/16 --client-ca-file=/srv/kubernetes/ca.crt + --basic-auth-file=/srv/kubernetes/basic_auth.csv --cluster-name=e2e-test-bburns + --tls-cert-file=/srv/kubernetes/server.cert --tls-private-key-file=/srv/kubernetes/server.key + --secure-port=443 --token-auth-file=/srv/kubernetes/known_tokens.csv --v=2 + --allow-privileged=False 1>>/var/log/kube-apiserver.log 2>&1 + ports: + - containerPort: 443 + hostPort: 443 + name: https + - containerPort: 7080 + hostPort: 7080 + name: http + - containerPort: 8080 + hostPort: 8080 + name: local + volumeMounts: + - mountPath: /srv/kubernetes + name: srvkube + readOnly: true + - mountPath: /var/log/kube-apiserver.log + name: logfile + - mountPath: /etc/ssl + name: etcssl + readOnly: true + - mountPath: /usr/share/ssl + name: usrsharessl + readOnly: true + - mountPath: /var/ssl + name: varssl + readOnly: true + - mountPath: /usr/ssl + name: usrssl + readOnly: true + - mountPath: /usr/lib/ssl + name: usrlibssl + readOnly: true + - mountPath: /usr/local/openssl + name: usrlocalopenssl + readOnly: true + - mountPath: /etc/openssl + name: etcopenssl + readOnly: true + - mountPath: /etc/pki/tls + name: etcpkitls + readOnly: true + volumes: + - hostPath: + path: /srv/kubernetes + name: srvkube + - hostPath: + path: /var/log/kube-apiserver.log + name: logfile + - hostPath: + path: /etc/ssl + name: etcssl + - hostPath: + path: /usr/share/ssl + name: usrsharessl + - hostPath: + path: /var/ssl + name: varssl + - hostPath: + path: /usr/ssl + name: usrssl + - hostPath: + path: /usr/lib/ssl + name: usrlibssl + - hostPath: + path: /usr/local/openssl + name: usrlocalopenssl + - hostPath: + path: /etc/openssl + name: etcopenssl + - hostPath: + path: /etc/pki/tls + name: etcpkitls diff --git a/_includes/docs/docs/admin/high-availability/kube-controller-manager.yaml b/_includes/docs/docs/admin/high-availability/kube-controller-manager.yaml new file mode 100644 index 0000000000..736181fc7c --- /dev/null +++ b/_includes/docs/docs/admin/high-availability/kube-controller-manager.yaml @@ -0,0 +1,82 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager +spec: + containers: + - command: + - /bin/sh + - -c + - /usr/local/bin/kube-controller-manager --master=127.0.0.1:8080 --cluster-name=e2e-test-bburns + --cluster-cidr=10.245.0.0/16 --allocate-node-cidrs=true --cloud-provider=gce --service-account-private-key-file=/srv/kubernetes/server.key + --v=2 1>>/var/log/kube-controller-manager.log 2>&1 + image: gcr.io/google_containers/kube-controller-manager:fda24638d51a48baa13c35337fcd4793 + livenessProbe: + httpGet: + path: /healthz + port: 10252 + initialDelaySeconds: 15 + timeoutSeconds: 1 + name: kube-controller-manager + volumeMounts: + - mountPath: /srv/kubernetes + name: srvkube + readOnly: true + - mountPath: /var/log/kube-controller-manager.log + name: logfile + - mountPath: /etc/ssl + name: etcssl + readOnly: true + - mountPath: /usr/share/ssl + name: usrsharessl + readOnly: true + - mountPath: /var/ssl + name: varssl + readOnly: true + - mountPath: /usr/ssl + name: usrssl + readOnly: true + - mountPath: /usr/lib/ssl + name: usrlibssl + readOnly: true + - mountPath: /usr/local/openssl + name: usrlocalopenssl + readOnly: true + - mountPath: /etc/openssl + name: etcopenssl + readOnly: true + - mountPath: /etc/pki/tls + name: etcpkitls + readOnly: true + hostNetwork: true + volumes: + - hostPath: + path: /srv/kubernetes + name: 
srvkube + - hostPath: + path: /var/log/kube-controller-manager.log + name: logfile + - hostPath: + path: /etc/ssl + name: etcssl + - hostPath: + path: /usr/share/ssl + name: usrsharessl + - hostPath: + path: /var/ssl + name: varssl + - hostPath: + path: /usr/ssl + name: usrssl + - hostPath: + path: /usr/lib/ssl + name: usrlibssl + - hostPath: + path: /usr/local/openssl + name: usrlocalopenssl + - hostPath: + path: /etc/openssl + name: etcopenssl + - hostPath: + path: /etc/pki/tls + name: etcpkitls diff --git a/_includes/docs/docs/admin/high-availability/kube-scheduler.yaml b/_includes/docs/docs/admin/high-availability/kube-scheduler.yaml new file mode 100644 index 0000000000..1b23ee1bb8 --- /dev/null +++ b/_includes/docs/docs/admin/high-availability/kube-scheduler.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler +spec: + hostNetwork: true + containers: + - name: kube-scheduler + image: gcr.io/google_containers/kube-scheduler:34d0b8f8b31e27937327961528739bc9 + command: + - /bin/sh + - -c + - /usr/local/bin/kube-scheduler --master=127.0.0.1:8080 --v=2 1>>/var/log/kube-scheduler.log + 2>&1 + livenessProbe: + httpGet: + path: /healthz + port: 10251 + initialDelaySeconds: 15 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /var/log/kube-scheduler.log + name: logfile + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-s8ejd + readOnly: true + volumes: + - hostPath: + path: /var/log/kube-scheduler.log + name: logfile diff --git a/_includes/docs/docs/admin/high-availability/monit-docker b/_includes/docs/docs/admin/high-availability/monit-docker new file mode 100644 index 0000000000..8c2753a430 --- /dev/null +++ b/_includes/docs/docs/admin/high-availability/monit-docker @@ -0,0 +1,9 @@ +check process docker with pidfile /var/run/docker.pid +group docker +start program = "/etc/init.d/docker start" +stop program = "/etc/init.d/docker stop" +if does not exist then restart +if failed + unixsocket /var/run/docker.sock + protocol HTTP request "/version" +then restart \ No newline at end of file diff --git a/_includes/docs/docs/admin/high-availability/monit-kubelet b/_includes/docs/docs/admin/high-availability/monit-kubelet new file mode 100644 index 0000000000..eb3211b06a --- /dev/null +++ b/_includes/docs/docs/admin/high-availability/monit-kubelet @@ -0,0 +1,11 @@ +check process kubelet with pidfile /var/run/kubelet.pid +group kubelet +start program = "/etc/init.d/kubelet start" +stop program = "/etc/init.d/kubelet stop" +if does not exist then restart +if failed + host 127.0.0.1 + port 10255 + protocol HTTP + request "/healthz" +then restart diff --git a/_includes/docs/docs/admin/high-availability/podmaster.yaml b/_includes/docs/docs/admin/high-availability/podmaster.yaml new file mode 100644 index 0000000000..d634225b93 --- /dev/null +++ b/_includes/docs/docs/admin/high-availability/podmaster.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Pod +metadata: + name: scheduler-master +spec: + hostNetwork: true + containers: + - name: scheduler-elector + image: gcr.io/google_containers/podmaster:1.1 + command: + - /podmaster + - --etcd-servers=http://127.0.0.1:4001 + - --key=scheduler + - --source-file=/kubernetes/kube-scheduler.manifest + - --dest-file=/manifests/kube-scheduler.manifest + volumeMounts: + - mountPath: /kubernetes + name: k8s + readOnly: true + - mountPath: /manifests + name: manifests + - name: controller-manager-elector + image: gcr.io/google_containers/podmaster:1.1 + command: + - /podmaster + - 
--etcd-servers=http://127.0.0.1:4001 + - --key=controller + - --source-file=/kubernetes/kube-controller-manager.manifest + - --dest-file=/manifests/kube-controller-manager.manifest + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /kubernetes + name: k8s + readOnly: true + - mountPath: /manifests + name: manifests + volumes: + - hostPath: + path: /srv/kubernetes + name: k8s + - hostPath: + path: /etc/kubernetes/manifests + name: manifests diff --git a/_includes/docs/docs/admin/index.md b/_includes/docs/docs/admin/index.md new file mode 100644 index 0000000000..38df8bba83 --- /dev/null +++ b/_includes/docs/docs/admin/index.md @@ -0,0 +1,76 @@ + +The cluster admin guide is for anyone creating or administering a Kubernetes cluster. +It assumes some familiarity with concepts in the [User Guide](/{{page.version}}/docs/user-guide/). + +* TOC +{:toc} + +## Planning a cluster + +There are many different examples of how to setup a kubernetes cluster. Many of them are listed in this +[matrix](/{{page.version}}/docs/getting-started-guides/). We call each of the combinations in this matrix a *distro*. + +Before choosing a particular guide, here are some things to consider: + + - Are you just looking to try out Kubernetes on your laptop, or build a high-availability many-node cluster? Both + models are supported, but some distros are better for one case or the other. + - Will you be using a hosted Kubernetes cluster, such as [GKE](https://cloud.google.com/container-engine), or setting + one up yourself? + - Will your cluster be on-premises, or in the cloud (IaaS)? Kubernetes does not directly support hybrid clusters. We + recommend setting up multiple clusters rather than spanning distant locations. + - Will you be running Kubernetes on "bare metal" or virtual machines? Kubernetes supports both, via different distros. + - Do you just want to run a cluster, or do you expect to do active development of kubernetes project code? If the + latter, it is better to pick a distro actively used by other developers. Some distros only use binary releases, but + offer is a greater variety of choices. + - Not all distros are maintained as actively. Prefer ones which are listed as tested on a more recent version of + Kubernetes. + - If you are configuring kubernetes on-premises, you will need to consider what [networking + model](/{{page.version}}/docs/admin/networking) fits best. + - If you are designing for very high-availability, you may want [clusters in multiple zones](/{{page.version}}/docs/admin/multi-cluster). + - You may want to familiarize yourself with the various + [components](/{{page.version}}/docs/admin/cluster-components) needed to run a cluster. + +## Setting up a cluster + +Pick one of the Getting Started Guides from the [matrix](/{{page.version}}/docs/getting-started-guides/) and follow it. +If none of the Getting Started Guides fits, you may want to pull ideas from several of the guides. + +One option for custom networking is *OpenVSwitch GRE/VxLAN networking* ([ovs-networking.md](/{{page.version}}/docs/admin/ovs-networking)), which +uses OpenVSwitch to set up networking between pods across + Kubernetes nodes. + +If you are modifying an existing guide which uses Salt, this document explains [how Salt is used in the Kubernetes +project](/{{page.version}}/docs/admin/salt). + +## Managing a cluster, including upgrades + +[Managing a cluster](/{{page.version}}/docs/admin/cluster-management). + +## Managing nodes + +[Managing nodes](/{{page.version}}/docs/admin/node). 
+ +## Optional Cluster Services + +* **DNS Integration with SkyDNS** ([dns.md](/{{page.version}}/docs/admin/dns)): + Resolving a DNS name directly to a Kubernetes service. + +* **Logging** with [Kibana](/{{page.version}}/docs/user-guide/logging) + +## Multi-tenant support + +* **Resource Quota** ([resource-quota.md](/{{page.version}}/docs/admin/resource-quota)) + +## Security + +* **Kubernetes Container Environment** ([docs/user-guide/container-environment.md](/{{page.version}}/docs/user-guide/container-environment)): + Describes the environment for Kubelet managed containers on a Kubernetes + node. + +* **Securing access to the API Server** [accessing the api](/{{page.version}}/docs/admin/accessing-the-api) + +* **Authentication** [authentication](/{{page.version}}/docs/admin/authentication) + +* **Authorization** [authorization](/{{page.version}}/docs/admin/authorization) + +* **Admission Controllers** [admission_controllers](/{{page.version}}/docs/admin/admission-controllers) \ No newline at end of file diff --git a/_includes/docs/docs/admin/limitrange/index.md b/_includes/docs/docs/admin/limitrange/index.md new file mode 100644 index 0000000000..dad30c5883 --- /dev/null +++ b/_includes/docs/docs/admin/limitrange/index.md @@ -0,0 +1,192 @@ + +By default, pods run with unbounded CPU and memory limits. This means that any pod in the +system will be able to consume as much CPU and memory on the node that executes the pod. + +Users may want to impose restrictions on the amount of resource a single pod in the system may consume +for a variety of reasons. + +For example: + +1. Each node in the cluster has 2GB of memory. The cluster operator does not want to accept pods +that require more than 2GB of memory since no node in the cluster can support the requirement. To prevent a +pod from being permanently unscheduled to a node, the operator instead chooses to reject pods that exceed 2GB +of memory as part of admission control. +2. A cluster is shared by two communities in an organization that runs production and development workloads +respectively. Production workloads may consume up to 8GB of memory, but development workloads may consume up +to 512MB of memory. The cluster operator creates a separate namespace for each workload, and applies limits to +each namespace. +3. Users may create a pod which consumes resources just below the capacity of a machine. The left over space +may be too small to be useful, but big enough for the waste to be costly over the entire cluster. As a result, +the cluster operator may want to set limits that a pod must consume at least 20% of the memory and cpu of their +average node size in order to provide for more uniform scheduling and to limit waste. + +This example demonstrates how limits can be applied to a Kubernetes namespace to control +min/max resource limits per pod. In addition, this example demonstrates how you can +apply default resource limits to pods in the absence of an end-user specified value. + +See [LimitRange design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_limit_range.md) for more information. For a detailed description of the Kubernetes resource model, see [Resources](/{{page.version}}/docs/user-guide/compute-resources) + +## Step 0: Prerequisites + +This example requires a running Kubernetes cluster. See the [Getting Started guides](/{{page.version}}/docs/getting-started-guides/) for how to get started. + +Change to the `` directory if you're not already there. 
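Before moving on, you may want to confirm that `kubectl` can reach your cluster. A quick sanity check (a sketch; any read-only command will do):

```shell
# Verify that the client can reach the API server
$ kubectl cluster-info
# Confirm that at least one node is available to schedule pods onto
$ kubectl get nodes
```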
+ +## Step 1: Create a namespace + +This example will work in a custom namespace to demonstrate the concepts involved. + +Let's create a new namespace called limit-example: + +```shell +$ kubectl create -f docs/admin/limitrange/namespace.yaml +namespace "limit-example" created +$ kubectl get namespaces +NAME LABELS STATUS AGE +default Active 5m +limit-example Active 53s +``` + +## Step 2: Apply a limit to the namespace + +Let's create a simple limit in our namespace. + +```shell +$ kubectl create -f docs/admin/limitrange/limits.yaml --namespace=limit-example +limitrange "mylimits" created +``` + +Let's describe the limits that we have imposed in our namespace. + +```shell +$ kubectl describe limits mylimits --namespace=limit-example +Name: mylimits +Namespace: limit-example +Type Resource Min Max Request Limit Limit/Request +---- -------- --- --- ------- ----- ------------- +Pod cpu 200m 2 - - - +Pod memory 6Mi 1Gi - - - +Container cpu 100m 2 200m 300m - +Container memory 3Mi 1Gi 100Mi 200Mi - +``` + +In this scenario, we have said the following: + +1. If a max constraint is specified for a resource (2 CPU and 1Gi memory in this case), then a limit +must be specified for that resource across all containers. Failure to specify a limit will result in +a validation error when attempting to create the pod. Note that a default value of limit is set by +*default* in file `limits.yaml` (300m CPU and 200Mi memory). +2. If a min constraint is specified for a resource (100m CPU and 3Mi memory in this case), then a +request must be specified for that resource across all containers. Failure to specify a request will +result in a validation error when attempting to create the pod. Note that a default value of request is +set by *defaultRequest* in file `limits.yaml` (200m CPU and 100Mi memory). +3. For any pod, the sum of all containers memory requests must be >= 6Mi and the sum of all containers +memory limits must be <= 1Gi; the sum of all containers CPU requests must be >= 200m and the sum of all +containers CPU limits must be <= 2. + +## Step 3: Enforcing limits at point of creation + +The limits enumerated in a namespace are only enforced when a pod is created or updated in +the cluster. If you change the limits to a different value range, it does not affect pods that +were previously created in a namespace. + +If a resource (cpu or memory) is being restricted by a limit, the user will get an error at time +of creation explaining why. + +Let's first spin up a replication controller that creates a single container pod to demonstrate +how default values are applied to each pod. + +```shell +$ kubectl run nginx --image=nginx --replicas=1 --namespace=limit-example +replicationcontroller "nginx" created +$ kubectl get pods --namespace=limit-example +NAME READY STATUS RESTARTS AGE +nginx-aq0mf 1/1 Running 0 35s +$ kubectl get pods nginx-aq0mf --namespace=limit-example -o yaml | grep resources -C 8 +``` + +```yaml +resourceVersion: "127" + selfLink: /api/v1/namespaces/limit-example/pods/nginx-aq0mf + uid: 51be42a7-7156-11e5-9921-286ed488f785 +spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx + resources: + limits: + cpu: 300m + memory: 200Mi + requests: + cpu: 200m + memory: 100Mi + terminationMessagePath: /dev/termination-log + volumeMounts: +``` + +Note that our nginx container has picked up the namespace default cpu and memory resource *limits* and *requests*. + +Let's create a pod that exceeds our allowed limits by having it have a container that requests 3 cpu cores. 
+ +```shell +$ kubectl create -f docs/admin/limitrange/invalid-pod.yaml --namespace=limit-example +Error from server: error when creating "docs/admin/limitrange/invalid-pod.yaml": Pod "invalid-pod" is forbidden: [Maximum cpu usage per Pod is 2, but limit is 3., Maximum cpu usage per Container is 2, but limit is 3.] +``` + +Let's create a pod that falls within the allowed limit boundaries. + +```shell +$ kubectl create -f docs/admin/limitrange/valid-pod.yaml --namespace=limit-example +pod "valid-pod" created +$ kubectl get pods valid-pod --namespace=limit-example -o yaml | grep -C 6 resources +``` + +```yaml +uid: 162a12aa-7157-11e5-9921-286ed488f785 +spec: + containers: + - image: gcr.io/google_containers/serve_hostname + imagePullPolicy: IfNotPresent + name: kubernetes-serve-hostname + resources: + limits: + cpu: "1" + memory: 512Mi + requests: + cpu: "1" + memory: 512Mi +``` + +Note that this pod specifies explicit resource *limits* and *requests* so it did not pick up the namespace +default values. + +Note: The *limits* for CPU resource are not enforced in the default Kubernetes setup on the physical node +that runs the container unless the administrator deploys the kubelet with the folllowing flag: + +```shell +$ kubelet --help +Usage of kubelet +.... + --cpu-cfs-quota[=false]: Enable CPU CFS quota enforcement for containers that specify CPU limits +$ kubelet --cpu-cfs-quota=true ... +``` + +## Step 4: Cleanup + +To remove the resources used by this example, you can just delete the limit-example namespace. + +```shell +$ kubectl delete namespace limit-example +namespace "limit-example" deleted +$ kubectl get namespaces +NAME LABELS STATUS AGE +default Active 20m +``` + +## Summary + +Cluster operators that want to restrict the amount of resources a single container or pod may consume +are able to define allowable ranges per Kubernetes namespace. In the absence of any explicit assignments, +the Kubernetes system is able to apply default resource *limits* and *requests* if desired in order to +constrain the amount of resource a pod consumes on a node. 
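If you only want namespace-wide defaults and no min/max enforcement, a trimmed-down LimitRange is enough. The following is a sketch only: the name `defaults-only` is illustrative, and the values simply mirror the Container defaults used in `limits.yaml` above.

```yaml
apiVersion: v1
kind: LimitRange
metadata:
  name: defaults-only
spec:
  limits:
  # Applied by the LimitRanger admission controller to any container
  # that does not specify its own requests/limits.
  - default:            # default *limits*
      cpu: 300m
      memory: 200Mi
    defaultRequest:     # default *requests*
      cpu: 200m
      memory: 100Mi
    type: Container
```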
\ No newline at end of file diff --git a/_includes/docs/docs/admin/limitrange/invalid-pod.yaml b/_includes/docs/docs/admin/limitrange/invalid-pod.yaml new file mode 100644 index 0000000000..b63f25deba --- /dev/null +++ b/_includes/docs/docs/admin/limitrange/invalid-pod.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: invalid-pod +spec: + containers: + - name: kubernetes-serve-hostname + image: gcr.io/google_containers/serve_hostname + resources: + limits: + cpu: "3" + memory: 100Mi diff --git a/_includes/docs/docs/admin/limitrange/limits.yaml b/_includes/docs/docs/admin/limitrange/limits.yaml new file mode 100644 index 0000000000..5de4841705 --- /dev/null +++ b/_includes/docs/docs/admin/limitrange/limits.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: mylimits +spec: + limits: + - max: + cpu: "2" + memory: 1Gi + min: + cpu: 200m + memory: 6Mi + type: Pod + - default: + cpu: 300m + memory: 200Mi + defaultRequest: + cpu: 200m + memory: 100Mi + max: + cpu: "2" + memory: 1Gi + min: + cpu: 100m + memory: 3Mi + type: Container diff --git a/_includes/docs/docs/admin/limitrange/namespace.yaml b/_includes/docs/docs/admin/limitrange/namespace.yaml new file mode 100644 index 0000000000..200a894b0b --- /dev/null +++ b/_includes/docs/docs/admin/limitrange/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: limit-example diff --git a/_includes/docs/docs/admin/limitrange/valid-pod.yaml b/_includes/docs/docs/admin/limitrange/valid-pod.yaml new file mode 100644 index 0000000000..c1ec54183b --- /dev/null +++ b/_includes/docs/docs/admin/limitrange/valid-pod.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: valid-pod + labels: + name: valid-pod +spec: + containers: + - name: kubernetes-serve-hostname + image: gcr.io/google_containers/serve_hostname + resources: + limits: + cpu: "1" + memory: 512Mi diff --git a/_includes/docs/docs/admin/multi-cluster.md b/_includes/docs/docs/admin/multi-cluster.md new file mode 100644 index 0000000000..f1b2fd66ce --- /dev/null +++ b/_includes/docs/docs/admin/multi-cluster.md @@ -0,0 +1,64 @@ + +You may want to set up multiple Kubernetes clusters, both to +have clusters in different regions to be nearer to your users, and to tolerate failures and/or invasive maintenance. +This document describes some of the issues to consider when making a decision about doing so. + +Note that at present, +Kubernetes does not offer a mechanism to aggregate multiple clusters into a single virtual cluster. However, +we [plan to do this in the future](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/proposals/federation.md). + +## Scope of a single cluster + +On IaaS providers such as Google Compute Engine or Amazon Web Services, a VM exists in a +[zone](https://cloud.google.com/compute/docs/zones) or [availability +zone](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones). + +We suggest that all the VMs in a Kubernetes cluster should be in the same availability zone, because: + + - compared to having a single global Kubernetes cluster, there are fewer single-points of failure + - compared to a cluster that spans availability zones, it is easier to reason about the availability properties of a + single-zone cluster. + - when the Kubernetes developers are designing the system (e.g. making assumptions about latency, bandwidth, or + correlated failures) they are assuming all the machines are in a single data center, or otherwise closely connected. 
+ +It is okay to have multiple clusters per availability zone, though on balance we think fewer is better. +Reasons to prefer fewer clusters are: + + - improved bin packing of Pods in some cases with more nodes in one cluster (less resource fragmentation) + - reduced operational overhead (though the advantage is diminished as ops tooling and processes matures) + - reduced costs for per-cluster fixed resource costs, e.g. apiserver VMs (but small as a percentage + of overall cluster cost for medium to large clusters). + +Reasons to have multiple clusters include: + + - strict security policies requiring isolation of one class of work from another (but, see Partitioning Clusters + below). + - test clusters to canary new Kubernetes releases or other cluster software. + +## Selecting the right number of clusters + +The selection of the number of Kubernetes clusters may be a relatively static choice, only revisited occasionally. +By contrast, the number of nodes in a cluster and the number of pods in a service may be change frequently according to +load and growth. + +To pick the number of clusters, first, decide which regions you need to be in to have adequate latency to all your end users, for services that will run +on Kubernetes (if you use a Content Distribution Network, the latency requirements for the CDN-hosted content need not +be considered). Legal issues might influence this as well. For example, a company with a global customer base might decide to have clusters in US, EU, AP, and SA regions. +Call the number of regions to be in `R`. + +Second, decide how many clusters should be able to be unavailable at the same time, while still being available. Call +the number that can be unavailable `U`. If you are not sure, then 1 is a fine choice. + +If it is allowable for load-balancing to direct traffic to any region in the event of a cluster failure, then +you need `R + U` clusters. If it is not (e.g you want to ensure low latency for all users in the event of a +cluster failure), then you need to have `R * U` clusters (`U` in each of `R` regions). In any case, try to put each cluster in a different zone. + +Finally, if any of your clusters would need more than the maximum recommended number of nodes for a Kubernetes cluster, then +you may need even more clusters. Kubernetes v1.0 currently supports clusters up to 100 nodes in size, but we are targeting +1000-node clusters by early 2016. + +## Working with multiple clusters + +When you have multiple clusters, you would typically create services with the same config in each cluster and put each of those +service instances behind a load balancer (AWS Elastic Load Balancer, GCE Forwarding Rule or HTTP Load Balancer) spanning all of them, so that +failures of a single cluster are not visible to end users. \ No newline at end of file diff --git a/_includes/docs/docs/admin/namespaces.md b/_includes/docs/docs/admin/namespaces.md new file mode 100644 index 0000000000..0153e84abb --- /dev/null +++ b/_includes/docs/docs/admin/namespaces.md @@ -0,0 +1,143 @@ + +A Namespace is a mechanism to partition resources created by users into +a logically named group. + +## Motivation + +A single cluster should be able to satisfy the needs of multiple users or groups of users (henceforth a 'user community'). + +Each user community wants to be able to work in isolation from other communities. + +Each user community has its own: + +1. resources (pods, services, replication controllers, etc.) +2. policies (who can or cannot perform actions in their community) +3. 
constraints (this community is allowed this much quota, etc.) + +A cluster operator may create a Namespace for each unique user community. + +The Namespace provides a unique scope for: + +1. named resources (to avoid basic naming collisions) +2. delegated management authority to trusted users +3. ability to limit community resource consumption + +## Use cases + +1. As a cluster operator, I want to support multiple user communities on a single cluster. +2. As a cluster operator, I want to delegate authority to partitions of the cluster to trusted users + in those communities. +3. As a cluster operator, I want to limit the amount of resources each community can consume in order + to limit the impact to other communities using the cluster. +4. As a cluster user, I want to interact with resources that are pertinent to my user community in + isolation of what other user communities are doing on the cluster. + + +## Usage + +Look [here](/{{page.version}}/docs/admin/namespaces/) for an in depth example of namespaces. + +### Viewing namespaces + +You can list the current namespaces in a cluster using: + +```shell +$ kubectl get namespaces +NAME LABELS STATUS +default Active +kube-system Active +``` + +Kubernetes starts with two initial namespaces: + * `default` The default namespace for objects with no other namespace + * `kube-system` The namespace for objects created by the Kubernetes system + +You can also get the summary of a specific namespace using: + +```shell +$ kubectl get namespaces +``` + +Or you can get detailed information with: + +```shell +$ kubectl describe namespaces +Name: default +Labels: +Status: Active + +No resource quota. + +Resource Limits + Type Resource Min Max Default + ---- -------- --- --- --- + Container cpu - - 100m +``` + +Note that these details show both resource quota (if present) as well as resource limit ranges. + +Resource quota tracks aggregate usage of resources in the *Namespace* and allows cluster operators +to define *Hard* resource usage limits that a *Namespace* may consume. + +A limit range defines min/max constraints on the amount of resources a single entity can consume in +a *Namespace*. + +See [Admission control: Limit Range](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_limit_range.md) + +A namespace can be in one of two phases: + * `Active` the namespace is in use + * `Terminating` the namespace is being deleted, and can not be used for new objects + +See the [design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md#phases) for more details. + +### Creating a new namespace + +To create a new namespace, first create a new YAML file called `my-namespace.yaml` with the contents: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: +``` + +Note that the name of your namespace must be a DNS compatible label. + +More information on the `finalizers` field can be found in the namespace [design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md#finalizers). + +Then run: + +```shell +$ kubectl create -f ./my-namespace.yaml +``` + +### Working in namespaces + +See [Setting the namespace for a request](/{{page.version}}/docs/user-guide/namespaces/#setting-the-namespace-for-a-request) +and [Setting the namespace preference](/{{page.version}}/docs/user-guide/namespaces/#setting-the-namespace-preference). 
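In short, you can scope a single request with the `--namespace` flag, or record a namespace preference in your current context. A brief sketch (the namespace name `my-namespace` is illustrative, and this assumes your kubectl has the `config current-context` subcommand):

```shell
# Scope one request to a namespace
$ kubectl get pods --namespace=my-namespace

# Or make that namespace the default for the current context
$ kubectl config set-context $(kubectl config current-context) --namespace=my-namespace
$ kubectl config view | grep namespace:
```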
+ +### Deleting a namespace + +You can delete a namespace with + +```shell +$ kubectl delete namespaces +``` + +**WARNING, this deletes _everything_ under the namespace!** + +This delete is asynchronous, so for a time you will see the namespace in the `Terminating` state. + +## Namespaces and DNS + +When you create a [Service](/{{page.version}}/docs/user-guide/services), it creates a corresponding [DNS entry](/{{page.version}}/docs/admin/dns). +This entry is of the form `..svc.cluster.local`, which means +that if a container just uses `` it will resolve to the service which +is local to a namespace. This is useful for using the same configuration across +multiple namespaces such as Development, Staging and Production. If you want to reach +across namespaces, you need to use the fully qualified domain name (FQDN). + +## Design + +Details of the design of namespaces in Kubernetes, including a [detailed example](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md#example-openshift-origin-managing-a-kubernetes-namespace) +can be found in the [namespaces design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md) \ No newline at end of file diff --git a/_includes/docs/docs/admin/namespaces/index.md b/_includes/docs/docs/admin/namespaces/index.md new file mode 100644 index 0000000000..8ea9443181 --- /dev/null +++ b/_includes/docs/docs/admin/namespaces/index.md @@ -0,0 +1,248 @@ + +Kubernetes _namespaces_ help different projects, teams, or customers to share a Kubernetes cluster. + +It does this by providing the following: + +1. A scope for [Names](/{{page.version}}/docs/user-guide/identifiers). +2. A mechanism to attach authorization and policy to a subsection of the cluster. + +Use of multiple namespaces is optional. + +This example demonstrates how to use Kubernetes namespaces to subdivide your cluster. + +### Step Zero: Prerequisites + +This example assumes the following: + +1. You have an [existing Kubernetes cluster](/{{page.version}}/docs/getting-started-guides/). +2. You have a basic understanding of Kubernetes _[pods](/{{page.version}}/docs/user-guide/pods)_, _[services](/{{page.version}}/docs/user-guide/services)_, and _[replication controllers](/{{page.version}}/docs/user-guide/replication-controller)_. + +### Step One: Understand the default namespace + +By default, a Kubernetes cluster will instantiate a default namespace when provisioning the cluster to hold the default set of pods, +services, and replication controllers used by the cluster. + +Assuming you have a fresh cluster, you can introspect the available namespace's by doing the following: + +```shell +$ kubectl get namespaces +NAME LABELS +default +``` + +### Step Two: Create new namespaces + +For this exercise, we will create two additional Kubernetes namespaces to hold our content. + +Let's imagine a scenario where an organization is using a shared Kubernetes cluster for development and production use cases. + +The development team would like to maintain a space in the cluster where they can get a view on the list of pods, services, and replication controllers +they use to build and run their application. In this space, Kubernetes resources come and go, and the restrictions on who can or cannot modify resources +are relaxed to enable agile development. 
+ +The operations team would like to maintain a space in the cluster where they can enforce strict procedures on who can or cannot manipulate the set of +pods, services, and replication controllers that run the production site. + +One pattern this organization could follow is to partition the Kubernetes cluster into two namespaces: development and production. + +Let's create two new namespaces to hold our work. + +Use the file [`namespace-dev.json`](/{{page.version}}/docs/admin/namespacesnamespace-dev.json) which describes a development namespace: + + + +```json +{ + "kind": "Namespace", + "apiVersion": "v1", + "metadata": { + "name": "development", + "labels": { + "name": "development" + } + } +} +``` + +[Download example](/{{page.version}}/docs/admin/namespacesnamespace-dev.json) + + +Create the development namespace using kubectl. + +```shell +$ kubectl create -f docs/admin/namespaces/namespace-dev.json +``` + +And then lets create the production namespace using kubectl. + +```shell +$ kubectl create -f docs/admin/namespaces/namespace-prod.json +``` + +To be sure things are right, let's list all of the namespaces in our cluster. + +```shell +$ kubectl get namespaces +NAME LABELS STATUS +default Active +development name=development Active +production name=production Active +``` + +### Step Three: Create pods in each namespace + +A Kubernetes namespace provides the scope for pods, services, and replication controllers in the cluster. + +Users interacting with one namespace do not see the content in another namespace. + +To demonstrate this, let's spin up a simple replication controller and pod in the development namespace. + +We first check what is the current context: + +```yaml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: REDACTED + server: https://130.211.122.180 + name: lithe-cocoa-92103_kubernetes +contexts: +- context: + cluster: lithe-cocoa-92103_kubernetes + user: lithe-cocoa-92103_kubernetes + name: lithe-cocoa-92103_kubernetes +current-context: lithe-cocoa-92103_kubernetes +kind: Config +preferences: {} +users: +- name: lithe-cocoa-92103_kubernetes + user: + client-certificate-data: REDACTED + client-key-data: REDACTED + token: 65rZW78y8HbwXXtSXuUw9DbP4FLjHi4b +- name: lithe-cocoa-92103_kubernetes-basic-auth + user: + password: h5M0FtUUIflBSdI7 + username: admin +``` + +The next step is to define a context for the kubectl client to work in each namespace. The value of "cluster" and "user" fields are copied from the current context. + +```shell +$ kubectl config set-context dev --namespace=development --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes +$ kubectl config set-context prod --namespace=production --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes +``` + +The above commands provided two request contexts you can alternate against depending on what namespace you +wish to work against. + +Let's switch to operate in the development namespace. 
+ +```shell +$ kubectl config use-context dev +``` + +You can verify your current context by doing the following: + +```shell +$ kubectl config view +``` + +```yaml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: REDACTED + server: https://130.211.122.180 + name: lithe-cocoa-92103_kubernetes +contexts: +- context: + cluster: lithe-cocoa-92103_kubernetes + namespace: development + user: lithe-cocoa-92103_kubernetes + name: dev +- context: + cluster: lithe-cocoa-92103_kubernetes + user: lithe-cocoa-92103_kubernetes + name: lithe-cocoa-92103_kubernetes +- context: + cluster: lithe-cocoa-92103_kubernetes + namespace: production + user: lithe-cocoa-92103_kubernetes + name: prod +current-context: dev +kind: Config +preferences: {} +users: +- name: lithe-cocoa-92103_kubernetes + user: + client-certificate-data: REDACTED + client-key-data: REDACTED + token: 65rZW78y8HbwXXtSXuUw9DbP4FLjHi4b +- name: lithe-cocoa-92103_kubernetes-basic-auth + user: + password: h5M0FtUUIflBSdI7 + username: admin +``` + +At this point, all requests we make to the Kubernetes cluster from the command line are scoped to the development namespace. + +Let's create some content. + +```shell +$ kubectl run snowflake --image=kubernetes/serve_hostname --replicas=2 +``` + +We have just created a replication controller whose replica size is 2 that is running the pod called snowflake with a basic container that just serves the hostname. + +```shell +$ kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +snowflake snowflake kubernetes/serve_hostname run=snowflake 2 + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +snowflake-8w0qn 1/1 Running 0 22s +snowflake-jrpzb 1/1 Running 0 22s +``` + +And this is great, developers are able to do what they want, and they do not have to worry about affecting content in the production namespace. + +Let's switch to the production namespace and show how resources in one namespace are hidden from the other. + +```shell +$ kubectl config use-context prod +``` + +The production namespace should be empty. + +```shell +$ kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +``` + +Production likes to run cattle, so let's create some cattle pods. + +```shell +$ kubectl run cattle --image=kubernetes/serve_hostname --replicas=5 + +$ kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +cattle cattle kubernetes/serve_hostname run=cattle 5 + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +cattle-97rva 1/1 Running 0 12s +cattle-i9ojn 1/1 Running 0 12s +cattle-qj3yv 1/1 Running 0 12s +cattle-yc7vn 1/1 Running 0 12s +cattle-zz7ea 1/1 Running 0 12s +``` + +At this point, it should be clear that the resources users create in one namespace are hidden from the other namespace. + +As the policy support in Kubernetes evolves, we will extend this scenario to show how you can provide different +authorization rules for each namespace. 
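If you are done with this example, you can tear down what it created. A sketch, assuming you no longer need either namespace (remember that deleting a namespace deletes everything in it, including the replication controllers and pods created above):

```shell
# Switch back to the original context
$ kubectl config use-context lithe-cocoa-92103_kubernetes
# Remove both namespaces and their contents
$ kubectl delete namespaces development production
```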
\ No newline at end of file diff --git a/_includes/docs/docs/admin/namespaces/namespace-dev.json b/_includes/docs/docs/admin/namespaces/namespace-dev.json new file mode 100644 index 0000000000..b2b43b0b73 --- /dev/null +++ b/_includes/docs/docs/admin/namespaces/namespace-dev.json @@ -0,0 +1,10 @@ +{ + "kind": "Namespace", + "apiVersion": "v1", + "metadata": { + "name": "development", + "labels": { + "name": "development" + } + } +} diff --git a/_includes/docs/docs/admin/namespaces/namespace-prod.json b/_includes/docs/docs/admin/namespaces/namespace-prod.json new file mode 100644 index 0000000000..d4503f1ac1 --- /dev/null +++ b/_includes/docs/docs/admin/namespaces/namespace-prod.json @@ -0,0 +1,10 @@ +{ + "kind": "Namespace", + "apiVersion": "v1", + "metadata": { + "name": "production", + "labels": { + "name": "production" + } + } +} diff --git a/_includes/docs/docs/admin/networking.md b/_includes/docs/docs/admin/networking.md new file mode 100644 index 0000000000..7b1d76cc22 --- /dev/null +++ b/_includes/docs/docs/admin/networking.md @@ -0,0 +1,182 @@ + +Kubernetes approaches networking somewhat differently than Docker does by +default. There are 4 distinct networking problems to solve: + +1. Highly-coupled container-to-container communications: this is solved by + [pods](/{{page.version}}/docs/user-guide/pods) and `localhost` communications. +2. Pod-to-Pod communications: this is the primary focus of this document. +3. Pod-to-Service communications: this is covered by [services](/{{page.version}}/docs/user-guide/services). +4. External-to-Service communications: this is covered by [services](/{{page.version}}/docs/user-guide/services). + +* TOC +{:toc} + + +## Summary + +Kubernetes assumes that pods can communicate with other pods, regardless of +which host they land on. We give every pod its own IP address so you do not +need to explicitly create links between pods and you almost never need to deal +with mapping container ports to host ports. This creates a clean, +backwards-compatible model where pods can be treated much like VMs or physical +hosts from the perspectives of port allocation, naming, service discovery, load +balancing, application configuration, and migration. + +To achieve this we must impose some requirements on how you set up your cluster +networking. + +## Docker model + +Before discussing the Kubernetes approach to networking, it is worthwhile to +review the "normal" way that networking works with Docker. By default, Docker +uses host-private networking. It creates a virtual bridge, called `docker0` by +default, and allocates a subnet from one of the private address blocks defined +in [RFC1918](https://tools.ietf.org/html/rfc1918) for that bridge. For each +container that Docker creates, it allocates a virtual ethernet device (called +`veth`) which is attached to the bridge. The veth is mapped to appear as `eth0` +in the container, using Linux namespaces. The in-container `eth0` interface is +given an IP address from the bridge's address range. + +The result is that Docker containers can talk to other containers only if they +are on the same machine (and thus the same virtual bridge). Containers on +different machines can not reach each other - in fact they may end up with the +exact same network ranges and IP addresses. + +In order for Docker containers to communicate across nodes, they must be +allocated ports on the machine's own IP address, which are then forwarded or +proxied to the containers. 
This obviously means that containers must either +coordinate which ports they use very carefully or else be allocated ports +dynamically. + +## Kubernetes model + +Coordinating ports across multiple developers is very difficult to do at +scale and exposes users to cluster-level issues outside of their control. +Dynamic port allocation brings a lot of complications to the system - every +application has to take ports as flags, the API servers have to know how to +insert dynamic port numbers into configuration blocks, services have to know +how to find each other, etc. Rather than deal with this, Kubernetes takes a +different approach. + +Kubernetes imposes the following fundamental requirements on any networking +implementation (barring any intentional network segmentation policies): + + * all containers can communicate with all other containers without NAT + * all nodes can communicate with all containers (and vice-versa) without NAT + * the IP that a container sees itself as is the same IP that others see it as + +What this means in practice is that you can not just take two computers +running Docker and expect Kubernetes to work. You must ensure that the +fundamental requirements are met. + +This model is not only less complex overall, but it is principally compatible +with the desire for Kubernetes to enable low-friction porting of apps from VMs +to containers. If your job previously ran in a VM, your VM had an IP and could +talk to other VMs in your project. This is the same basic model. + +Until now this document has talked about containers. In reality, Kubernetes +applies IP addresses at the `Pod` scope - containers within a `Pod` share their +network namespaces - including their IP address. This means that containers +within a `Pod` can all reach each other's ports on `localhost`. This does imply +that containers within a `Pod` must coordinate port usage, but this is no +different than processes in a VM. We call this the "IP-per-pod" model. This +is implemented in Docker as a "pod container" which holds the network namespace +open while "app containers" (the things the user specified) join that namespace +with Docker's `--net=container:` function. + +As with Docker, it is possible to request host ports, but this is reduced to a +very niche operation. In this case a port will be allocated on the host `Node` +and traffic will be forwarded to the `Pod`. The `Pod` itself is blind to the +existence or non-existence of host ports. + +## How to achieve this + +There are a number of ways that this network model can be implemented. This +document is not an exhaustive study of the various methods, but hopefully serves +as an introduction to various technologies and serves as a jumping-off point. +If some techniques become vastly preferable to others, we might detail them more +here. + +### Google Compute Engine (GCE) + +For the Google Compute Engine cluster configuration scripts, we use [advanced +routing](https://developers.google.com/compute/docs/networking#routing) to +assign each VM a subnet (default is `/24` - 254 IPs). Any traffic bound for that +subnet will be routed directly to the VM by the GCE network fabric. This is in +addition to the "main" IP address assigned to the VM, which is NAT'ed for +outbound internet access. A linux bridge (called `cbr0`) is configured to exist +on that subnet, and is passed to docker's `--bridge` flag. 
+ +We start Docker with: + +```shell +DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false" +``` + +This bridge is created by Kubelet (controlled by the `--configure-cbr0=true` +flag) according to the `Node`'s `spec.podCIDR`. + +Docker will now allocate IPs from the `cbr-cidr` block. Containers can reach +each other and `Nodes` over the `cbr0` bridge. Those IPs are all routable +within the GCE project network. + +GCE itself does not know anything about these IPs, though, so it will not NAT +them for outbound internet traffic. To achieve that we use an iptables rule to +masquerade (aka SNAT - to make it seem as if packets came from the `Node` +itself) traffic that is bound for IPs outside the GCE project network +(10.0.0.0/8). + +```shell +iptables -t nat -A POSTROUTING ! -d 10.0.0.0/8 -o eth0 -j MASQUERADE +``` + +Lastly we enable IP forwarding in the kernel (so the kernel will process +packets for bridged containers): + +```shell +sysctl net.ipv4.ip_forward=1 +``` + +The result of all this is that all `Pods` can reach each other and can egress +traffic to the internet. + +### L2 networks and linux bridging + +If you have a "dumb" L2 network, such as a simple switch in a "bare-metal" +environment, you should be able to do something similar to the above GCE setup. +Note that these instructions have only been tried very casually - it seems to +work, but has not been thoroughly tested. If you use this technique and +perfect the process, please let us know. + +Follow the "With Linux Bridge devices" section of [this very nice +tutorial](http://blog.oddbit.com/2014/08/11/four-ways-to-connect-a-docker/) from +Lars Kellogg-Stedman. + +### Flannel + +[Flannel](https://github.com/coreos/flannel#flannel) is a very simple overlay +network that satisfies the Kubernetes requirements. It installs in minutes and +should get you up and running if the above techniques are not working. Many +people have reported success with Flannel and Kubernetes. + +### OpenVSwitch + +[OpenVSwitch](/{{page.version}}/docs/admin/ovs-networking) is a somewhat more mature but also +complicated way to build an overlay network. This is endorsed by several of the +"Big Shops" for networking. + +### Weave + +[Weave](https://github.com/zettio/weave) is yet another way to build an overlay +network, primarily aiming at Docker integration. + +### Calico + +[Calico](https://github.com/Metaswitch/calico) uses BGP to enable real container +IPs. + +## Other reading + +The early design of the networking model and its rationale, and some future +plans are described in more detail in the [networking design +document](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/networking.md). \ No newline at end of file diff --git a/_includes/docs/docs/admin/node.md b/_includes/docs/docs/admin/node.md new file mode 100644 index 0000000000..de7e896146 --- /dev/null +++ b/_includes/docs/docs/admin/node.md @@ -0,0 +1,210 @@ + +* TOC +{:toc} + +## What is a node? + +`Node` is a worker machine in Kubernetes, previously known as `Minion`. Node +may be a VM or physical machine, depending on the cluster. Each node has +the services necessary to run [Pods](/{{page.version}}/docs/user-guide/pods) and is managed by the master +components. The services on a node include docker, kubelet and network proxy. See +[The Kubernetes Node](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/architecture.md#the-kubernetes-node) section in the +architecture design doc for more details. 
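You can inspect the node records the master knows about, including the status fields described in the next section, with kubectl. For example (a quick sketch):

```shell
# List the nodes registered with the cluster
$ kubectl get nodes
# Show the addresses, conditions, capacity and node info for every node
$ kubectl describe nodes
```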
+ +## Node Status + +Node status describes current status of a node. For now, there are the following +pieces of information: + +### Node Addresses + +The usage of these fields varies depending on your cloud provider or bare metal configuration. + +* HostName: Generally not used + +* ExternalIP: Generally the IP address of the node that is externally routable (available from outside the cluster) + +* InternalIP: Generally the IP address of the node that is routable only within the cluster + + +### Node Phase + +Node Phase is the current lifecycle phase of node, one of `Pending`, +`Running` and `Terminated`. + +* Pending: New nodes are created in this state. A node stays in this state until it is configured. + +* Running: Node has been configured and the Kubernetes components are running + +* Terminated: Node has been removed from the cluster. It will not receive any scheduling requests, +and any running pods will be removed from the node. + +Node with `Running` phase is necessary but not sufficient requirement for +scheduling Pods. For a node to be considered a scheduling candidate, it +must have appropriate conditions, see below. + +### Node Condition + +Node Condition describes the conditions of `Running` nodes. Currently the only +node condition is Ready. The Status of this condition can be True, False, or +Unknown. True means the Kubelet is healthy and ready to accept pods. +False means the Kubelet is not healthy and is not accepting pods. Unknown +means the Node Controller, which manages node lifecycle and is responsible for +setting the Status of the condition, has not heard from the +node recently (currently 40 seconds). +Node condition is represented as a json object. For example, +the following conditions mean the node is in sane state: + +```json +"conditions": [ + { + "kind": "Ready", + "status": "True", + }, +] +``` + +If the Status of the Ready condition +is Unknown or False for more than five minutes, then all of the Pods on the node are terminated by the Node Controller. + +### Node Capacity + +Describes the resources available on the node: CPUs, memory and the maximum +number of pods that can be scheduled onto the node. + +### Node Info + +General information about the node, for instance kernel version, Kubernetes version +(kubelet version, kube-proxy version), docker version (if used), OS name. +The information is gathered by Kubelet from the node. + +## Node Management + +Unlike [Pods](/{{page.version}}/docs/user-guide/pods) and [Services](/{{page.version}}/docs/user-guide/services), a Node is not inherently +created by Kubernetes: it is either taken from cloud providers like Google Compute Engine, +or from your pool of physical or virtual machines. What this means is that when +Kubernetes creates a node, it is really just creating an object that represents the node in its internal state. +After creation, Kubernetes will check whether the node is valid or not. +For example, if you try to create a node from the following content: + +```json +{ + "kind": "Node", + "apiVersion": "v1", + "metadata": { + "name": "10.240.79.157", + "labels": { + "name": "my-first-k8s-node" + } + } +} +``` + +Kubernetes will create a Node object internally (the representation), and +validate the node by health checking based on the `metadata.name` field: we +assume `metadata.name` can be resolved. If the node is valid, i.e. all necessary +services are running, it is eligible to run a Pod; otherwise, it will be +ignored for any cluster activity, until it becomes valid. 
Note that Kubernetes +will keep the object for the invalid node unless it is explicitly deleted by the client, and it will keep +checking to see if it becomes valid. + +Currently, there are three components that interact with the Kubernetes node interface: Node Controller, Kubelet, and kubectl. + +### Node Controller + +Node controller is a component in Kubernetes master which manages Node +objects. It performs two major functions: cluster-wide node synchronization +and single node life-cycle management. + +Node controller has a sync loop that creates/deletes Nodes from Kubernetes +based on all matching VM instances listed from the cloud provider. The sync period +can be controlled via flag `--node-sync-period`. If a new VM instance +gets created, Node Controller creates a representation for it. If an existing +instance gets deleted, Node Controller deletes the representation. Note however, +that Node Controller is unable to provision the node for you, i.e. it won't install +any binary; therefore, to +join a node to a Kubernetes cluster, you as an admin need to make sure proper services are +running in the node. In the future, we plan to automatically provision some node +services. + +### Self-Registration of Nodes + +When kubelet flag `--register-node` is true (the default), the kubelet will attempt to +register itself with the API server. This is the preferred pattern, used by most distros. + +For self-registration, the kubelet is started with the following options: + + - `--api-servers=` tells the kubelet the location of the apiserver. + - `--kubeconfig` tells kubelet where to find credentials to authenticate itself to the apiserver. + - `--cloud-provider=` tells the kubelet how to talk to a cloud provider to read metadata about itself. + - `--register-node` tells the kubelet to create its own node resource. + +Currently, any kubelet is authorized to create/modify any node resource, but in practice it only creates/modifies +its own. (In the future, we plan to limit authorization to only allow a kubelet to modify its own Node resource.) + +#### Manual Node Administration + +A cluster administrator can create and modify Node objects. + +If the administrator wishes to create node objects manually, set kubelet flag +`--register-node=false`. + +The administrator can modify Node resources (regardless of the setting of `--register-node`). +Modifications include setting labels on the Node, and marking it unschedulable. + +Labels on nodes can be used in conjunction with node selectors on pods to control scheduling, +e.g. to constrain a Pod to only be eligible to run on a subset of the nodes. + +Making a node unscheduleable will prevent new pods from being scheduled to that +node, but will not affect any existing pods on the node. This is useful as a +preparatory step before a node reboot, etc. For example, to mark a node +unschedulable, run this command: + +```shell +kubectl replace nodes 10.1.2.3 --patch='{"apiVersion": "v1", "unschedulable": true}' +``` + +Note that pods which are created by a daemonSet controller bypass the Kubernetes scheduler, +and do not respect the unschedulable attribute on a node. The assumption is that daemons belong on +the machine even if it is being drained of applications in preparation for a reboot. + +### Node capacity + +The capacity of the node (number of cpus and amount of memory) is part of the node resource. +Normally, nodes register themselves and report their capacity when creating the node resource. 
If +you are doing [manual node administration](#manual-node-administration), then you need to set node +capacity when adding a node. + +The Kubernetes scheduler ensures that there are enough resources for all the pods on a node. It +checks that the sum of the limits of containers on the node is no greater than the node capacity. It +includes all containers started by kubelet, but not containers started directly by docker, nor +processes not in containers. + +If you want to explicitly reserve resources for non-Pod processes, you can create a placeholder +pod. Use the following template: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: resource-reserver +spec: + containers: + - name: sleep-forever + image: gcr.io/google_containers/pause:0.8.0 + resources: + limits: + cpu: 100m + memory: 100Mi +``` + +Set the `cpu` and `memory` values to the amount of resources you want to reserve. +Place the file in the manifest directory (`--config=DIR` flag of kubelet). Do this +on each kubelet where you want to reserve resources. + + +## API Object + +Node is a top-level resource in the kubernetes REST API. More details about the +API object can be found at: [Node API +object](http://kubernetes.io/v1.1/docs/api-reference/v1/definitions/#_v1_node). diff --git a/_includes/docs/docs/admin/ovs-networking.md b/_includes/docs/docs/admin/ovs-networking.md new file mode 100644 index 0000000000..51e601ec0d --- /dev/null +++ b/_includes/docs/docs/admin/ovs-networking.md @@ -0,0 +1,16 @@ + +This document describes how OpenVSwitch is used to setup networking between pods across nodes. +The tunnel type could be GRE or VxLAN. VxLAN is preferable when large scale isolation needs to be performed within the network. + +![OVS Networking](/images/docs/ovs-networking.png) + +The vagrant setup in Kubernetes does the following: + +The docker bridge is replaced with a brctl generated linux bridge (kbr0) with a 256 address space subnet. Basically, a node gets 10.244.x.0/24 subnet and docker is configured to use that bridge instead of the default docker0 bridge. + +Also, an OVS bridge is created(obr0) and added as a port to the kbr0 bridge. All OVS bridges across all nodes are linked with GRE tunnels. So, each node has an outgoing GRE tunnel to all other nodes. It does not need to be a complete mesh really, just meshier the better. STP (spanning tree) mode is enabled in the bridges to prevent loops. + +Routing rules enable any 10.244.0.0/16 target to become reachable via the OVS bridge connected with the tunnels. + + + diff --git a/_includes/docs/docs/admin/resource-quota.md b/_includes/docs/docs/admin/resource-quota.md new file mode 100755 index 0000000000..47d7d299dd --- /dev/null +++ b/_includes/docs/docs/admin/resource-quota.md @@ -0,0 +1,152 @@ + +When several users or teams share a cluster with a fixed number of nodes, +there is a concern that one team could use more than its fair share of resources. + +Resource quotas are a tool for administrators to address this concern. Resource quotas +work like this: + +- Different teams work in different namespaces. Currently this is voluntary, but + support for making this mandatory via ACLs is planned. +- The administrator creates a Resource Quota for each namespace. +- Users put compute resource requests on their pods. The sum of all resource requests across + all pods in the same namespace must not exceed any hard resource limit in any Resource Quota + document for the namespace. 
Note that we used to verify Resource Quota by taking the sum of + resource limits of the pods, but this was altered to use resource requests. Backwards compatibility + for those pods previously created is preserved because pods that only specify a resource limit have + their resource requests defaulted to match their defined limits. The user is only charged for the + resources they request in the Resource Quota versus their limits because the request is the minimum + amount of resource guaranteed by the cluster during scheduling. For more information on over commit, + see [compute-resources](/{{page.version}}/docs/user-guide/compute-resources). +- If creating a pod would cause the namespace to exceed any of the limits specified in the + the Resource Quota for that namespace, then the request will fail with HTTP status + code `403 FORBIDDEN`. +- If quota is enabled in a namespace and the user does not specify *requests* on the pod for each + of the resources for which quota is enabled, then the POST of the pod will fail with HTTP + status code `403 FORBIDDEN`. Hint: Use the LimitRange admission controller to force default + values of *limits* (then resource *requests* would be equal to *limits* by default, see + [admission controller](/{{page.version}}/docs/admin/admission-controllers)) before the quota is checked to avoid this problem. + +Examples of policies that could be created using namespaces and quotas are: + +- In a cluster with a capacity of 32 GiB RAM, and 16 cores, let team A use 20 Gib and 10 cores, + let B use 10GiB and 4 cores, and hold 2GiB and 2 cores in reserve for future allocation. +- Limit the "testing" namespace to using 1 core and 1GiB RAM. Let the "production" namespace + use any amount. + +In the case where the total capacity of the cluster is less than the sum of the quotas of the namespaces, +there may be contention for resources. This is handled on a first-come-first-served basis. + +Neither contention nor changes to quota will affect already-running pods. + +## Enabling Resource Quota + +Resource Quota support is enabled by default for many Kubernetes distributions. It is +enabled when the apiserver `--admission-control=` flag has `ResourceQuota` as +one of its arguments. + +Resource Quota is enforced in a particular namespace when there is a +`ResourceQuota` object in that namespace. There should be at most one +`ResourceQuota` object in a namespace. + +## Compute Resource Quota + +The total sum of [compute resources](/{{page.version}}/docs/user-guide/compute-resources) requested by pods +in a namespace can be limited. The following compute resource types are supported: + +| ResourceName | Description | +| ------------ | ----------- | +| cpu | Total cpu requests of containers | +| memory | Total memory requests of containers + +For example, `cpu` quota sums up the `resources.requests.cpu` fields of every +container of every pod in the namespace, and enforces a maximum on that sum. + +## Object Count Quota + +The number of objects of a given type can be restricted. 
The following types +are supported: + +| ResourceName | Description | +| ------------ | ----------- | +| pods | Total number of pods | +| services | Total number of services | +| replicationcontrollers | Total number of replication controllers | +| resourcequotas | Total number of [resource quotas](/{{page.version}}/docs/admin/admission-controllers/#resourcequota) | +| secrets | Total number of secrets | +| persistentvolumeclaims | Total number of [persistent volume claims](/{{page.version}}/docs/user-guide/persistent-volumes/#persistentvolumeclaims) | + +For example, `pods` quota counts and enforces a maximum on the number of `pods` +created in a single namespace. + +You might want to set a pods quota on a namespace +to avoid the case where a user creates many small pods and exhausts the cluster's +supply of Pod IPs. + +## Viewing and Setting Quotas + +Kubectl supports creating, updating, and viewing quotas: + +```shell +$ kubectl namespace myspace +$ cat < quota.json +{ + "apiVersion": "v1", + "kind": "ResourceQuota", + "metadata": { + "name": "quota", + }, + "spec": { + "hard": { + "memory": "1Gi", + "cpu": "20", + "pods": "10", + "services": "5", + "replicationcontrollers":"20", + "resourcequotas":"1", + }, + } +} +EOF +$ kubectl create -f ./quota.json +$ kubectl get quota +NAME +quota +$ kubectl describe quota quota +Name: quota +Resource Used Hard +-------- ---- ---- +cpu 0m 20 +memory 0 1Gi +pods 5 10 +replicationcontrollers 5 20 +resourcequotas 1 1 +services 3 5 +``` + +## Quota and Cluster Capacity + +Resource Quota objects are independent of the Cluster Capacity. They are +expressed in absolute units. So, if you add nodes to your cluster, this does *not* +automatically give each namespace the ability to consume more resources. + +Sometimes more complex policies may be desired, such as: + + - proportionally divide total cluster resources among several teams. + - allow each tenant to grow resource usage as needed, but have a generous + limit to prevent accidental resource exhaustion. + - detect demand from one namespace, add nodes, and increase quota. + +Such policies could be implemented using ResourceQuota as a building-block, by +writing a 'controller' which watches the quota usage and adjusts the quota +hard limits of each namespace according to other signals. + +Note that resource quota divides up aggregate cluster resources, but it creates no +restrictions around nodes: pods from several namespaces may run on the same node. + +## Example + +See a [detailed example for how to use resource quota](/{{page.version}}/docs/admin/resourcequota/). + +## Read More + +See [ResourceQuota design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_resource_quota.md) for more information. \ No newline at end of file diff --git a/_includes/docs/docs/admin/resourcequota/index.md b/_includes/docs/docs/admin/resourcequota/index.md new file mode 100644 index 0000000000..6bc50a09d7 --- /dev/null +++ b/_includes/docs/docs/admin/resourcequota/index.md @@ -0,0 +1,154 @@ + +This example demonstrates how [resource quota](/{{page.version}}/docs/admin/admission-controllers/#resourcequota) and +[limitsranger](/{{page.version}}/docs/admin/admission-controllers/#limitranger) can be applied to a Kubernetes namespace. +See [ResourceQuota design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control_resource_quota.md) for more information. + +This example assumes you have a functional Kubernetes setup. 
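It also assumes the API server was started with the `ResourceQuota` and `LimitRanger` admission controllers enabled, since both are exercised below. A sketch of the relevant apiserver flag (the other plug-ins shown are just a typical set, matching the manifests elsewhere in these docs):

```shell
kube-apiserver --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota ...
```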
+ +## Step 1: Create a namespace + +This example will work in a custom namespace to demonstrate the concepts involved. + +Let's create a new namespace called quota-example: + +```shell +$ kubectl create -f docs/admin/resourcequota/namespace.yaml +namespace "quota-example" created +$ kubectl get namespaces +NAME LABELS STATUS AGE +default Active 2m +quota-example Active 39s +``` + +## Step 2: Apply a quota to the namespace + +By default, a pod will run with unbounded CPU and memory requests/limits. This means that any pod in the +system will be able to consume as much CPU and memory on the node that executes the pod. + +Users may want to restrict how much of the cluster resources a given namespace may consume +across all of its pods in order to manage cluster usage. To do this, a user applies a quota to +a namespace. A quota lets the user set hard limits on the total amount of node resources (cpu, memory) +and API resources (pods, services, etc.) that a namespace may consume. In term of resources, Kubernetes +checks the total resource *requests*, not resource *limits* of all containers/pods in the namespace. + +Let's create a simple quota in our namespace: + +```shell +$ kubectl create -f docs/admin/resourcequota/quota.yaml --namespace=quota-example +resourcequota "quota" created +``` + +Once your quota is applied to a namespace, the system will restrict any creation of content +in the namespace until the quota usage has been calculated. This should happen quickly. + +You can describe your current quota usage to see what resources are being consumed in your +namespace. + +```shell +$ kubectl describe quota quota --namespace=quota-example +Name: quota +Namespace: quota-example +Resource Used Hard +-------- ---- ---- +cpu 0 20 +memory 0 1Gi +persistentvolumeclaims 0 10 +pods 0 10 +replicationcontrollers 0 20 +resourcequotas 1 1 +secrets 1 10 +services 0 5 +``` + +## Step 3: Applying default resource requests and limits + +Pod authors rarely specify resource requests and limits for their pods. + +Since we applied a quota to our project, let's see what happens when an end-user creates a pod that has unbounded +cpu and memory by creating an nginx container. + +To demonstrate, lets create a replication controller that runs nginx: + +```shell +$ kubectl run nginx --image=nginx --replicas=1 --namespace=quota-example +replicationcontroller "nginx" created +``` + +Now let's look at the pods that were created. + +```shell +$ kubectl get pods --namespace=quota-example +NAME READY STATUS RESTARTS AGE +``` + +What happened? I have no pods! Let's describe the replication controller to get a view of what is happening. + +```shell +kubectl describe rc nginx --namespace=quota-example +Name: nginx +Namespace: quota-example +Image(s): nginx +Selector: run=nginx +Labels: run=nginx +Replicas: 0 current / 1 desired +Pods Status: 0 Running / 0 Waiting / 0 Succeeded / 0 Failed +No volumes. +Events: + FirstSeen LastSeen Count From SubobjectPath Reason Message + 42s 11s 3 {replication-controller } FailedCreate Error creating: Pod "nginx-" is forbidden: Must make a non-zero request for memory since it is tracked by quota. +``` + +The Kubernetes API server is rejecting the replication controllers requests to create a pod because our pods +do not specify any memory usage *request*. 
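One way around this, without touching the namespace configuration, is for the pod author to declare requests explicitly in the pod spec. A hypothetical pod that would pass the quota check might look like the following (a sketch for illustration only, not one of the walkthrough files):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-requests   # hypothetical name, for illustration only
  namespace: quota-example
spec:
  containers:
  - name: nginx
    image: nginx
    resources:
      requests:
        cpu: 100m        # counted against the namespace cpu quota
        memory: 256Mi    # counted against the namespace memory quota
```

Requiring every pod author to remember this is brittle, though, so the example continues by applying namespace-level defaults instead.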
+ +So let's set some default values for the amount of cpu and memory a pod can consume: + +```shell +$ kubectl create -f docs/admin/resourcequota/limits.yaml --namespace=quota-example +limitrange "limits" created +$ kubectl describe limits limits --namespace=quota-example +Name: limits +Namespace: quota-example +Type Resource Min Max Request Limit Limit/Request +---- -------- --- --- ------- ----- ------------- +Container memory - - 256Mi 512Mi - +Container cpu - - 100m 200m - +``` + +Now any time a pod is created in this namespace, if it has not specified any resource request/limit, the default +amount of cpu and memory per container will be applied, and the request will be used as part of admission control. + +Now that we have applied default resource *request* for our namespace, our replication controller should be able to +create its pods. + +```shell +$ kubectl get pods --namespace=quota-example +NAME READY STATUS RESTARTS AGE +nginx-fca65 1/1 Running 0 1m +``` + +And if we print out our quota usage in the namespace: + +```shell +$ kubectl describe quota quota --namespace=quota-example +Name: quota +Namespace: quota-example +Resource Used Hard +-------- ---- ---- +cpu 100m 20 +memory 256Mi 1Gi +persistentvolumeclaims 0 10 +pods 1 10 +replicationcontrollers 1 20 +resourcequotas 1 1 +secrets 1 10 +services 0 5 +``` + +You can now see the pod that was created is consuming explicit amounts of resources (specified by resource *request*), and the usage is being tracked by the Kubernetes system properly. + +## Summary + +Actions that consume node resources for cpu and memory can be subject to hard quota limits defined by the namespace quota. The resource consumption is measured by resource *request* in pod specification. + +Any action that consumes those resources can be tweaked, or can pick up namespace level defaults to meet your end goal. \ No newline at end of file diff --git a/_includes/docs/docs/admin/resourcequota/limits.yaml b/_includes/docs/docs/admin/resourcequota/limits.yaml new file mode 100755 index 0000000000..84b50b4e2a --- /dev/null +++ b/_includes/docs/docs/admin/resourcequota/limits.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: limits +spec: + limits: + - default: + cpu: 200m + memory: 512Mi + defaultRequest: + cpu: 100m + memory: 256Mi + type: Container diff --git a/_includes/docs/docs/admin/resourcequota/namespace.yaml b/_includes/docs/docs/admin/resourcequota/namespace.yaml new file mode 100644 index 0000000000..2da842033b --- /dev/null +++ b/_includes/docs/docs/admin/resourcequota/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: quota-example diff --git a/_includes/docs/docs/admin/resourcequota/quota.yaml b/_includes/docs/docs/admin/resourcequota/quota.yaml new file mode 100644 index 0000000000..6885efc6d1 --- /dev/null +++ b/_includes/docs/docs/admin/resourcequota/quota.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: quota +spec: + hard: + cpu: "20" + memory: 1Gi + persistentvolumeclaims: "10" + pods: "10" + replicationcontrollers: "20" + resourcequotas: "1" + secrets: "10" + services: "5" diff --git a/_includes/docs/docs/admin/salt.md b/_includes/docs/docs/admin/salt.md new file mode 100644 index 0000000000..66c2d46749 --- /dev/null +++ b/_includes/docs/docs/admin/salt.md @@ -0,0 +1,100 @@ + +The Kubernetes cluster can be configured using Salt. 
+ +The Salt scripts are shared across multiple hosting providers, so it's important to understand some background information prior to making a modification to ensure your changes do not break hosting Kubernetes across multiple environments. Depending on where you host your Kubernetes cluster, you may be using different operating systems and different networking configurations. As a result, it's important to understand some background information before making Salt changes in order to minimize introducing failures for other hosting providers. + +## Salt cluster setup + +The **salt-master** service runs on the kubernetes-master [(except on the default GCE setup)](#standalone-salt-configuration-on-gce). + +The **salt-minion** service runs on the kubernetes-master and each kubernetes-node in the cluster. + +Each salt-minion service is configured to interact with the **salt-master** service hosted on the kubernetes-master via the **master.conf** file [(except on GCE)](#standalone-salt-configuration-on-gce). + +```shell +[root@kubernetes-master] $ cat /etc/salt/minion.d/master.conf +master: kubernetes-master +``` + +The salt-master is contacted by each salt-minion and depending upon the machine information presented, the salt-master will provision the machine as either a kubernetes-master or kubernetes-node with all the required capabilities needed to run Kubernetes. + +If you are running the Vagrant based environment, the **salt-api** service is running on the kubernetes-master. It is configured to enable the vagrant user to introspect the salt cluster in order to find out about machines in the Vagrant environment via a REST API. + +## Standalone Salt Configuration on GCE + +On GCE, the master and nodes are all configured as [standalone minions](http://docs.saltstack.com/en/latest/topics/tutorials/standalone_minion). The configuration for each VM is derived from the VM's [instance metadata](https://cloud.google.com/compute/docs/metadata) and then stored in Salt grains (`/etc/salt/minion.d/grains.conf`) and pillars (`/srv/salt-overlay/pillar/cluster-params.sls`) that local Salt uses to enforce state. + +All remaining sections that refer to master/minion setups should be ignored for GCE. One fallout of the GCE setup is that the Salt mine doesn't exist - there is no sharing of configuration amongst nodes. + +## Salt security + +*(Not applicable on default GCE setup.)* + +Security is not enabled on the salt-master, and the salt-master is configured to auto-accept incoming requests from minions. It is not recommended to use this security configuration in production environments without deeper study. (In some environments this isn't as bad as it might sound if the salt master port isn't externally accessible and you trust everyone on your network.) + +```shell +[root@kubernetes-master] $ cat /etc/salt/master.d/auto-accept.conf +open_mode: True +auto_accept: True +``` + +## Salt minion configuration + +Each minion in the salt cluster has an associated configuration that instructs the salt-master how to provision the required resources on the machine. + +An example file is presented below using the Vagrant based environment. + +```shell +[root@kubernetes-master] $ cat /etc/salt/minion.d/grains.conf +grains: + etcd_servers: $MASTER_IP + cloud_provider: vagrant + roles: + - kubernetes-master +``` + +Each hosting environment has a slightly different grains.conf file that is used to build conditional logic where required in the Salt files. 
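For example, a kubernetes-node provisioned by the same Vagrant environment might carry a grains file along the following lines (a hypothetical sketch for illustration; the exact keys present differ by provider):

```shell
# hypothetical node in the Vagrant environment
[root@kubernetes-node-1 ~] $ cat /etc/salt/minion.d/grains.conf
grains:
  etcd_servers: $MASTER_IP
  cloud_provider: vagrant
  roles:
    - kubernetes-pool
```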
+ +The following enumerates the set of defined key/value pairs that are supported today. If you add new ones, please make sure to update this list. + +Key | Value +------------- | ------------- +`api_servers` | (Optional) The IP address / host name where a kubelet can get read-only access to kube-apiserver +`cbr-cidr` | (Optional) The minion IP address range used for the docker container bridge. +`cloud` | (Optional) Which IaaS platform is used to host Kubernetes, *gce*, *azure*, *aws*, *vagrant* +`etcd_servers` | (Optional) Comma-delimited list of IP addresses the kube-apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role, or 127.0.0.1 on GCE. +`hostnamef` | (Optional) The full host name of the machine, i.e. uname -n +`node_ip` | (Optional) The IP address to use to address this node +`hostname_override` | (Optional) Mapped to the kubelet hostname-override +`network_mode` | (Optional) Networking model to use among nodes: *openvswitch* +`networkInterfaceName` | (Optional) Networking interface to use to bind addresses, default value *eth0* +`publicAddressOverride` | (Optional) The IP address the kube-apiserver should use to bind against for external read-only access +`roles` | (Required) 1. `kubernetes-master` means this machine is the master in the Kubernetes cluster. 2. `kubernetes-pool` means this machine is a kubernetes-node. Depending on the role, the Salt scripts will provision different resources on the machine. + +These keys may be leveraged by the Salt sls files to branch behavior. + +In addition, a cluster may be running a Debian based operating system or Red Hat based operating system (Centos, Fedora, RHEL, etc.). As a result, it's important to sometimes distinguish behavior based on operating system using if branches like the following. + +```liquid +{% raw %} +{% if grains['os_family'] == 'RedHat' %} +// something specific to a RedHat environment (Centos, Fedora, RHEL) where you may use yum, systemd, etc. +{% else %} +// something specific to Debian environment (apt-get, initd) +{% endif %} +{% endraw %} +``` + +## Best Practices + +1. When configuring default arguments for processes, it's best to avoid the use of EnvironmentFiles (Systemd in Red Hat environments) or init.d files (Debian distributions) to hold default values that should be common across operating system environments. This helps keep our Salt template files easy to understand for editors who may not be familiar with the particulars of each distribution. + +## Future enhancements (Networking) + +Per pod IP configuration is provider-specific, so when making networking changes, it's important to sandbox these as all providers may not use the same mechanisms (iptables, openvswitch, etc.) + +We should define a grains.conf key that captures more specifically what network configuration environment is being used to avoid future confusion across providers. + +## Further reading + +The [cluster/saltbase](http://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/) tree has more details on the current SaltStack configuration. \ No newline at end of file diff --git a/_includes/docs/docs/admin/service-accounts-admin.md b/_includes/docs/docs/admin/service-accounts-admin.md new file mode 100644 index 0000000000..7db5175843 --- /dev/null +++ b/_includes/docs/docs/admin/service-accounts-admin.md @@ -0,0 +1,95 @@ + +*This is a Cluster Administrator guide to service accounts. 
It assumes knowledge of +the [User Guide to Service Accounts](/{{page.version}}/docs/user-guide/service-accounts).* + +*Support for authorization and user accounts is planned but incomplete. Sometimes +incomplete features are referred to in order to better describe service accounts.* + +## User accounts vs service accounts + +Kubernetes distinguishes between the concepts of a user account and a service account +for a number of reasons: + + - User accounts are for humans. Service accounts are for processes, which + run in pods. + - User accounts are intended to be global. Names must be unique across all + namespaces of a cluster; a future user resource will not be namespaced. + Service accounts are namespaced. + - Typically, a cluster's user accounts might be synced from a corporate + database, where new user account creation requires special privileges and + is tied to complex business processes. Service account creation is intended + to be more lightweight, allowing cluster users to create service accounts for + specific tasks (i.e. the principle of least privilege). + - Auditing considerations for humans and service accounts may differ. + - A config bundle for a complex system may include definitions of various service + accounts for components of that system. Because service accounts can be created + ad-hoc and have namespaced names, such config is portable. + +## Service account automation + +Three separate components cooperate to implement the automation around service accounts: + + - A Service account admission controller + - A Token controller + - A Service account controller + +### Service Account Admission Controller + +The modification of pods is implemented via a plugin +called an [Admission Controller](/{{page.version}}/docs/admin/admission-controllers). It is part of the apiserver. +It acts synchronously to modify pods as they are created or updated. When this plugin is active +(and it is by default on most distributions), it does the following when a pod is created or modified: + + 1. If the pod does not have a `ServiceAccount` set, it sets the `ServiceAccount` to `default`. + 2. It ensures that the `ServiceAccount` referenced by the pod exists, and otherwise rejects the pod. + 3. If the pod does not contain any `ImagePullSecrets`, then the `ImagePullSecrets` of the +`ServiceAccount` are added to the pod. + 4. It adds a `volume` to the pod which contains a token for API access. + 5. It adds a `volumeSource` to each container of the pod mounted at `/var/run/secrets/kubernetes.io/serviceaccount`. + +### Token Controller + +TokenController runs as part of controller-manager. It acts asynchronously. It: + +- observes serviceAccount creation and creates a corresponding Secret to allow API access +- observes serviceAccount deletion and deletes all corresponding ServiceAccountToken Secrets +- observes secret addition, and ensures the referenced ServiceAccount exists, and adds a token to the secret if needed +- observes secret deletion and removes a reference from the corresponding ServiceAccount if needed + +#### To create additional API tokens + +A controller loop ensures a secret with an API token exists for each service
To create additional API tokens for a service account, create a secret +of type `ServiceAccountToken` with an annotation referencing the service +account, and the controller will update it with a generated token: + +```json +secret.json: +{ + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "mysecretname", + "annotations": { + "kubernetes.io/service-account.name": "myserviceaccount" + } + }, + "type": "kubernetes.io/service-account-token" +} +``` + +```shell +kubectl create -f ./secret.json +kubectl describe secret mysecretname +``` + +#### To delete/invalidate a service account token + +```shell +kubectl delete secret mysecretname +``` + +### Service Account Controller + +Service Account Controller manages ServiceAccount inside namespaces, and ensures +a ServiceAccount named "default" exists in every active namespace. \ No newline at end of file diff --git a/_includes/docs/docs/admin/static-pods.md b/_includes/docs/docs/admin/static-pods.md new file mode 100644 index 0000000000..eff5c5cb2a --- /dev/null +++ b/_includes/docs/docs/admin/static-pods.md @@ -0,0 +1,121 @@ + + +**Static pods are to be deprecated and can be removed in any future Kubernetes release!** + +*Static pod* are managed directly by kubelet daemon on a specific node, without API server observing it. It does not have associated any replication controller, kubelet daemon itself watches it and restarts it when it crashes. There is no health check though. Static pods are always bound to one kubelet daemon and always run on the same node with it. + +Kubelet automatically creates so-called *mirror pod* on Kubernetes API server for each static pod, so the pods are visible there, but they cannot be controlled from the API server. + +## Static pod creation + +Static pod can be created in two ways: either by using configuration file(s) or by HTTP. + +### Configuration files + +The configuration files are just standard pod definition in json or yaml format in specific directory. Use `kubelet --config=` to start kubelet daemon, which periodically scans the directory and creates/deletes static pods as yaml/json files appear/disappear there. + +For example, this is how to start a simple web server as a static pod: + +1. Choose a node where we want to run the static pod. In this example, it's `my-minion1`. + +```shell +[joe@host ~] $ ssh my-minion1 +``` + +2. Choose a directory, say `/etc/kubelet.d` and place a web server pod definition there, e.g. `/etc/kubernetes.d/static-web.yaml`: + +```shell +[root@my-minion1 ~] $ mkdir /etc/kubernetes.d/ + [root@my-minion1 ~] $ cat </etc/kubernetes.d/static-web.yaml + apiVersion: v1 + kind: Pod + metadata: + name: static-web + labels: + role: myrole + spec: + containers: + - name: web + image: nginx + ports: + - name: web + containerPort: 80 + protocol: tcp + EOF +``` + +2. Configure your kubelet daemon on the node to use this directory by running it with `--config=/etc/kubelet.d/` argument. On Fedora Fedora 21 with Kubernetes 0.17 edit `/etc/kubernetes/kubelet` to include this line: + + ``` +KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --config=/etc/kubelet.d/" + ``` +Instructions for other distributions or Kubernetes installations may vary. + +3. Restart kubelet. On Fedora 21, this is: + +```shell +[root@my-minion1 ~] $ systemctl restart kubelet +``` + +## Pods created via HTTP + +Kubelet periodically downloads a file specified by `--manifest-url=` argument and interprets it as a json/yaml file with a pod definition. It works the same as `--config=`, i.e. 
it's reloaded every now and then and changes are applied to running static pods (see below). + +## Behavior of static pods + +When kubelet starts, it automatically starts all pods defined in directory specified in `--config=` or `--manifest-url=` arguments, i.e. our static-web. (It may take some time to pull nginx image, be patient'|): + +```shell +[joe@my-minion1 ~] $ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS NAMES +f6d05272b57e nginx:latest "nginx" 8 minutes ago Up 8 minutes k8s_web.6f802af4_static-web-fk-minion1_default_67e24ed9466ba55986d120c867395f3c_378e5f3c +``` + +If we look at our Kubernetes API server (running on host `my-master`), we see that a new mirror-pod was created there too: + +```shell +[joe@host ~] $ ssh my-master +[joe@my-master ~] $ kubectl get pods +POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED MESSAGE +static-web-my-minion1 172.17.0.3 my-minion1/192.168.100.71 role=myrole Running 11 minutes + web nginx Running 11 minutes +``` + +Labels from the static pod are propagated into the mirror-pod and can be used as usual for filtering. + +Notice we cannot delete the pod with the API server (e.g. via [`kubectl`](/{{page.version}}/docs/user-guide/kubectl/kubectl) command), kubelet simply won't remove it. + +```shell +[joe@my-master ~] $ kubectl delete pod static-web-my-minion1 +pods/static-web-my-minion1 +[joe@my-master ~] $ kubectl get pods +POD IP CONTAINER(S) IMAGE(S) HOST ... +static-web-my-minion1 172.17.0.3 my-minion1/192.168.100.71 ... +``` + +Back to our `my-minion1` host, we can try to stop the container manually and see, that kubelet automatically restarts it in a while: + +```shell +[joe@host ~] $ ssh my-minion1 +[joe@my-minion1 ~] $ docker stop f6d05272b57e +[joe@my-minion1 ~] $ sleep 20 +[joe@my-minion1 ~] $ docker ps +CONTAINER ID IMAGE COMMAND CREATED ... +5b920cbaf8b1 nginx:latest "nginx -g 'daemon of 2 seconds ago ... +``` + +## Dynamic addition and removal of static pods + +Running kubelet periodically scans the configured directory (`/etc/kubelet.d` in our example) for changes and adds/removes pods as files appear/disappear in this directory. + +```shell +[joe@my-minion1 ~] $ mv /etc/kubernetes.d/static-web.yaml /tmp +[joe@my-minion1 ~] $ sleep 20 +[joe@my-minion1 ~] $ docker ps +// no nginx container is running +[joe@my-minion1 ~] $ mv /tmp/static-web.yaml /etc/kubernetes.d/ +[joe@my-minion1 ~] $ sleep 20 +[joe@my-minion1 ~] $ docker ps +CONTAINER ID IMAGE COMMAND CREATED ... +e7a62e3427f1 nginx:latest "nginx -g 'daemon of 27 seconds ago +``` \ No newline at end of file diff --git a/_includes/docs/docs/api.md b/_includes/docs/docs/api.md new file mode 100644 index 0000000000..2fe4044bc6 --- /dev/null +++ b/_includes/docs/docs/api.md @@ -0,0 +1,127 @@ + +Primary system and API concepts are documented in the [User guide](/{{page.version}}/docs/user-guide/). + +Overall API conventions are described in the [API conventions doc](/{{page.version}}/docs/devel/api-conventions). + +Complete API details are documented via [Swagger](http://swagger.io/). The Kubernetes apiserver (aka "master") exports an API that can be used to retrieve the [Swagger spec](https://github.com/swagger-api/swagger-spec/tree/master/schemas/v1.2) for the Kubernetes API, by default at `/swaggerapi`, and a UI you can use to browse the API documentation at `/swagger-ui`. We also periodically update a [statically generated UI](http://kubernetes.io/third_party/swagger-ui/). 
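One convenient way to reach these endpoints from a workstation is through `kubectl proxy`, which forwards requests to the apiserver using your existing credentials (a sketch; the proxy listens on 127.0.0.1:8001 by default):

```shell
# Run a local proxy to the apiserver in the background
$ kubectl proxy --port=8001 &
# Fetch the Swagger API listing; the interactive UI is served at /swagger-ui/
$ curl http://127.0.0.1:8001/swaggerapi
```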
+ +Remote access to the API is discussed in the [access doc](/{{page.version}}/docs/admin/accessing-the-api). + +The Kubernetes API also serves as the foundation for the declarative configuration schema for the system. The [Kubectl](/{{page.version}}/docs/user-guide/kubectl/kubectl) command-line tool can be used to create, update, delete, and get API objects. + +Kubernetes also stores its serialized state (currently in [etcd](https://coreos.com/docs/distributed-configuration/getting-started-with-etcd/)) in terms of the API resources. + +Kubernetes itself is decomposed into multiple components, which interact through its API. + +## API changes + +In our experience, any system that is successful needs to grow and change as new use cases emerge or existing ones change. Therefore, we expect the Kubernetes API to continuously change and grow. However, we intend to not break compatibility with existing clients, for an extended period of time. In general, new API resources and new resource fields can be expected to be added frequently. Elimination of resources or fields will require following a deprecation process. The precise deprecation policy for eliminating features is TBD, but once we reach our 1.0 milestone, there will be a specific policy. + +What constitutes a compatible change and how to change the API are detailed by the [API change document](/{{page.version}}/docs/devel/api_changes). + +## API versioning + +To make it easier to eliminate fields or restructure resource representations, Kubernetes supports +multiple API versions, each at a different API path, such as `/api/v1` or +`/apis/extensions/v1beta1`. + +We chose to version at the API level rather than at the resource or field level to ensure that the API presents a clear, consistent view of system resources and behavior, and to enable controlling access to end-of-lifed and/or experimental APIs. + +Note that API versioning and Software versioning are only indirectly related. The [API and release +versioning proposal](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/versioning.md) describes the relationship between API versioning and +software versioning. + + +Different API versions imply different levels of stability and support. The criteria for each level are described +in more detail in the [API Changes documentation](/{{page.version}}/docs/devel/api_changes/#alpha-beta-and-stable-versions). They are summarized here: + +- Alpha level: + - The version names contain `alpha` (e.g. `v1alpha1`). + - May be buggy. Enabling the feature may expose bugs. Disabled by default. + - Support for feature may be dropped at any time without notice. + - The API may change in incompatible ways in a later software release without notice. + - Recommended for use only in short-lived testing clusters, due to increased risk of bugs and lack of long-term support. +- Beta level: + - The version names contain `beta` (e.g. `v2beta3`). + - Code is well tested. Enabling the feature is considered safe. Enabled by default. + - Support for the overall feature will not be dropped, though details may change. + - The schema and/or semantics of objects may change in incompatible ways in a subsequent beta or stable release. When this happens, + we will provide instructions for migrating to the next version. This may require deleting, editing, and re-creating + API objects. The editing process may require some thought. This may require downtime for appplications that rely on the feature. 
+ - Recommended for only non-business-critical uses because of potential for incompatible changes in subsequent releases. If you have + multiple clusters which can be upgraded independently, you may be able to relax this restriction. + - **Please do try our beta features and give feedback on them! Once they exit beta, it may not be practical for us to make more changes.** +- Stable level: + - The version name is `vX` where `X` is an integer. + - Stable versions of features will appear in released software for many subsequent versions. + +## API groups + +To make it easier to extend the Kubernetes API, we are in the process of implementing [*API +groups*](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/proposals/api-group.md). These are simply different interfaces to read and/or modify the +same underlying resources. The API group is specified in a REST path and in the `apiVersion` field +of a serialized object. + +Currently there are two API groups in use: + +1. the "core" group, which is at REST path `/api/v1` and is not specified as part of the `apiVersion` field, e.g. + `apiVersion: v1`. +1. the "extensions" group, which is at REST path `/apis/extensions/$VERSION`, and which uses + `apiVersion: extensions/$VERSION` (e.g. currently `apiVersion: extensions/v1beta1`). + +In the future we expect that there will be more API groups, all at REST path `/apis/$API_GROUP` and +using `apiVersion: $API_GROUP/$VERSION`. We expect that there will be a way for (third parties to +create their own API groups](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/extending-api.md), and to avoid naming collisions. + +## Enabling resources in the extensions group + +Jobs, Ingress and HorizontalPodAutoscalers are enabled by default. +Other extensions resources can be enabled by setting runtime-config on +apiserver. runtime-config accepts comma separated values. For ex: to enable deployments and disable jobs, set +`--runtime-config=extensions/v1beta1/deployments=true,extensions/v1beta1/jobs=false` + +## v1beta1, v1beta2, and v1beta3 are deprecated; please move to v1 ASAP + +As of June 4, 2015, the Kubernetes v1 API has been enabled by default. The v1beta1 and v1beta2 APIs were deleted on June 1, 2015. v1beta3 is planned to be deleted on July 6, 2015. + +### v1 conversion tips (from v1beta3) + +We're working to convert all documentation and examples to v1. A simple [API conversion tool](/{{page.version}}/docs/admin/cluster-management/#switching-your-config-files-to-a-new-api-version) has been written to simplify the translation process. Use `kubectl create --validate` in order to validate your json or yaml against our Swagger spec. + +Changes to services are the most significant difference between v1beta3 and v1. + +* The `service.spec.portalIP` property is renamed to `service.spec.clusterIP`. +* The `service.spec.createExternalLoadBalancer` property is removed. Specify `service.spec.type: "LoadBalancer"` to create an external load balancer instead. +* The `service.spec.publicIPs` property is deprecated and now called `service.spec.deprecatedPublicIPs`. This property will be removed entirely when v1beta3 is removed. The vast majority of users of this field were using it to expose services on ports on the node. Those users should specify `service.spec.type: "NodePort"` instead. Read [External Services](/{{page.version}}/docs/user-guide/services/#external-services) for more info. 
If this is not sufficient for your use case, please file an issue or contact @thockin. + +Some other difference between v1beta3 and v1: + +* The `pod.spec.containers[*].privileged` and `pod.spec.containers[*].capabilities` properties are now nested under the `pod.spec.containers[*].securityContext` property. See [Security Contexts](/{{page.version}}/docs/user-guide/security-context). +* The `pod.spec.host` property is renamed to `pod.spec.nodeName`. +* The `endpoints.subsets[*].addresses.IP` property is renamed to `endpoints.subsets[*].addresses.ip`. +* The `pod.status.containerStatuses[*].state.termination` and `pod.status.containerStatuses[*].lastState.termination` properties are renamed to `pod.status.containerStatuses[*].state.terminated` and `pod.status.containerStatuses[*].lastState.terminated` respectively. +* The `pod.status.Condition` property is renamed to `pod.status.conditions`. +* The `status.details.id` property is renamed to `status.details.name`. + +### v1beta3 conversion tips (from v1beta1/2) + +Some important differences between v1beta1/2 and v1beta3: + +* The resource `id` is now called `name`. +* `name`, `labels`, `annotations`, and other metadata are now nested in a map called `metadata` +* `desiredState` is now called `spec`, and `currentState` is now called `status` +* `/minions` has been moved to `/nodes`, and the resource has kind `Node` +* The namespace is required (for all namespaced resources) and has moved from a URL parameter to the path: `/api/v1beta3/namespaces/{namespace}/{resource_collection}/{resource_name}`. If you were not using a namespace before, use `default` here. +* The names of all resource collections are now lower cased - instead of `replicationControllers`, use `replicationcontrollers`. +* To watch for changes to a resource, open an HTTP or Websocket connection to the collection query and provide the `?watch=true` query parameter along with the desired `resourceVersion` parameter to watch from. +* The `labels` query parameter has been renamed to `labelSelector`. +* The `fields` query parameter has been renamed to `fieldSelector`. +* The container `entrypoint` has been renamed to `command`, and `command` has been renamed to `args`. +* Container, volume, and node resources are expressed as nested maps (e.g., `resources{cpu:1}`) rather than as individual fields, and resource values support [scaling suffixes](/{{page.version}}/docs/user-guide/compute-resources/#specifying-resource-quantities) rather than fixed scales (e.g., milli-cores). +* Restart policy is represented simply as a string (e.g., `"Always"`) rather than as a nested map (`always{}`). +* Pull policies changed from `PullAlways`, `PullNever`, and `PullIfNotPresent` to `Always`, `Never`, and `IfNotPresent`. +* The volume `source` is inlined into `volume` rather than nested. +* Host volumes have been changed from `hostDir` to `hostPath` to better reflect that they can be files or directories. + + + diff --git a/_includes/docs/docs/devel/api-conventions.md b/_includes/docs/docs/devel/api-conventions.md new file mode 100644 index 0000000000..52162f06a3 --- /dev/null +++ b/_includes/docs/docs/devel/api-conventions.md @@ -0,0 +1,657 @@ + +Updated: 9/20/2015 + +*This document is oriented at users who want a deeper understanding of the Kubernetes +API structure, and developers wanting to extend the Kubernetes API. 
An introduction to +using resources with kubectl can be found in [Working with resources](/{{page.version}}/docs/user-guide/working-with-resources).* + +* TOC +{:toc} + +The conventions of the [Kubernetes API](/{{page.version}}/docs/api/) (and related APIs in the ecosystem) are intended to ease client development and ensure that configuration mechanisms can be implemented that work across a diverse set of use cases consistently. + +The general style of the Kubernetes API is RESTful - clients create, update, delete, or retrieve a description of an object via the standard HTTP verbs (POST, PUT, DELETE, and GET) - and those APIs preferentially accept and return JSON. Kubernetes also exposes additional endpoints for non-standard verbs and allows alternative content types. All of the JSON accepted and returned by the server has a schema, identified by the "kind" and "apiVersion" fields. Where relevant HTTP header fields exist, they should mirror the content of JSON fields, but the information should not be represented only in the HTTP header. + +The following terms are defined: + +* **Kind** the name of a particular object schema (e.g. the "Cat" and "Dog" kinds would have different attributes and properties) +* **Resource** a representation of a system entity, sent or retrieved as JSON via HTTP to the server. Resources are exposed via: + * Collections - a list of resources of the same type, which may be queryable + * Elements - an individual resource, addressable via a URL + +Each resource typically accepts and returns data of a single kind. A kind may be accepted or returned by multiple resources that reflect specific use cases. For instance, the kind "Pod" is exposed as a "pods" resource that allows end users to create, update, and delete pods, while a separate "pod status" resource (that acts on "Pod" kind) allows automated processes to update a subset of the fields in that resource. + +Resource collections should be all lowercase and plural, whereas kinds are CamelCase and singular. + + +## Types (Kinds) + +Kinds are grouped into three categories: + +1. **Objects** represent a persistent entity in the system. + + Creating an API object is a record of intent - once created, the system will work to ensure that resource exists. All API objects have common metadata. + + An object may have multiple resources that clients can use to perform specific actions that create, update, delete, or get. + + Examples: `Pod`, `ReplicationController`, `Service`, `Namespace`, `Node`. + +2. **Lists** are collections of **resources** of one (usually) or more (occasionally) kinds. + + Lists have a limited set of common metadata. All lists use the "items" field to contain the array of objects they return. + + Most objects defined in the system should have an endpoint that returns the full set of resources, as well as zero or more endpoints that return subsets of the full list. Some objects may be singletons (the current user, the system defaults) and may not have lists. + + In addition, all lists that return objects with labels should support label filtering (see [docs/user-guide/labels.md](/{{page.version}}/docs/user-guide/labels), and most lists should support filtering by fields. + + Examples: PodLists, ServiceLists, NodeLists + + TODO: Describe field filtering below or in a separate doc. + +3. **Simple** kinds are used for specific actions on objects and for non-persistent entities. + + Given their limited scope, they have the same set of limited common metadata as lists. 
+ + For instance, the "Status" kind is returned when errors occur and is not persisted in the system. + + Many simple resources are "subresources", which are rooted at API paths of specific resources. When resources wish to expose alternative actions or views that are closely coupled to a single resource, they should do so using new sub-resources. Common subresources include: + + * `/binding`: Used to bind a resource representing a user request (e.g., Pod, PersistentVolumeClaim) to a cluster infrastructure resource (e.g., Node, PersistentVolume). + * `/status`: Used to write just the status portion of a resource. For example, the `/pods` endpoint only allows updates to `metadata` and `spec`, since those reflect end-user intent. An automated process should be able to modify status for users to see by sending an updated Pod kind to the server to the "/pods/<name>/status" endpoint - the alternate endpoint allows different rules to be applied to the update, and access to be appropriately restricted. + * `/scale`: Used to read and write the count of a resource in a manner that is independent of the specific resource schema. + + Two additional subresources, `proxy` and `portforward`, provide access to cluster resources as described in [docs/user-guide/accessing-the-cluster.md](/{{page.version}}/docs/user-guide/accessing-the-cluster). + +The standard REST verbs (defined below) MUST return singular JSON objects. Some API endpoints may deviate from the strict REST pattern and return resources that are not singular JSON objects, such as streams of JSON objects or unstructured text log data. + +The term "kind" is reserved for these "top-level" API types. The term "type" should be used for distinguishing sub-categories within objects or subobjects. + +### Resources + +All JSON objects returned by an API MUST have the following fields: + +* kind: a string that identifies the schema this object should have +* apiVersion: a string that identifies the version of the schema the object should have + +These fields are required for proper decoding of the object. They may be populated by the server by default from the specified URL path, but the client likely needs to know the values in order to construct the URL path. + +### Objects + +#### Metadata + +Every object kind MUST have the following metadata in a nested object field called "metadata": + +* namespace: a namespace is a DNS compatible subdomain that objects are subdivided into. The default namespace is 'default'. See [docs/user-guide/namespaces.md](/{{page.version}}/docs/user-guide/namespaces) for more. +* name: a string that uniquely identifies this object within the current namespace (see [docs/user-guide/identifiers.md](/{{page.version}}/docs/user-guide/identifiers)). This value is used in the path when retrieving an individual object. +* uid: a unique in time and space value (typically an RFC 4122 generated identifier, see [docs/user-guide/identifiers.md](/{{page.version}}/docs/user-guide/identifiers)) used to distinguish between objects with the same name that have been deleted and recreated + +Every object SHOULD have the following metadata in a nested object field called "metadata": + +* resourceVersion: a string that identifies the internal version of this object that can be used by clients to determine when objects have changed. This value MUST be treated as opaque by clients and passed unmodified back to the server. 
Clients should not assume that the resource version has meaning across namespaces, different kinds of resources, or different servers. (see [concurrency control](#concurrency-control-and-consistency), below, for more details) +* generation: a sequence number representing a specific generation of the desired state. Set by the system and monotonically increasing, per-resource. May be compared, such as for RAW and WAW consistency. +* creationTimestamp: a string representing an RFC 3339 date of the date and time an object was created +* deletionTimestamp: a string representing an RFC 3339 date of the date and time after which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource will be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. +* labels: a map of string keys and values that can be used to organize and categorize objects (see [docs/user-guide/labels.md](/{{page.version}}/docs/user-guide/labels)) +* annotations: a map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about this object (see [docs/user-guide/annotations.md](/{{page.version}}/docs/user-guide/annotations)) + +Labels are intended for organizational purposes by end users (select the pods that match this label query). Annotations enable third-party automation and tooling to decorate objects with additional metadata for their own use. + +#### Spec and Status + +By convention, the Kubernetes API makes a distinction between the specification of the desired state of an object (a nested object field called "spec") and the status of the object at the current time (a nested object field called "status"). The specification is a complete description of the desired state, including configuration settings provided by the user, [default values](#defaulting) expanded by the system, and properties initialized or otherwise changed after creation by other ecosystem components (e.g., schedulers, auto-scalers), and is persisted in stable storage with the API object. If the specification is deleted, the object will be purged from the system. The status summarizes the current state of the object in the system, and is usually persisted with the object by an automated processes but may be generated on the fly. At some cost and perhaps some temporary degradation in behavior, the status could be reconstructed by observation if it were lost. + +When a new version of an object is POSTed or PUT, the "spec" is updated and available immediately. Over time the system will work to bring the "status" into line with the "spec". The system will drive toward the most recent "spec" regardless of previous versions of that stanza. In other words, if a value is changed from 2 to 5 in one PUT and then back down to 3 in another PUT the system is not required to 'touch base' at 5 before changing the "status" to 3. In other words, the system's behavior is *level-based* rather than *edge-based*. This enables robust behavior in the presence of missed intermediate state changes. + +The Kubernetes API also serves as the foundation for the declarative configuration schema for the system. 
In order to facilitate level-based operation and expression of declarative configuration, fields in the specification should have declarative rather than imperative names and semantics -- they represent the desired state, not actions intended to yield the desired state. + +The PUT and POST verbs on objects will ignore the "status" values. A `/status` subresource is provided to enable system components to update statuses of resources they manage. + +Otherwise, PUT expects the whole object to be specified. Therefore, if a field is omitted it is assumed that the client wants to clear that field's value. The PUT verb does not accept partial updates. Modification of just part of an object may be achieved by GETting the resource, modifying part of the spec, labels, or annotations, and then PUTting it back. See [concurrency control](#concurrency-control-and-consistency), below, regarding read-modify-write consistency when using this pattern. Some objects may expose alternative resource representations that allow mutation of the status, or performing custom actions on the object. + +All objects that represent a physical resource whose state may vary from the user's desired intent SHOULD have a "spec" and a "status". Objects whose state cannot vary from the user's desired intent MAY have only "spec", and MAY rename "spec" to a more appropriate name. + +Objects that contain both spec and status should not contain additional top-level fields other than the standard metadata fields. + +##### Typical status properties + +**Conditions** represent the latest available observations of an object's current state. Objects may report multiple conditions, and new types of conditions may be added in the future. Therefore, conditions are represented using a list/slice, where all have similar structure. + +The `FooCondition` type for some resource type `Foo` may include a subset of the following fields, but must contain at least `type` and `status` fields: + +```go +Type FooConditionType `json:"type" description:"type of Foo condition"` + Status ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + LastHeartbeatTime unversioned.Time `json:"lastHeartbeatTime,omitempty" description:"last time we got an update on a given condition"` + LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" description:"last time the condition transit from one status to another"` + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +``` + +Additional fields may be added in the future. + +Conditions should be added to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from other observations. + +Condition status values may be `True`, `False`, or `Unknown`. The absence of a condition should be interpreted the same as `Unknown`. + +In general, condition values may change back and forth, but some condition transitions may be monotonic, depending on the resource and condition type. However, conditions are observations and not, themselves, state machines, nor do we define comprehensive state machines for objects, nor behaviors associated with state transitions. The system is level-based rather than edge-triggered, and should assume an Open World. 
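As an illustration of the shape this takes on the wire, a healthy pod might report a condition like the following in its `status` (an abridged sketch; timestamps and other conditions omitted):

```yaml
status:
  conditions:
  - type: Ready
    status: "True"
    # reason and message are typically present only when a condition is not met
```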
+ +A typical oscillating condition type is `Ready`, which indicates the object was believed to be fully operational at the time it was last probed. A possible monotonic condition could be `Succeeded`. A `False` status for `Succeeded` would imply failure. An object that was still active would not have a `Succeeded` condition, or its status would be `Unknown`. + +Some resources in the v1 API contain fields called **`phase`**, and associated `message`, `reason`, and other status fields. The pattern of using `phase` is deprecated. Newer API types should use conditions instead. Phase was essentially a state-machine enumeration field, that contradicted [system-design principles](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/principles.md#control-logic) and hampered evolution, since [adding new enum values breaks backward compatibility](/{{page.version}}/docs/devel/api_changes). Rather than encouraging clients to infer implicit properties from phases, we intend to explicitly expose the conditions that clients need to monitor. Conditions also have the benefit that it is possible to create some conditions with uniform meaning across all resource types, while still exposing others that are unique to specific resource types. See [#7856](http://issues.k8s.io/7856) for more details and discussion. + +In condition types, and everywhere else they appear in the API, **`Reason`** is intended to be a one-word, CamelCase representation of the category of cause of the current status, and **`Message`** is intended to be a human-readable phrase or sentence, which may contain specific details of the individual occurrence. `Reason` is intended to be used in concise output, such as one-line `kubectl get` output, and in summarizing occurrences of causes, whereas `Message` is intended to be presented to users in detailed status explanations, such as `kubectl describe` output. + +Historical information status (e.g., last transition time, failure counts) is only provided with reasonable effort, and is not guaranteed to not be lost. + +Status information that may be large (especially proportional in size to collections of other resources, such as lists of references to other objects -- see below) and/or rapidly changing, such as [resource usage](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/resources.md#usage-data), should be put into separate objects, with possibly a reference from the original object. This helps to ensure that GETs and watch remain reasonably efficient for the majority of clients, which may not need that data. + +Some resources report the `observedGeneration`, which is the `generation` most recently observed by the component responsible for acting upon changes to the desired state of the resource. This can be used, for instance, to ensure that the reported status reflects the most recent desired status. + +#### References to related objects + +References to loosely coupled sets of objects, such as [pods](/{{page.version}}/docs/user-guide/pods) overseen by a [replication controller](/{{page.version}}/docs/user-guide/replication-controller), are usually best referred to using a [label selector](/{{page.version}}/docs/user-guide/labels). In order to ensure that GETs of individual objects remain bounded in time and space, these sets may be queried via separate API queries, but will not be expanded in the referring object's status. 
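For instance, the pods overseen by a replication controller whose selector is `run=nginx` would be retrieved with a separate label-selector query rather than read out of the controller object itself (a sketch; the selector value is illustrative):

```shell
# Equivalent REST call: GET /api/v1/namespaces/default/pods?labelSelector=run%3Dnginx
$ kubectl get pods -l run=nginx
```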
+ +References to specific objects, especially specific resource versions and/or specific fields of those objects, are specified using the `ObjectReference` type (or other types representing strict subsets of it). Unlike partial URLs, the ObjectReference type facilitates flexible defaulting of fields from the referring object or other contextual information. + +References in the status of the referee to the referrer may be permitted, when the references are one-to-one and do not need to be frequently updated, particularly in an edge-based manner. + +#### Lists of named subobjects preferred over maps + +Discussed in [#2004](http://issue.k8s.io/2004) and elsewhere. There are no maps of subobjects in any API objects. Instead, the convention is to use a list of subobjects containing name fields. + +For example: + +```yaml +ports: + - name: www + containerPort: 80 +``` + +vs. + +```yaml +ports: + www: + containerPort: 80 +``` + +This rule maintains the invariant that all JSON/YAML keys are fields in API objects. The only exceptions are pure maps in the API (currently, labels, selectors, annotations, data), as opposed to sets of subobjects. + +#### Constants + +Some fields will have a list of allowed values (enumerations). These values will be strings, and they will be in CamelCase, with an initial uppercase letter. Examples: "ClusterFirst", "Pending", "ClientIP". + +### Lists and Simple kinds + +Every list or simple kind SHOULD have the following metadata in a nested object field called "metadata": + +* resourceVersion: a string that identifies the common version of the objects returned by in a list. This value MUST be treated as opaque by clients and passed unmodified back to the server. A resource version is only valid within a single namespace on a single kind of resource. + +Every simple kind returned by the server, and any simple kind sent to the server that must support idempotency or optimistic concurrency should return this value.Since simple resources are often used as input alternate actions that modify objects, the resource version of the simple resource should correspond to the resource version of the object. + + +## Differing Representations + +An API may represent a single entity in different ways for different clients, or transform an object after certain transitions in the system occur. In these cases, one request object may have two representations available as different resources, or different kinds. + +An example is a Service, which represents the intent of the user to group a set of pods with common behavior on common ports. When Kubernetes detects a pod matches the service selector, the IP address and port of the pod are added to an Endpoints resource for that Service. The Endpoints resource exists only if the Service exists, but exposes only the IPs and ports of the selected pods. The full service is represented by two distinct resources - under the original Service resource the user created, as well as in the Endpoints resource. + +As another example, a "pod status" resource may accept a PUT with the "pod" kind, with different rules about what fields may be changed. + +Future versions of Kubernetes may allow alternative encodings of objects beyond JSON. + + +## Verbs on Resources + +API resources should use the traditional REST pattern: + +* GET /<resourceNamePlural> - Retrieve a list of type <resourceName>, e.g. GET /pods returns a list of Pods. +* POST /<resourceNamePlural> - Create a new resource from the JSON object provided by the client. 
+* GET /<resourceNamePlural>/<name> - Retrieves a single resource with the given name, e.g. GET /pods/first returns a Pod named 'first'. Should be constant time, and the resource should be bounded in size. +* DELETE /<resourceNamePlural>/<name> - Delete the single resource with the given name. DeleteOptions may specify gracePeriodSeconds, the optional duration in seconds before the object should be deleted. Individual kinds may declare fields which provide a default grace period, and different kinds may have differing kind-wide default grace periods. A user provided grace period overrides a default grace period, including the zero grace period ("now"). +* PUT /<resourceNamePlural>/<name> - Update or create the resource with the given name with the JSON object provided by the client. +* PATCH /<resourceNamePlural>/<name> - Selectively modify the specified fields of the resource. See more information [below](#patch). +* GET /<resourceNamePlural>&watch=true - Receive a stream of JSON objects corresponding to changes made to any resource of the given kind over time. + +### PATCH operations + +The API supports three different PATCH operations, determined by their corresponding Content-Type header: + +* JSON Patch, `Content-Type: application/json-patch+json` + * As defined in [RFC6902](https://tools.ietf.org/html/rfc6902), a JSON Patch is a sequence of operations that are executed on the resource, e.g. `{"op": "add", "path": "/a/b/c", "value": [ "foo", "bar" ]}`. For more details on how to use JSON Patch, see the RFC. +* Merge Patch, `Content-Type: application/merge-patch+json` + * As defined in [RFC7386](https://tools.ietf.org/html/rfc7386), a Merge Patch is essentially a partial representation of the resource. The submitted JSON is "merged" with the current resource to create a new one, then the new one is saved. For more details on how to use Merge Patch, see the RFC. +* Strategic Merge Patch, `Content-Type: application/strategic-merge-patch+json` + * Strategic Merge Patch is a custom implementation of Merge Patch. For a detailed explanation of how it works and why it needed to be introduced, see below. + +#### Strategic Merge Patch + +In the standard JSON merge patch, JSON objects are always merged but lists are always replaced. Often that isn't what we want. Let's say we start with the following Pod: + +```yaml +spec: + containers: + - name: nginx + image: nginx-1.0 +``` + +...and we POST that to the server (as JSON). Then let's say we want to *add* a container to this Pod. + +```yaml +PATCH /api/v1/namespaces/default/pods/pod-name +spec: + containers: + - name: log-tailer + image: log-tailer-1.0 +``` + +If we were to use standard Merge Patch, the entire container list would be replaced with the single log-tailer container. However, our intent is for the container lists to merge together based on the `name` field. + +To solve this problem, Strategic Merge Patch uses metadata attached to the API objects to determine what lists should be merged and which ones should not. Currently the metadata is available as struct tags on the API objects themselves, but will become available to clients as Swagger annotations in the future. In the above example, the `patchStrategy` metadata for the `containers` field would be `merge` and the `patchMergeKey` would be `name`. + +Note: If the patch results in merging two lists of scalars, the scalars are first deduplicated and then merged. + +Strategic Merge Patch also supports special operations as listed below. 
+ +### List Operations + +To override the container list to be strictly replaced, regardless of the default: + +```yaml +containers: + - name: nginx + image: nginx-1.0 + - $patch: replace # any further $patch operations nested in this list will be ignored +``` + +To delete an element of a list that should be merged: + +```yaml +containers: + - name: nginx + image: nginx-1.0 + - $patch: delete + name: log-tailer # merge key and value goes here +``` + +### Map Operations + +To indicate that a map should not be merged and instead should be taken literally: + +```yaml +$patch: replace # recursive and applies to all fields of the map it's in +containers: +- name: nginx + image: nginx-1.0 +``` + +To delete a field of a map: + +```yaml +name: nginx +image: nginx-1.0 +labels: + live: null # set the value of the map key to null +``` + +## Idempotency + +All compatible Kubernetes APIs MUST support "name idempotency" and respond with an HTTP status code 409 when a request is made to POST an object that has the same name as an existing object in the system. See [docs/user-guide/identifiers.md](/{{page.version}}/docs/user-guide/identifiers) for details. + +Names generated by the system may be requested using `metadata.generateName`. GenerateName indicates that the name should be made unique by the server prior to persisting it. A non-empty value for the field indicates the name will be made unique (and the name returned to the client will be different than the name passed). The value of this field will be combined with a unique suffix on the server if the Name field has not been provided. The provided value must be valid within the rules for Name, and may be truncated by the length of the suffix required to make the value unique on the server. If this field is specified, and Name is not present, the server will NOT return a 409 if the generated name exists - instead, it will either return 201 Created or 504 with Reason `ServerTimeout` indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). + +## Defaulting + +Default resource values are API version-specific, and they are applied during +the conversion from API-versioned declarative configuration to internal objects +representing the desired state (`Spec`) of the resource. Subsequent GETs of the +resource will include the default values explicitly. + +Incorporating the default values into the `Spec` ensures that `Spec` depicts the +full desired state so that it is easier for the system to determine how to +achieve the state, and for the user to know what to anticipate. + +API version-specific default values are set by the API server. + +## Late Initialization + +Late initialization is when resource fields are set by a system controller +after an object is created/updated. + +For example, the scheduler sets the `pod.spec.nodeName` field after the pod is created. + +Late-initializers should only make the following types of modifications: + + - Setting previously unset fields + - Adding keys to maps + - Adding values to arrays which have mergeable semantics (`patchStrategy:"merge"` attribute in + the type definition). + +These conventions: + + 1. allow a user (with sufficient privilege) to override any system-default behaviors by setting + the fields that would otherwise have been defaulted. + 1. enables updates from users to be merged with changes made during late initialization, using + strategic merge patch, as opposed to clobbering the change. + 1. 
allow the component which does the late-initialization to use strategic merge patch, which
+    facilitates composition and concurrency of such components.
+
+Although the apiserver Admission Control stage acts prior to object creation,
+Admission Control plugins should follow the Late Initialization conventions
+too, to allow their implementation to be later moved to a 'controller', or to client libraries.
+
+## Concurrency Control and Consistency
+
+Kubernetes leverages the concept of *resource versions* to achieve optimistic concurrency. All Kubernetes resources have a "resourceVersion" field as part of their metadata. This resourceVersion is a string that identifies the internal version of an object that can be used by clients to determine when objects have changed. When a record is about to be updated, its version is checked against a pre-saved value, and if it doesn't match, the update fails with a StatusConflict (HTTP status code 409).
+
+The resourceVersion is changed by the server every time an object is modified. If resourceVersion is included with the PUT operation, the system will verify that there have not been other successful mutations to the resource during a read/modify/write cycle, by verifying that the current value of resourceVersion matches the specified value.
+
+The resourceVersion is currently backed by [etcd's modifiedIndex](https://coreos.com/docs/distributed-configuration/etcd-api/). However, it's important to note that the application should *not* rely on the implementation details of the versioning system maintained by Kubernetes. We may change the implementation of resourceVersion in the future, such as to change it to a timestamp or per-object counter.
+
+The only way for a client to know the expected value of resourceVersion is to have received it from the server in response to a prior operation, typically a GET. This value MUST be treated as opaque by clients and passed unmodified back to the server. Clients should not assume that the resource version has meaning across namespaces, different kinds of resources, or different servers. Currently, the value of resourceVersion is set to match etcd's sequencer. You could think of it as a logical clock the API server can use to order requests. However, we expect the implementation of resourceVersion to change in the future, such as if we shard the state by kind and/or namespace, or port to another storage system.
+
+In the case of a conflict, the correct client action is to GET the resource again, apply the changes afresh, and try submitting again. This mechanism can be used to prevent races like the following:
+
+```shell
+Client #1                      Client #2
+GET Foo                        GET Foo
+Set Foo.Bar = "one"            Set Foo.Baz = "two"
+PUT Foo                        PUT Foo
+```
+
+When these sequences occur in parallel, either the change to Foo.Bar or the change to Foo.Baz can be lost.
+
+On the other hand, when specifying the resourceVersion, one of the PUTs will fail, since whichever write succeeds changes the resourceVersion for Foo.
+
+resourceVersion may be used as a precondition for other operations (e.g., GET, DELETE) in the future, such as for read-after-write consistency in the presence of caching.
+
+"Watch" operations specify resourceVersion using a query parameter. It is used to specify the point at which to begin watching the specified resources. This may be used to ensure that no mutations are missed between a GET of a resource (or list of resources) and a subsequent Watch, even if the current version of the resource is more recent. This is currently the main reason that list operations (GET on a collection) return resourceVersion.
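+
+Returning to the conflict case above, a client's read/modify/write retry loop might look roughly like the following sketch. The `fooClient` interface here is hypothetical (it is not a real client library API); it only stands in for "GET, mutate, PUT with the stored resourceVersion, retry on 409":
+
+```go
+package example
+
+import "errors"
+
+// ErrConflict stands in for a 409 StatusConflict returned by the API server.
+var ErrConflict = errors.New("conflict: resourceVersion does not match")
+
+// Foo is a stand-in object; ResourceVersion carries the opaque value from its metadata.
+type Foo struct {
+	ResourceVersion string
+	Bar             string
+}
+
+// fooClient is a hypothetical, minimal client; a real client library would
+// provide the equivalent GET and PUT operations.
+type fooClient interface {
+	Get(name string) (*Foo, error)
+	Update(name string, obj *Foo) error // fails with ErrConflict on a stale resourceVersion
+}
+
+// setBar GETs the current object, applies the change afresh, and retries on
+// conflict, as described above.
+func setBar(c fooClient, name, value string, maxRetries int) error {
+	var err error
+	for i := 0; i < maxRetries; i++ {
+		var obj *Foo
+		obj, err = c.Get(name)
+		if err != nil {
+			return err
+		}
+		obj.Bar = value           // re-apply the change to the freshly read object
+		err = c.Update(name, obj) // the PUT carries obj.ResourceVersion unmodified
+		if err == nil || !errors.Is(err, ErrConflict) {
+			return err // success, or an error that is not worth retrying
+		}
+	}
+	return err
+}
+```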
+
+
+## Serialization Format
+
+APIs may return alternative representations of any resource in response to an Accept header or under alternative endpoints, but the default serialization for input and output of API responses MUST be JSON.
+
+All dates should be serialized as RFC3339 strings.
+
+## Units
+
+Units must either be explicit in the field name (e.g., `timeoutSeconds`), or must be specified as part of the value (e.g., `resource.Quantity`). Which approach is preferred is TBD, though currently we use the `fooSeconds` convention for durations.
+
+
+## Selecting Fields
+
+Some APIs may need to identify which field in a JSON object is invalid, or to reference a value to extract from a separate resource. The current recommendation is to use standard JavaScript syntax for accessing that field, assuming the JSON object was transformed into a JavaScript object, without the leading dot, such as `metadata.name`.
+
+Examples:
+
+* Find the field "current" in the object "state" in the second item in the array "fields": `fields[1].state.current`
+
+## Object references
+
+Object references should either be called `fooName` if referring to an object of kind `Foo` by just the name (within the current namespace, if a namespaced resource), or should be called `fooRef`, and should contain a subset of the fields of the `ObjectReference` type.
+
+
+TODO: Plugins, extensions, nested kinds, headers
+
+
+## HTTP Status codes
+
+The server will respond with HTTP status codes that match the HTTP spec. See the section below for a breakdown of the types of status codes the server will send.
+
+The following HTTP status codes may be returned by the API.
+
+#### Success codes
+
+* `200 StatusOK`
+  * Indicates that the request completed successfully.
+* `201 StatusCreated`
+  * Indicates that the request to create kind completed successfully.
+* `204 StatusNoContent`
+  * Indicates that the request completed successfully, and the response contains no body.
+  * Returned in response to HTTP OPTIONS requests.
+
+#### Error codes
+
+* `307 StatusTemporaryRedirect`
+  * Indicates that the address for the requested resource has changed.
+  * Suggested client recovery behavior:
+    * Follow the redirect.
+* `400 StatusBadRequest`
+  * Indicates that the request is invalid.
+  * Suggested client recovery behavior:
+    * Do not retry. Fix the request.
+* `401 StatusUnauthorized`
+  * Indicates that the server can be reached and understood the request, but refuses to take any further action, because the client must provide authorization. If the client has provided authorization, the server is indicating the provided authorization is unsuitable or invalid.
+  * Suggested client recovery behavior:
+    * If the user has not supplied authorization information, prompt them for the appropriate credentials.
+    * If the user has supplied authorization information, inform them their credentials were rejected and optionally prompt them again.
+* `403 StatusForbidden`
+  * Indicates that the server can be reached and understood the request, but refuses to take any further action, because it is configured to deny access for some reason to the requested resource by the client.
+  * Suggested client recovery behavior:
+    * Do not retry. Fix the request.
+* `404 StatusNotFound`
+  * Indicates that the requested resource does not exist.
+  * Suggested client recovery behavior:
+    * Do not retry. Fix the request.
+* `405 StatusMethodNotAllowed`
+  * Indicates that the action the client attempted to perform on the resource was not supported by the code.
+  * Suggested client recovery behavior:
+    * Do not retry. Fix the request.
+* `409 StatusConflict`
+  * Indicates that either the resource the client attempted to create already exists or the requested update operation cannot be completed due to a conflict.
+  * Suggested client recovery behavior:
+    * If creating a new resource:
+      * Either change the identifier and try again, or GET and compare the fields in the pre-existing object and issue a PUT/update to modify the existing object.
+    * If updating an existing resource:
+      * See `Conflict` from the `status` response section below on how to retrieve more information about the nature of the conflict.
+      * GET and compare the fields in the pre-existing object, merge changes (if still valid according to preconditions), and retry with the updated request (including `ResourceVersion`).
+* `422 StatusUnprocessableEntity`
+  * Indicates that the requested create or update operation cannot be completed due to invalid data provided as part of the request.
+  * Suggested client recovery behavior:
+    * Do not retry. Fix the request.
+* `429 StatusTooManyRequests`
+  * Indicates that either the client rate limit has been exceeded or the server has received more requests than it can process.
+  * Suggested client recovery behavior:
+    * Read the `Retry-After` HTTP header from the response, and wait at least that long before retrying.
+* `500 StatusInternalServerError`
+  * Indicates that the server can be reached and understood the request, but either an unexpected internal error occurred and the outcome of the call is unknown, or the server cannot complete the action in a reasonable time (this may be due to temporary server load or a transient communication issue with another server).
+  * Suggested client recovery behavior:
+    * Retry with exponential backoff.
+* `503 StatusServiceUnavailable`
+  * Indicates that a required service is unavailable.
+  * Suggested client recovery behavior:
+    * Retry with exponential backoff.
+* `504 StatusServerTimeout`
+  * Indicates that the request could not be completed within the given time. Clients can get this response ONLY when they specified a timeout param in the request.
+  * Suggested client recovery behavior:
+    * Increase the value of the timeout param and retry with exponential backoff.
+
+## Response Status Kind
+
+Kubernetes will always return the `Status` kind from any API endpoint when an error occurs.
+Clients SHOULD handle these types of objects when appropriate.
+
+A `Status` kind will be returned by the API in two cases:
+
+ 1. When an operation is not successful (i.e. when the server would return a non-2xx HTTP status code).
+ 2. When an HTTP `DELETE` call is successful.
+
+The status object is encoded as JSON and provided as the body of the response. The status object contains fields for human and machine consumers of the API to get more detailed information about the cause of the failure. The information in the status object supplements, but does not override, the HTTP status code's meaning. When fields in the status object have the same meaning as generally defined HTTP headers and that header is returned with the response, the header should be considered as having higher priority.
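+
+The example below shows a raw `Status` payload as returned by the API server. A client might decode it with a minimal struct such as the following sketch, which models only a subset of the fields the real `Status` kind carries:
+
+```go
+package example
+
+import "encoding/json"
+
+// statusDetails and status model only the fields a simple client needs; the
+// real Status kind carries more information than this.
+type statusDetails struct {
+	Name string `json:"name"`
+	Kind string `json:"kind"`
+}
+
+type status struct {
+	Status  string        `json:"status"` // "Success" or "Failure"
+	Message string        `json:"message"`
+	Reason  string        `json:"reason"`
+	Details statusDetails `json:"details"`
+	Code    int           `json:"code"`
+}
+
+// decodeStatus parses the JSON body of an error response from the API server.
+func decodeStatus(body []byte) (*status, error) {
+	s := &status{}
+	if err := json.Unmarshal(body, s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+```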
+ +**Example:** + +```shell +$ curl -v -k -H "Authorization: Bearer WhCDvq4VPpYhrcfmF6ei7V9qlbqTubUc" https://10.240.122.184:443/api/v1/namespaces/default/pods/grafana + +> GET /api/v1/namespaces/default/pods/grafana HTTP/1.1 +> User-Agent: curl/7.26.0 +> Host: 10.240.122.184 +> Accept: */* +> Authorization: Bearer WhCDvq4VPpYhrcfmF6ei7V9qlbqTubUc +> + +< HTTP/1.1 404 Not Found +< Content-Type: application/json +< Date: Wed, 20 May 2015 18:10:42 GMT +< Content-Length: 232 +< +{ + "kind": "Status", + "apiVersion": "v1", + "metadata": {}, + "status": "Failure", + "message": "pods \"grafana\" not found", + "reason": "NotFound", + "details": { + "name": "grafana", + "kind": "pods" + }, + "code": 404 +} +``` + +`status` field contains one of two possible values: +* `Success` +* `Failure` + +`message` may contain human-readable description of the error + +`reason` may contain a machine-readable, one-word, CamelCase description of why this operation is in the `Failure` status. If this value is empty there is no information available. The `reason` clarifies an HTTP status code but does not override it. + +`details` may contain extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type. + +Possible values for the `reason` and `details` fields: + +* `BadRequest` + * Indicates that the request itself was invalid, because the request doesn't make any sense, for example deleting a read-only object. + * This is different than `status reason` `Invalid` above which indicates that the API call could possibly succeed, but the data was invalid. + * API calls that return BadRequest can never succeed. + * Http status code: `400 StatusBadRequest` +* `Unauthorized` + * Indicates that the server can be reached and understood the request, but refuses to take any further action without the client providing appropriate authorization. If the client has provided authorization, this error indicates the provided credentials are insufficient or invalid. + * Details (optional): + * `kind string` + * The kind attribute of the unauthorized resource (on some operations may differ from the requested resource). + * `name string` + * The identifier of the unauthorized resource. + * HTTP status code: `401 StatusUnauthorized` +* `Forbidden` + * Indicates that the server can be reached and understood the request, but refuses to take any further action, because it is configured to deny access for some reason to the requested resource by the client. + * Details (optional): + * `kind string` + * The kind attribute of the forbidden resource (on some operations may differ from the requested resource). + * `name string` + * The identifier of the forbidden resource. + * HTTP status code: `403 StatusForbidden` +* `NotFound` + * Indicates that one or more resources required for this operation could not be found. + * Details (optional): + * `kind string` + * The kind attribute of the missing resource (on some operations may differ from the requested resource). + * `name string` + * The identifier of the missing resource. + * HTTP status code: `404 StatusNotFound` +* `AlreadyExists` + * Indicates that the resource you are creating already exists. + * Details (optional): + * `kind string` + * The kind attribute of the conflicting resource. + * `name string` + * The identifier of the conflicting resource. 
+  * HTTP status code: `409 StatusConflict`
+* `Conflict`
+  * Indicates that the requested update operation cannot be completed due to a conflict. The client may need to alter the request. Each resource may define custom details that indicate the nature of the conflict.
+  * HTTP status code: `409 StatusConflict`
+* `Invalid`
+  * Indicates that the requested create or update operation cannot be completed due to invalid data provided as part of the request.
+  * Details (optional):
+    * `kind string`
+      * The kind attribute of the invalid resource.
+    * `name string`
+      * The identifier of the invalid resource.
+    * `causes`
+      * One or more `StatusCause` entries indicating the data in the provided resource that was invalid. The `reason`, `message`, and `field` attributes will be set.
+  * HTTP status code: `422 StatusUnprocessableEntity`
+* `Timeout`
+  * Indicates that the request could not be completed within the given time. Clients may receive this response if the server has decided to rate limit the client, or if the server is overloaded and cannot process the request at this time.
+  * HTTP status code: `429 StatusTooManyRequests`
+  * The server should set the `Retry-After` HTTP header and return `retryAfterSeconds` in the details field of the object. A value of `0` is the default.
+* `ServerTimeout`
+  * Indicates that the server can be reached and understood the request, but cannot complete the action in a reasonable time. This may be due to temporary server load or a transient communication issue with another server.
+  * Details (optional):
+    * `kind string`
+      * The kind attribute of the resource being acted on.
+    * `name string`
+      * The operation that is being attempted.
+  * The server should set the `Retry-After` HTTP header and return `retryAfterSeconds` in the details field of the object. A value of `0` is the default.
+  * HTTP status code: `504 StatusServerTimeout`
+* `MethodNotAllowed`
+  * Indicates that the action the client attempted to perform on the resource was not supported by the code.
+  * For instance, attempting to delete a resource that can only be created.
+  * API calls that return MethodNotAllowed can never succeed.
+  * HTTP status code: `405 StatusMethodNotAllowed`
+* `InternalError`
+  * Indicates that an unexpected internal error occurred and the outcome of the call is unknown.
+  * Details (optional):
+    * `causes`
+      * The original error.
+  * HTTP status code: `500 StatusInternalServerError`
+
+`code` may contain the suggested HTTP return code for this status.
+
+
+## Events
+
+Events are complementary to status information, since they can provide some historical information about status and occurrences in addition to current or previous status. Generate events for situations users or administrators should be alerted about.
+
+Choose a unique, specific, short, CamelCase reason for each event category. For example, `FreeDiskSpaceInvalid` is a good event reason because it is likely to refer to just one situation, but `Started` is not a good reason because it doesn't sufficiently indicate what started, even when combined with other event fields.
+
+`Error creating foo` or `Error creating foo %s` would be appropriate for an event message, with the latter being preferable, since it is more informational.
+
+Accumulate repeated events in the client, especially for frequent events, to reduce data volume, load on the system, and noise exposed to users.
+
+## Naming conventions
+
+* Go field names must be CamelCase. JSON field names must be camelCase.
Other than capitalization of the initial letter, the two should almost always match. No underscores or dashes in either.
+* Field and resource names should be declarative, not imperative (DoSomething, SomethingDoer, DoneBy, DoneAt).
+* `Minion` has been deprecated in favor of `Node`. Use `Node` where referring to the node resource in the context of the cluster. Use `Host` where referring to properties of the individual physical/virtual system, such as `hostname`, `hostPath`, `hostNetwork`, etc.
+* `FooController` is a deprecated kind naming convention. Name the kind after the thing being controlled instead (e.g., `Job` rather than `JobController`).
+* The name of a field that specifies the time at which `something` occurs should be called `somethingTime`. Do not use `stamp` (e.g., `creationTimestamp`).
+* We use the `fooSeconds` convention for durations, as discussed in the [units subsection](#units).
+  * `fooPeriodSeconds` is preferred for periodic intervals and other waiting periods (e.g., over `fooIntervalSeconds`).
+  * `fooTimeoutSeconds` is preferred for inactivity/unresponsiveness deadlines.
+  * `fooDeadlineSeconds` is preferred for activity completion deadlines.
+* Do not use abbreviations in the API, except where they are extremely commonly used, such as "id", "args", or "stdin".
+* Acronyms should similarly only be used when extremely commonly known. All letters in the acronym should have the same case, using the appropriate case for the situation. For example, at the beginning of a field name, the acronym should be all lowercase, such as "httpGet". Where used as a constant, all letters should be uppercase, such as "TCP" or "UDP".
+* The name of a field referring to another resource of kind `Foo` by name should be called `fooName`. The name of a field referring to another resource of kind `Foo` by ObjectReference (or subset thereof) should be called `fooRef`.
+* More generally, include the units and/or type in the field name if they could be ambiguous and they are not specified by the value or value type.
+
+## Label, selector, and annotation conventions
+
+Labels are the domain of users. They are intended to facilitate organization and management of API resources using attributes that are meaningful to users, as opposed to meaningful to the system. Think of them as user-created mp3 or email inbox labels, as opposed to the directory structure used by a program to store its data. The former enables the user to apply an arbitrary ontology, whereas the latter is implementation-centric and inflexible. Users will use labels to select resources to operate on, display label values in CLI/UI columns, etc. Users should always retain full power and flexibility over the label schemas they apply in their namespaces.
+
+However, we should support conveniences for common cases by default. For example, what we now do in ReplicationController is automatically set the RC's selector and labels to the labels in the pod template by default, if they are not already set. That ensures that the selector will match the template, and that the RC can be managed using the same labels as the pods it creates. Note that once we generalize selectors, it won't necessarily be possible to unambiguously generate labels that match an arbitrary selector.
+
+If the user wants to apply additional labels to the pods that it doesn't select upon, such as to facilitate adoption of pods or in the expectation that some label values will change, they can set the selector to a subset of the pod labels.
Similarly, the RC's labels could be initialized to a subset of the pod template's labels, or could include additional/different labels.
+
+For disciplined users managing resources within their own namespaces, it's not that hard to consistently apply schemas that ensure uniqueness. One just needs to ensure that at least one value of some label key in common differs compared to all other comparable resources. We could/should provide a verification tool to check that. However, development of conventions similar to the examples in [Labels](/{{page.version}}/docs/user-guide/labels) makes uniqueness straightforward. Furthermore, relatively narrowly used namespaces (e.g., per environment, per application) can be used to reduce the set of resources that could potentially cause overlap.
+
+In cases where users could be running miscellaneous examples with inconsistent schemas, or where tooling or components need to programmatically generate new objects to be selected, there needs to be a straightforward way to generate unique label sets. A simple way to ensure uniqueness of the set is to ensure uniqueness of a single label value, such as by using a resource name, uid, resource hash, or generation number.
+
+Problems with uids and hashes, however, include that they have no semantic meaning to the user, are not memorable or readily recognizable, and are not predictable. Lack of predictability obstructs use cases such as creation of a replication controller from a pod, as people want to do when exploring the system, bootstrapping a self-hosted cluster, or deletion and re-creation of a new RC that adopts the pods of the previous one, such as to rename it. Generation numbers are more predictable and much clearer, assuming there is a logical sequence. Fortunately, for deployments that's the case. For jobs, use of creation timestamps is common internally. Users should always be able to turn off auto-generation, in order to permit some of the scenarios described above. Note that auto-generated labels will also become one more field that needs to be stripped out when cloning a resource, within a namespace, in a new namespace, in a new cluster, etc., and will need to be ignored when updating a resource via patch or read-modify-write sequence.
+
+Inclusion of a system prefix in a label key is fairly hostile to UX. A prefix is only necessary in the case that the user cannot choose the label key, in order to avoid collisions with user-defined labels. However, I firmly believe that the user should always be allowed to select the label keys to use on their resources, so it should always be possible to override default label keys.
+
+Therefore, resources supporting auto-generation of unique labels should have a `uniqueLabelKey` field, so that the user could specify the key if they wanted to, but if unspecified, it could be set by default, such as to the resource type, like job, deployment, or replicationController. The value would need to be at least spatially unique, and perhaps temporally unique in the case of job.
+
+Annotations have very different intended usage from labels. We expect them to be primarily generated and consumed by tooling and system extensions. I'm inclined to generalize annotations to permit them to directly store arbitrary json. Rigid names and name prefixes make sense, since they are analogous to API fields.
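+
+Until such a generalization exists, a tool can serialize its own structured state to JSON and store it under a prefixed annotation key. The key and payload below are purely hypothetical, shown only to illustrate the pattern:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// checkpoint is a hypothetical piece of tool-specific state.
+type checkpoint struct {
+	DesiredReplicas int    `json:"desiredReplicas"`
+	Phase           string `json:"phase"`
+}
+
+func main() {
+	// Stand-in for an object's metadata.annotations map.
+	annotations := map[string]string{}
+
+	data, err := json.Marshal(checkpoint{DesiredReplicas: 3, Phase: "RollingOut"})
+	if err != nil {
+		panic(err)
+	}
+
+	// Hypothetical prefixed key; the prefix keeps the key from colliding with
+	// user-chosen label/annotation names.
+	annotations["mytool.example.com/checkpoint"] = string(data)
+
+	fmt.Println(annotations)
+}
+```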
+ +In fact, in-development API fields, including those used to represent fields of newer alpha/beta API versions in the older stable storage version, may be represented as annotations with the form `something.alpha.kubernetes.io/name` or `something.beta.kubernetes.io/name` (depending on our confidence in it). For example `net.alpha.kubernetes.io/policy` might represent an experimental network policy field. + +Other advice regarding use of labels, annotations, and other generic map keys by Kubernetes components and tools: + + - Key names should be all lowercase, with words separated by dashes, such as `desired-replicas` + - Prefix the key with `kubernetes.io/` or `foo.kubernetes.io/`, preferably the latter if the label/annotation is specific to `foo` + - For instance, prefer `service-account.kubernetes.io/name` over `kubernetes.io/service-account.name` + - Use annotations to store API extensions that the controller responsible for the resource doesn't need to know about, experimental fields that aren't intended to be generally used API fields, etc. Beware that annotations aren't automatically handled by the API conversion machinery. \ No newline at end of file diff --git a/_includes/docs/docs/devel/api_changes.md b/_includes/docs/docs/devel/api_changes.md new file mode 100644 index 0000000000..b11a455b7b --- /dev/null +++ b/_includes/docs/docs/devel/api_changes.md @@ -0,0 +1,584 @@ + +Before attempting a change to the API, you should familiarize yourself +with a number of existing API types and with the [API +conventions](/{{page.version}}/docs/devel/api-conventions). If creating a new API +type/resource, we also recommend that you first send a PR containing +just a proposal for the new API types, and that you initially target +the extensions API (pkg/apis/extensions). + +The Kubernetes API has two major components - the internal structures and +the versioned APIs. The versioned APIs are intended to be stable, while the +internal structures are implemented to best reflect the needs of the Kubernetes +code itself. + +What this means for API changes is that you have to be somewhat thoughtful in +how you approach changes, and that you have to touch a number of pieces to make +a complete change. This document aims to guide you through the process, though +not all API changes will need all of these steps. + +## Operational overview + +It is important to have a high level understanding of the API system used in +Kubernetes in order to navigate the rest of this document. + +As mentioned above, the internal representation of an API object is decoupled +from any one API version. This provides a lot of freedom to evolve the code, +but it requires robust infrastructure to convert between representations. There +are multiple steps in processing an API operation - even something as simple as +a GET involves a great deal of machinery. + +The conversion process is logically a "star" with the internal form at the +center. Every versioned API can be converted to the internal form (and +vice-versa), but versioned APIs do not convert to other versioned APIs directly. +This sounds like a heavy process, but in reality we do not intend to keep more +than a small number of versions alive at once. While all of the Kubernetes code +operates on the internal structures, they are always converted to a versioned +form before being written to storage (disk or etcd) or being sent over a wire. +Clients should consume and operate on the versioned APIs exclusively. 
+ +To demonstrate the general process, here is a (hypothetical) example: + + 1. A user POSTs a `Pod` object to `/api/v7beta1/...` + 2. The JSON is unmarshalled into a `v7beta1.Pod` structure + 3. Default values are applied to the `v7beta1.Pod` + 4. The `v7beta1.Pod` is converted to an `api.Pod` structure + 5. The `api.Pod` is validated, and any errors are returned to the user + 6. The `api.Pod` is converted to a `v6.Pod` (because v6 is the latest stable + version) + 7. The `v6.Pod` is marshalled into JSON and written to etcd + +Now that we have the `Pod` object stored, a user can GET that object in any +supported api version. For example: + + 1. A user GETs the `Pod` from `/api/v5/...` + 2. The JSON is read from etcd and unmarshalled into a `v6.Pod` structure + 3. Default values are applied to the `v6.Pod` + 4. The `v6.Pod` is converted to an `api.Pod` structure + 5. The `api.Pod` is converted to a `v5.Pod` structure + 6. The `v5.Pod` is marshalled into JSON and sent to the user + +The implication of this process is that API changes must be done carefully and +backward-compatibly. + +## On compatibility + +Before talking about how to make API changes, it is worthwhile to clarify what +we mean by API compatibility. An API change is considered backward-compatible +if it: + + * adds new functionality that is not required for correct behavior (e.g., + does not add a new required field) + * does not change existing semantics, including: + * default values and behavior + * interpretation of existing API types, fields, and values + * which fields are required and which are not + +Put another way: + +1. Any API call (e.g. a structure POSTed to a REST endpoint) that worked before + your change must work the same after your change. +2. Any API call that uses your change must not cause problems (e.g. crash or + degrade behavior) when issued against servers that do not include your change. +3. It must be possible to round-trip your change (convert to different API + versions and back) with no loss of information. +4. Existing clients need not be aware of your change in order for them to continue + to function as they did previously, even when your change is utilized + +If your change does not meet these criteria, it is not considered strictly +compatible. + +Let's consider some examples. In a hypothetical API (assume we're at version +v6), the `Frobber` struct looks something like this: + +```go +// API v6. +type Frobber struct { + Height int `json:"height"` + Param string `json:"param"` +} +``` + +You want to add a new `Width` field. It is generally safe to add new fields +without changing the API version, so you can simply change it to: + +```go +// Still API v6. +type Frobber struct { + Height int `json:"height"` + Width int `json:"width"` + Param string `json:"param"` +} +``` + +The onus is on you to define a sane default value for `Width` such that rule #1 +above is true - API calls and stored objects that used to work must continue to +work. + +For your next change you want to allow multiple `Param` values. You can not +simply change `Param string` to `Params []string` (without creating a whole new +API version) - that fails rules #1 and #2. You can instead do something like: + +```go +// Still API v6, but kind of clumsy. 
+type Frobber struct { + Height int `json:"height"` + Width int `json:"width"` + Param string `json:"param"` // the first param + ExtraParams []string `json:"params"` // additional params +} +``` + +Now you can satisfy the rules: API calls that provide the old style `Param` +will still work, while servers that don't understand `ExtraParams` can ignore +it. This is somewhat unsatisfying as an API, but it is strictly compatible. + +Part of the reason for versioning APIs and for using internal structs that are +distinct from any one version is to handle growth like this. The internal +representation can be implemented as: + +```go +// Internal, soon to be v7beta1. +type Frobber struct { + Height int + Width int + Params []string +} +``` + +The code that converts to/from versioned APIs can decode this into the somewhat +uglier (but compatible!) structures. Eventually, a new API version, let's call +it v7beta1, will be forked and it can use the clean internal structure. + +We've seen how to satisfy rules #1 and #2. Rule #3 means that you can not +extend one versioned API without also extending the others. For example, an +API call might POST an object in API v7beta1 format, which uses the cleaner +`Params` field, but the API server might store that object in trusty old v6 +form (since v7beta1 is "beta"). When the user reads the object back in the +v7beta1 API it would be unacceptable to have lost all but `Params[0]`. This +means that, even though it is ugly, a compatible change must be made to the v6 +API. + +However, this is very challenging to do correctly. It often requires +multiple representations of the same information in the same API resource, which +need to be kept in sync in the event that either is changed. For example, +let's say you decide to rename a field within the same API version. In this case, +you add units to `height` and `width`. You implement this by adding duplicate +fields: + +```go +type Frobber struct { + Height *int `json:"height"` + Width *int `json:"width"` + HeightInInches *int `json:"heightInInches"` + WidthInInches *int `json:"widthInInches"` +} +``` + +You convert all of the fields to pointers in order to distinguish between unset and +set to 0, and then set each corresponding field from the other in the defaulting +pass (e.g., `heightInInches` from `height`, and vice versa), which runs just prior +to conversion. That works fine when the user creates a resource from a hand-written +configuration -- clients can write either field and read either field, but what about +creation or update from the output of GET, or update via PATCH (see +[In-place updates](/{{page.version}}/docs/user-guide/managing-deployments/#in-place-updates-of-resources))? +In this case, the two fields will conflict, because only one field would be updated +in the case of an old client that was only aware of the old field (e.g., `height`). + +Say the client creates: + +```json +{ + "height": 10, + "width": 5 +} +``` + +and GETs: + +```json +{ + "height": 10, + "heightInInches": 10, + "width": 5, + "widthInInches": 5 +} +``` + +then PUTs back: + +```json +{ + "height": 13, + "heightInInches": 10, + "width": 5, + "widthInInches": 5 +} +``` + +The update should not fail, because it would have worked before `heightInInches` was added. + +Therefore, when there are duplicate fields, the old field MUST take precedence +over the new, and the new field should be set to match by the server upon write. 
+A new client would be aware of the old field as well as the new, and so can ensure +that the old field is either unset or is set consistently with the new field. However, +older clients would be unaware of the new field. Please avoid introducing duplicate +fields due to the complexity they incur in the API. + +A new representation, even in a new API version, that is more expressive than an old one +breaks backward compatibility, since clients that only understood the old representation +would not be aware of the new representation nor its semantics. Examples of +proposals that have run into this challenge include [generalized label +selectors](http://issues.k8s.io/341) and [pod-level security +context](http://prs.k8s.io/12823). + +As another interesting example, enumerated values cause similar challenges. +Adding a new value to an enumerated set is *not* a compatible change. Clients +which assume they know how to handle all possible values of a given field will +not be able to handle the new values. However, removing value from an +enumerated set *can* be a compatible change, if handled properly (treat the +removed value as deprecated but allowed). This is actually a special case of +a new representation, discussed above. + +## Incompatible API changes + +There are times when this might be OK, but mostly we want changes that +meet this definition. If you think you need to break compatibility, +you should talk to the Kubernetes team first. + +Breaking compatibility of a beta or stable API version, such as v1, is unacceptable. +Compatibility for experimental or alpha APIs is not strictly required, but +breaking compatibility should not be done lightly, as it disrupts all users of the +feature. Experimental APIs may be removed. Alpha and beta API versions may be deprecated +and eventually removed wholesale, as described in the [versioning document](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/versioning.md). +Document incompatible changes across API versions under the [conversion tips](/{{page.version}}/docs/api/). + +If your change is going to be backward incompatible or might be a breaking change for API +consumers, please send an announcement to `kubernetes-dev@googlegroups.com` before +the change gets in. If you are unsure, ask. Also make sure that the change gets documented in +the release notes for the next release by labeling the PR with the "release-note" github label. + +If you found that your change accidentally broke clients, it should be reverted. + +In short, the expected API evolution is as follows: + +* `extensions/v1alpha1` -> +* `newapigroup/v1alpha1` -> ... -> `newapigroup/v1alphaN` -> +* `newapigroup/v1beta1` -> ... -> `newapigroup/v1betaN` -> +* `newapigroup/v1` -> +* `newapigroup/v2alpha1` -> ... + +While in extensions we have no obligation to move forward with the API at all and may delete or break it at any time. + +While in alpha we expect to move forward with it, but may break it. + +Once in beta we will preserve forward compatibility, but may introduce new versions and delete old ones. + +v1 must be backward-compatible for an extended length of time. + +## Changing versioned APIs + +For most changes, you will probably find it easiest to change the versioned +APIs first. This forces you to think about how to make your change in a +compatible way. Rather than doing each step in every version, it's usually +easier to do each versioned API one at a time, or to do all of one version +before starting "all the rest". 
+
+### Edit types.go
+
+The struct definitions for each API are in `pkg/api/<version>/types.go`. Edit
+those files to reflect the change you want to make. Note that all types and non-inline
+fields in versioned APIs must be preceded by descriptive comments - these are used to generate
+documentation.
+
+Optional fields should have the `,omitempty` json tag; fields are interpreted as being
+required otherwise.
+
+### Edit defaults.go
+
+If your change includes new fields for which you will need default values, you
+need to add cases to `pkg/api/<version>/defaults.go`. Of course, since you
+have added code, you have to add a test: `pkg/api/<version>/defaults_test.go`.
+
+Do use pointers to scalars when you need to distinguish between an unset value
+and an automatic zero value. For example,
+`PodSpec.TerminationGracePeriodSeconds` is defined as `*int64` in the Go type
+definition. A zero value means 0 seconds, and a nil value asks the system to
+pick a default.
+
+Don't forget to run the tests!
+
+### Edit conversion.go
+
+Given that you have not yet changed the internal structs, this might feel
+premature, and that's because it is. You don't yet have anything to convert to
+or from. We will revisit this in the "internal" section. If you're doing this
+all in a different order (i.e. you started with the internal structs), then you
+should jump to that topic below. In the very rare case that you are making an
+incompatible change you might or might not want to do this now, but you will
+have to do more later. The files you want are
+`pkg/api/<version>/conversion.go` and `pkg/api/<version>/conversion_test.go`.
+
+Note that the conversion machinery doesn't generically handle conversion of values,
+such as various kinds of field references and API constants. [The client
+library](https://releases.k8s.io/{{page.githubbranch}}/pkg/client/unversioned/request.go) has custom conversion code for
+field references. You also need to add a call to api.Scheme.AddFieldLabelConversionFunc
+with a mapping function that understands supported translations.
+
+## Changing the internal structures
+
+Now it is time to change the internal structs so your versioned changes can be
+used.
+
+### Edit types.go
+
+Similar to the versioned APIs, the definitions for the internal structs are in
+`pkg/api/types.go`. Edit those files to reflect the change you want to make.
+Keep in mind that the internal structs must be able to express *all* of the
+versioned APIs.
+
+## Edit validation.go
+
+Most changes made to the internal structs need some form of input validation.
+Validation is currently done on internal objects in
+`pkg/api/validation/validation.go`. This validation is one of the first
+opportunities we have to make a great user experience - good error messages and
+thorough validation help ensure that users are giving you what you expect and,
+when they don't, that they know why and how to fix it. Think hard about the
+contents of `string` fields, the bounds of `int` fields, and whether fields are
+required or optional.
+
+Of course, code needs tests - `pkg/api/validation/validation_test.go`.
+
+## Edit version conversions
+
+At this point you have both the versioned API changes and the internal
+structure changes done. If there are any notable differences - field names,
+types, structural change in particular - you must add some logic to convert
+versioned APIs to and from the internal representation. If you see errors from
+the `serialization_test`, it may indicate the need for explicit conversions.
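+
+As an illustration, using the hypothetical `Frobber` types from the compatibility discussion above, explicit conversions between a versioned type and the internal type might look roughly like the following simplified, self-contained sketch (the real conversion functions live in `pkg/api/<version>/conversion.go` and follow the naming and generation conventions described next):
+
+```go
+package example
+
+// v6Frobber mirrors the versioned API type from the compatibility example,
+// which kept the old Param field alongside ExtraParams.
+type v6Frobber struct {
+	Height      int      `json:"height"`
+	Width       int      `json:"width"`
+	Param       string   `json:"param"`
+	ExtraParams []string `json:"params"`
+}
+
+// internalFrobber mirrors the cleaner internal representation.
+type internalFrobber struct {
+	Height int
+	Width  int
+	Params []string
+}
+
+// convertV6FrobberToInternal folds the clumsy versioned fields into the clean
+// internal form: Param, if set, becomes the first element of Params.
+func convertV6FrobberToInternal(in *v6Frobber, out *internalFrobber) error {
+	out.Height = in.Height
+	out.Width = in.Width
+	out.Params = nil
+	if in.Param != "" {
+		out.Params = append(out.Params, in.Param)
+	}
+	out.Params = append(out.Params, in.ExtraParams...)
+	return nil
+}
+
+// convertInternalFrobberToV6 splits Params back into the old and new fields so
+// that old clients still see Param populated.
+func convertInternalFrobberToV6(in *internalFrobber, out *v6Frobber) error {
+	out.Height = in.Height
+	out.Width = in.Width
+	out.Param = ""
+	out.ExtraParams = nil
+	if len(in.Params) > 0 {
+		out.Param = in.Params[0]
+		out.ExtraParams = append(out.ExtraParams, in.Params[1:]...)
+	}
+	return nil
+}
+```
+
+Note how information is preserved in both directions, which is what keeps round-tripping (rule #3 above) intact.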
+
+The performance of conversions very heavily influences the performance of the apiserver.
+Thus, we are auto-generating conversion functions that are much more efficient
+than the generic ones (which are based on reflection and thus are highly
+inefficient).
+
+The conversion code resides with each versioned API. There are two files for each version:
+
+  - `pkg/api/<version>/conversion.go` containing manually written conversion functions
+  - `pkg/api/<version>/conversion_generated.go` containing auto-generated conversion functions
+  - `pkg/apis/extensions/<version>/conversion.go` containing manually written conversion functions
+  - `pkg/apis/extensions/<version>/conversion_generated.go` containing auto-generated conversion functions
+
+Since the auto-generated conversion functions call the manually written ones, the
+manually written ones should be named according to a defined convention: a function
+converting type X in pkg a to type Y in pkg b should be named
+`convert_a_X_To_b_Y`.
+
+Also note that you can (and for efficiency reasons should) use auto-generated
+conversion functions when writing your conversion functions.
+
+Once all the necessary manually written conversions are added, you need to
+regenerate the auto-generated ones. To regenerate them, run:
+
+```shell
+hack/update-generated-conversions.sh
+```
+
+If running the above script is impossible due to compile errors, the easiest
+workaround is to comment out the code causing errors and let the script
+regenerate it. If the auto-generated conversion methods are not used by the
+manually-written ones, it's fine to just remove the whole file and let the
+generator create it from scratch.
+
+Unsurprisingly, adding manually written conversions also requires you to add tests to
+`pkg/api/<version>/conversion_test.go`.
+
+## Edit deep copy files
+
+At this point you have both the versioned API changes and the internal
+structure changes done. You now need to generate code to handle deep copy
+of your versioned api objects.
+
+The deep copy code resides with each versioned API:
+
+  - `pkg/api/<version>/deep_copy_generated.go` containing auto-generated copy functions
+  - `pkg/apis/extensions/<version>/deep_copy_generated.go` containing auto-generated copy functions
+
+To regenerate them, run:
+
+```shell
+hack/update-generated-deep-copies.sh
+```
+
+## Edit json (un)marshaling code
+
+We are auto-generating code for marshaling and unmarshaling the json representation
+of api objects - this is to improve the overall system performance.
+
+The auto-generated code resides with each versioned API:
+
+  - `pkg/api/<version>/types.generated.go`
+  - `pkg/apis/extensions/<version>/types.generated.go`
+
+To regenerate them, run:
+
+```shell
+hack/update-codecgen.sh
+```
+
+## Making a new API Group
+
+This section is under construction, as we make the tooling completely generic.
+
+At the moment, you'll have to make a new directory under pkg/apis/; copy the
+directory structure from pkg/apis/extensions. Add the new group/version to all
+of the hack/{verify,update}-generated-{deep-copy,conversions,swagger}.sh files
+in the appropriate places--it should just require adding your new group/version
+to a bash array. You will also need to make sure your new types are imported by
+the generation commands (cmd/gendeepcopy/ & cmd/genconversion). These
+instructions may not be complete and will be updated as we gain experience.
+
+Adding API groups outside of the pkg/apis/ directory is not currently supported,
+but is clearly desirable.
The deep copy & conversion generators need to work by +parsing go files instead of by reflection; then they will be easy to point at +arbitrary directories: see issue [#13775](http://issue.k8s.io/13775). + +## Update the fuzzer + +Part of our testing regimen for APIs is to "fuzz" (fill with random values) API +objects and then convert them to and from the different API versions. This is +a great way of exposing places where you lost information or made bad +assumptions. If you have added any fields which need very careful formatting +(the test does not run validation) or if you have made assumptions such as +"this slice will always have at least 1 element", you may get an error or even +a panic from the `serialization_test`. If so, look at the diff it produces (or +the backtrace in case of a panic) and figure out what you forgot. Encode that +into the fuzzer's custom fuzz functions. Hint: if you added defaults for a field, +that field will need to have a custom fuzz function that ensures that the field is +fuzzed to a non-empty value. + +The fuzzer can be found in `pkg/api/testing/fuzzer.go`. + +## Update the semantic comparisons + +VERY VERY rarely is this needed, but when it hits, it hurts. In some rare +cases we end up with objects (e.g. resource quantities) that have morally +equivalent values with different bitwise representations (e.g. value 10 with a +base-2 formatter is the same as value 0 with a base-10 formatter). The only way +Go knows how to do deep-equality is through field-by-field bitwise comparisons. +This is a problem for us. + +The first thing you should do is try not to do that. If you really can't avoid +this, I'd like to introduce you to our semantic DeepEqual routine. It supports +custom overrides for specific types - you can find that in `pkg/api/helpers.go`. + +There's one other time when you might have to touch this: unexported fields. +You see, while Go's `reflect` package is allowed to touch unexported fields, us +mere mortals are not - this includes semantic DeepEqual. Fortunately, most of +our API objects are "dumb structs" all the way down - all fields are exported +(start with a capital letter) and there are no unexported fields. But sometimes +you want to include an object in our API that does have unexported fields +somewhere in it (for example, `time.Time` has unexported fields). If this hits +you, you may have to touch the semantic DeepEqual customization functions. + +## Implement your change + +Now you have the API all changed - go implement whatever it is that you're +doing! + +## Write end-to-end tests + +Check out the [E2E docs](/{{page.version}}/docs/devel/e2e-tests) for detailed information about how to write end-to-end +tests for your feature. + +## Examples and docs + +At last, your change is done, all unit tests pass, e2e passes, you're done, +right? Actually, no. You just changed the API. If you are touching an +existing facet of the API, you have to try *really* hard to make sure that +*all* the examples and docs are updated. There's no easy way to do this, due +in part to JSON and YAML silently dropping unknown fields. You're clever - +you'll figure it out. Put `grep` or `ack` to good use. + +If you added functionality, you should consider documenting it and/or writing +an example to illustrate your change. + +Make sure you update the swagger API spec by running: + +```shell +hack/update-swagger-spec.sh +``` + +The API spec changes should be in a commit separate from your other changes. 
+
+## Adding new REST objects
+
+TODO(smarterclayton): write this.
+
+## Alpha, Beta, and Stable Versions
+
+New feature development proceeds through a series of stages of increasing maturity:
+
+- Development level
+  - Object Versioning: no convention
+  - Availability: not committed to main kubernetes repo, and thus not available in official releases
+  - Audience: other developers closely collaborating on a feature or proof-of-concept
+  - Upgradeability, Reliability, Completeness, and Support: no requirements or guarantees
+- Alpha level
+  - Object Versioning: API version name contains `alpha` (e.g. `v1alpha1`)
+  - Availability: committed to main kubernetes repo; appears in an official release; feature is
+    disabled by default, but may be enabled by flag
+  - Audience: developers and expert users interested in giving early feedback on features
+  - Completeness: some API operations, CLI commands, or UI support may not be implemented; the API
+    need not have had an *API review* (an intensive and targeted review of the API, on top of a normal
+    code review)
+  - Upgradeability: the object schema and semantics may change in a later software release, without
+    any provision for preserving objects in an existing cluster;
+    removing the upgradability concern allows developers to make rapid progress; in particular,
+    API versions can increment faster than the minor release cadence and the developer need not
+    maintain multiple versions; developers should still increment the API version when object schema
+    or semantics change in an [incompatible way](#on-compatibility)
+  - Cluster Reliability: because the feature is relatively new, and may lack complete end-to-end
+    tests, enabling the feature via a flag might expose bugs which destabilize the cluster (e.g. a
+    bug in a control loop might rapidly create excessive numbers of objects, exhausting API storage).
+  - Support: there is *no commitment* from the project to complete the feature; the feature may be
+    dropped entirely in a later software release
+  - Recommended Use Cases: only in short-lived testing clusters, due to the lack of upgradeability
+    and long-term support
+- Beta level
+  - Object Versioning: API version name contains `beta` (e.g.
`v2beta3`)
+  - Availability: in official Kubernetes releases, and enabled by default
+  - Audience: users interested in providing feedback on features
+  - Completeness: all API operations, CLI commands, and UI support should be implemented; end-to-end
+    tests complete; the API has had a thorough API review and is thought to be complete, though use
+    during beta may frequently turn up API issues not thought of during review
+  - Upgradeability: the object schema and semantics may change in a later software release; when
+    this happens, an upgrade path will be documented; in some cases, objects will be automatically
+    converted to the new version; in other cases, a manual upgrade may be necessary; a manual
+    upgrade may require downtime for anything relying on the new feature, and may require
+    manual conversion of objects to the new version; when manual conversion is necessary, the
+    project will provide documentation on the process (for an example, see [v1 conversion
+    tips](/{{page.version}}/docs/api/))
+  - Cluster Reliability: since the feature has e2e tests, enabling the feature via a flag should not
+    create new bugs in unrelated features; because the feature is new, it may have minor bugs
+  - Support: the project commits to complete the feature, in some form, in a subsequent Stable
+    version; typically this will happen within 3 months, but sometimes longer; releases should
+    simultaneously support two consecutive versions (e.g. `v1beta1` and `v1beta2`; or `v1beta2` and
+    `v1`) for at least one minor release cycle (typically 3 months) so that users have enough time
+    to upgrade and migrate objects
+  - Recommended Use Cases: in short-lived testing clusters; in production clusters as part of a
+    short-lived evaluation of the feature in order to provide feedback
+- Stable level
+  - Object Versioning: API version `vX` where `X` is an integer (e.g. `v1`)
+  - Availability: in official Kubernetes releases, and enabled by default
+  - Audience: all users
+  - Completeness: same as beta
+  - Upgradeability: only [strictly compatible](#on-compatibility) changes allowed in subsequent
+    software releases
+  - Cluster Reliability: high
+  - Support: API version will continue to be present for many subsequent software releases
+  - Recommended Use Cases: any
\ No newline at end of file
diff --git a/_includes/docs/docs/devel/automation.md b/_includes/docs/docs/devel/automation.md
new file mode 100644
index 0000000000..8fc52cf0ad
--- /dev/null
+++ b/_includes/docs/docs/devel/automation.md
@@ -0,0 +1,101 @@
+
+Kubernetes uses a variety of automated tools in an attempt to relieve developers of repetitive,
+low-brain-power work. This document attempts to describe these processes.
+
+
+## Submit Queue
+
+In an effort to:
+
+ * reduce load on core developers
+ * maintain e2e stability
+ * load test GitHub's label feature
+
+We have added an automated [submit-queue](https://github.com/kubernetes/contrib/tree/master/submit-queue)
+for kubernetes.
+ +The submit-queue does the following: + +```go +for _, pr := range readyToMergePRs() { + if testsAreStable() { + mergePR(pr) + } +} +``` + +The status of the submit-queue is available [online](http://submit-queue.k8s.io/). + +### Ready to merge status + +A PR is considered "ready for merging" if it matches the following: + + * it has the `lgtm` label, and that `lgtm` is newer than the latest commit + * it has passed the cla pre-submit and has the `cla:yes` label + * it has passed the travis and shippable pre-submit tests + * one (or all) of + * its author is in kubernetes/contrib/submit-queue/whitelist.txt + * its author is in contributors.txt via the github API + * the PR has the `ok-to-merge` label + * one (or both) of + * it has passed the Jenkins e2e test + * it has the `e2e-not-required` label + +Note that the combined whitelist/committer list is available at [submit-queue.k8s.io](http://submit-queue.k8s.io). + +### Merge process + +Merges _only_ occur when the `critical builds` (Jenkins e2e for gce, gke, scalability, upgrade) are passing. +We're open to including more builds here; let us know. + +Merges are serialized, so only a single PR is merged at a time, to guard against races. + +If the PR has the `e2e-not-required` label, it is simply merged. +If the PR does not have this label, the e2e tests are re-run; if these new tests pass, the PR is merged. + +If e2e flakes or is currently buggy, the PR will not be merged, but it will be re-run on the following +pass. + +## Github Munger + +We also run a [github "munger"](https://github.com/kubernetes/contrib/tree/master/mungegithub). + +This runs repeatedly over github pulls and issues and runs modular "mungers", similar to "mungedocs". + +Currently this runs: + + * blunderbuss - Tries to automatically find an owner for a PR without an owner, using the mapping file here: + https://github.com/kubernetes/contrib/blob/master/mungegithub/blunderbuss.yml + * needs-rebase - Adds `needs-rebase` to PRs that aren't currently mergeable, and removes it from those that are. + * size - Adds `size/xs` - `size/xxl` labels to PRs + * ok-to-test - Adds the `ok-to-test` message to PRs that have an `lgtm` but that the e2e-builder would otherwise not test due to the whitelist + * ping-ci - Attempts to ping the ci systems (Travis/Shippable) if they are missing from a PR. + * lgtm-after-commit - Removes the `lgtm` label from PRs where there are commits that are newer than the `lgtm` label + +In the works: + * issue-detector - machine learning for determining if an issue that has been filed is a `support` issue, `bug`, or `feature` + +Please feel free to unleash your creativity on this tool, and send us new mungers that you think will help support the Kubernetes development process. + +## PR builder + +We also run a robotic PR builder that attempts to run e2e tests for each PR. + +Before a PR from an unknown user is run, the PR builder bot (`k8s-bot`) asks for a message from a +contributor confirming that the PR is "ok to test", and the contributor replies with that message. Contributors can also +add users to the whitelist by replying with the message "add to whitelist" ("please" is optional, but +remember to treat your robots with kindness...) + +If a PR is approved for testing, and tests either haven't run or need to be re-run, you can ask the +PR builder to re-run the tests. To do this, reply to the PR with a message that begins with `@k8s-bot test this`; this should trigger a re-build/re-test. + + +## FAQ + +#### How can I ask my PR to be tested again for Jenkins failures?
+ +Right now you have to ask a contributor (this may be you!) to re-run the test with "@k8s-bot test this". + +#### How can I kick Shippable to re-test on a failure? + +Right now the easiest way is to close and then immediately re-open the PR. \ No newline at end of file diff --git a/_includes/docs/docs/devel/cherry-picks.md b/_includes/docs/docs/devel/cherry-picks.md new file mode 100644 index 0000000000..55b2b4bb9b --- /dev/null +++ b/_includes/docs/docs/devel/cherry-picks.md @@ -0,0 +1,31 @@ + +This document explains how cherry picks are managed on release branches within the +Kubernetes projects. + +## Propose a Cherry Pick + +Any contributor can propose a cherry pick of any pull request, like so: + +```shell +hack/cherry_pick_pull.sh upstream/release-3.14 98765 +``` + +This will walk you through the steps to propose an automated cherry pick of pull + #98765 for remote branch `upstream/release-3.14`. + +## Cherry Pick Review + +Cherry pick pull requests are reviewed differently than normal pull requests. In +particular, they may be self-merged by the release branch owner without fanfare, +in the case that the release branch owner knows the cherry pick was already +requested - this should not be the norm, but it may happen. + +[Contributor License Agreements](http://releases.k8s.io/{{page.githubbranch}}/CONTRIBUTING.md) are considered implicit +for all code within cherry-pick pull requests, ***unless there is a large +conflict***. + +## Searching for Cherry Picks + +Now that we've structured cherry picks as PRs, searching for all cherry-picks +against a release is a GitHub query. For example, +[this query is all of the v0.21.x cherry-picks](https://github.com/kubernetes/kubernetes/pulls?utf8=%E2%9C%93&q=is%3Apr+%22automated+cherry+pick%22+base%3Arelease-0.21) \ No newline at end of file diff --git a/_includes/docs/docs/devel/cli-roadmap.md b/_includes/docs/docs/devel/cli-roadmap.md new file mode 100644 index 0000000000..be6cf2de9f --- /dev/null +++ b/_includes/docs/docs/devel/cli-roadmap.md @@ -0,0 +1,8 @@ + +See github issues with the following labels: +* [area/app-config-deployment](https://github.com/kubernetes/kubernetes/labels/area/app-config-deployment) +* [component/kubectl](https://github.com/kubernetes/kubernetes/labels/component/kubectl) +* [component/clientlib](https://github.com/kubernetes/kubernetes/labels/component/clientlib) + + + diff --git a/_includes/docs/docs/devel/client-libraries.md b/_includes/docs/docs/devel/client-libraries.md new file mode 100644 index 0000000000..2d474bde65 --- /dev/null +++ b/_includes/docs/docs/devel/client-libraries.md @@ -0,0 +1,20 @@ + +### Supported + + * [Go](http://releases.k8s.io/{{page.githubbranch}}/pkg/client/) + +### User Contributed + +*Note: Libraries provided by outside parties are supported by their authors, not the core Kubernetes team* + + * [Java (OSGi)](https://bitbucket.org/amdatulabs/amdatu-kubernetes) + * [Java (Fabric8, OSGi)](https://github.com/fabric8io/kubernetes-client) + * [Ruby](https://github.com/Ch00k/kuber) + * [Ruby](https://github.com/abonas/kubeclient) + * [PHP](https://github.com/devstub/kubernetes-api-php-client) + * [PHP](https://github.com/maclof/kubernetes-client) + * [Node.js](https://github.com/tenxcloud/node-kubernetes-client) + * [Perl](https://metacpan.org/pod/Net::Kubernetes) + + + diff --git a/_includes/docs/docs/devel/coding-conventions.md b/_includes/docs/docs/devel/coding-conventions.md new file mode 100644 index 0000000000..5c5e749480 --- /dev/null +++ b/_includes/docs/docs/devel/coding-conventions.md @@
-0,0 +1,56 @@ + + + - Bash + - https://google-styleguide.googlecode.com/svn/trunk/shell.xml + - Ensure that build, release, test, and cluster-management scripts run on OS X + - Go + - Ensure your code passes the [presubmit checks](/{{page.version}}/docs/devel/development/#hooks) + - [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) + - [Effective Go](https://golang.org/doc/effective_go) + - Comment your code. + - [Go's commenting conventions](http://blog.golang.org/godoc-documenting-go-code) + - If reviewers ask questions about why the code is the way it is, that's a sign that comments might be helpful. + - Command-line flags should use dashes, not underscores + - Naming + - Please consider package name when selecting an interface name, and avoid redundancy. + - e.g.: `storage.Interface` is better than `storage.StorageInterface`. + - Do not use uppercase characters, underscores, or dashes in package names. + - Please consider parent directory name when choosing a package name. + - so pkg/controllers/autoscaler/foo.go should say `package autoscaler` not `package autoscalercontroller`. + - Unless there's a good reason, the `package foo` line should match the name of the directory in which the .go file exists. + - Importers can use a different name if they need to disambiguate. + - Locks should be called `lock` and should never be embedded (always `lock sync.Mutex`). When multiple locks are present, give each lock a distinct name following Go conventions - `stateLock`, `mapLock` etc. + - API conventions + - [API changes](/{{page.version}}/docs/devel/api_changes) + - [API conventions](/{{page.version}}/docs/devel/api-conventions) + - [Kubectl conventions](/{{page.version}}/docs/devel/kubectl-conventions) + - [Logging conventions](/{{page.version}}/docs/devel/logging) + +Testing conventions + + - All new packages and most new significant functionality must come with unit tests + - Table-driven tests are preferred for testing multiple scenarios/inputs; for example, see [TestNamespaceAuthorization](https://releases.k8s.io/{{page.githubbranch}}/test/integration/auth_test.go) + - Significant features should come with integration (test/integration) and/or end-to-end (test/e2e) tests + - Including new kubectl commands and major features of existing commands + - Unit tests must pass on OS X and Windows platforms - if you use Linux specific features, your test case must either be skipped on windows or compiled out (skipped is better when running Linux specific commands, compiled out is required when your code does not compile on Windows). + +Directory and file conventions + + - Avoid package sprawl. Find an appropriate subdirectory for new packages. (See [#4851](http://issues.k8s.io/4851) for discussion.) + - Libraries with no more appropriate home belong in new package subdirectories of pkg/util + - Avoid general utility packages. Packages called "util" are suspect. Instead, derive a name that describes your desired function. For example, the utility functions dealing with waiting for operations are in the "wait" package and include functionality like Poll. So the full name is wait.Poll + - Go source files and directories use underscores, not dashes + - Package directories should generally avoid using separators as much as possible (when packages are multiple words, they usually should be in nested subdirectories). 
+ - Document directories and filenames should use dashes rather than underscores + - Contrived examples that illustrate system features belong in /docs/user-guide or /docs/admin, depending on whether it is a feature primarily intended for users that deploy applications or cluster administrators, respectively. Actual application examples belong in /examples. + - Examples should also illustrate [best practices for using the system](/{{page.version}}/docs/user-guide/config-best-practices) + - Third-party code + - Third-party Go code is managed using Godeps + - Other third-party code belongs in /third_party + - Third-party code must include licenses + - This includes modified third-party code and excerpts, as well + +Coding advice + + - Go + - [Go landmines](https://gist.github.com/lavalamp/4bd23295a9f32706a48f) diff --git a/_includes/docs/docs/devel/collab.md b/_includes/docs/docs/devel/collab.md new file mode 100644 index 0000000000..4d3490c15a --- /dev/null +++ b/_includes/docs/docs/devel/collab.md @@ -0,0 +1,39 @@ + +Kubernetes is open source, but many of the people working on it do so as their day job. In order to avoid forcing people to be "at work" effectively 24/7, we want to establish some semi-formal protocols around development. Hopefully these rules make things go more smoothly. If you find that this is not the case, please complain loudly. + +## Patches welcome + +First and foremost: as a potential contributor, your changes and ideas are welcome at any hour of the day or night, weekdays, weekends, and holidays. Please do not ever hesitate to ask a question or send a PR. + +## Code reviews + +All changes must be code reviewed. For non-maintainers this is obvious, since you can't commit anyway. But even for maintainers, we want all changes to get at least one review, preferably (for non-trivial changes obligatorily) from someone who knows the areas the change touches. For non-trivial changes we may want two reviewers. The primary reviewer will make this decision and nominate a second reviewer, if needed. Except for trivial changes, PRs should not be committed until relevant parties (e.g. owners of the subsystem affected by the PR) have had a reasonable chance to look at PR in their local business hours. + +Most PRs will find reviewers organically. If a maintainer intends to be the primary reviewer of a PR they should set themselves as the assignee on GitHub and say so in a reply to the PR. Only the primary reviewer of a change should actually do the merge, except in rare cases (e.g. they are unavailable in a reasonable timeframe). + +If a PR has gone 2 work days without an owner emerging, please poke the PR thread and ask for a reviewer to be assigned. + +Except for rare cases, such as trivial changes (e.g. typos, comments) or emergencies (e.g. broken builds), maintainers should not merge their own changes. + +Expect reviewers to request that you avoid [common go style mistakes](https://github.com/golang/go/wiki/CodeReviewComments) in your PRs. + +## Assigned reviews + +Maintainers can assign reviews to other maintainers, when appropriate. The assignee becomes the shepherd for that PR and is responsible for merging the PR once they are satisfied with it or else closing it. The assignee might request reviews from non-maintainers. + +## Merge hours + +Maintainers will do merges of appropriately reviewed-and-approved changes during their local "business hours" (typically 7:00 am Monday to 5:00 pm (17:00h) Friday). 
PRs that arrive over the weekend or on holidays will only be merged if there is a very good reason for it and if the code review requirements have been met. Concretely this means that nobody should merge changes immediately before going to bed for the night. + +There may be discussion an even approvals granted outside of the above hours, but merges will generally be deferred. + +If a PR is considered complex or controversial, the merge of that PR should be delayed to give all interested parties in all timezones the opportunity to provide feedback. Concretely, this means that such PRs should be held for 24 +hours before merging. Of course "complex" and "controversial" are left to the judgment of the people involved, but we trust that part of being a committer is the judgment required to evaluate such things honestly, and not be +motivated by your desire (or your cube-mate's desire) to get their code merged. Also see "Holds" below, any reviewer can issue a "hold" to indicate that the PR is in fact complicated or complex and deserves further review. + +PRs that are incorrectly judged to be merge-able, may be reverted and subject to re-review, if subsequent reviewers believe that they in fact are controversial or complex. + + +## Holds + +Any maintainer or core contributor who wants to review a PR but does not have time immediately may put a hold on a PR simply by saying so on the PR discussion and offering an ETA measured in single-digit days at most. Any PR that has a hold shall not be merged until the person who requested the hold acks the review, withdraws their hold, or is overruled by a preponderance of maintainers. \ No newline at end of file diff --git a/_includes/docs/docs/devel/developer-guides/vagrant.md b/_includes/docs/docs/devel/developer-guides/vagrant.md new file mode 100644 index 0000000000..4a9bdc6a74 --- /dev/null +++ b/_includes/docs/docs/devel/developer-guides/vagrant.md @@ -0,0 +1,343 @@ + +Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/develop on your local machine (Linux, Mac OS X). + +### Prerequisites + +1. Install latest version >= 1.6.2 of vagrant from http://www.vagrantup.com/downloads.html +2. Install one of: + 1. The latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads + 2. [VMWare Fusion](https://www.vmware.com/products/fusion/) version 5 or greater as well as the appropriate [Vagrant VMWare Fusion provider](https://www.vagrantup.com/vmware) + 3. [VMWare Workstation](https://www.vmware.com/products/workstation/) version 9 or greater as well as the [Vagrant VMWare Workstation provider](https://www.vagrantup.com/vmware) + 4. [Parallels Desktop](https://www.parallels.com/products/desktop/) version 9 or greater as well as the [Vagrant Parallels provider](https://parallels.github.io/vagrant-parallels/) +3. Get or build a [binary release](/{{page.version}}/docs/getting-started-guides/binary_release) + +### Setup + +By default, the Vagrant setup will create a single master VM (called kubernetes-master) and one node (called kubernetes-minion-1). Each VM will take 1 GB, so make sure you have at least 2GB to 4GB of free memory (plus appropriate free disk space). To start your local cluster, open a shell and run: + +```shell +cd kubernetes + +export KUBERNETES_PROVIDER=vagrant +./cluster/kube-up.sh +``` + +The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine. 
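+
+If you prefer not to export the variable into your shell session, one minimal alternative (same effect, just scoped to a single command) is to set it inline:
+
+```shell
+# Set the provider only for this invocation; nothing is exported into the shell.
+KUBERNETES_PROVIDER=vagrant ./cluster/kube-up.sh
+```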
+ +If you installed more than one Vagrant provider, Kubernetes will usually pick the appropriate one. However, you can override which one Kubernetes will use by setting the [`VAGRANT_DEFAULT_PROVIDER`](https://docs.vagrantup.com/v2/providers/default) environment variable: + +```shell +export VAGRANT_DEFAULT_PROVIDER=parallels +export KUBERNETES_PROVIDER=vagrant +./cluster/kube-up.sh +``` + +Vagrant will provision each machine in the cluster with all the necessary components to run Kubernetes. The initial setup can take a few minutes to complete on each machine. + +By default, each VM in the cluster is running Fedora, and all of the Kubernetes services are installed into systemd. + +To access the master or any node: + +```shell +vagrant ssh master +vagrant ssh minion-1 +``` + +If you are running more than one nodes, you can access the others by: + +```shell +vagrant ssh minion-2 +vagrant ssh minion-3 +``` + +To view the service status and/or logs on the kubernetes-master: + +```shell +$ vagrant ssh master +[vagrant@kubernetes-master ~] $ sudo systemctl status kube-apiserver +[vagrant@kubernetes-master ~] $ sudo journalctl -r -u kube-apiserver + +[vagrant@kubernetes-master ~] $ sudo systemctl status kube-controller-manager +[vagrant@kubernetes-master ~] $ sudo journalctl -r -u kube-controller-manager + +[vagrant@kubernetes-master ~] $ sudo systemctl status etcd +[vagrant@kubernetes-master ~] $ sudo systemctl status nginx +``` + +To view the services on any of the nodes: + +```shell +$ vagrant ssh minion-1 +[vagrant@kubernetes-minion-1] $ sudo systemctl status docker +[vagrant@kubernetes-minion-1] $ sudo journalctl -r -u docker +[vagrant@kubernetes-minion-1] $ sudo systemctl status kubelet +[vagrant@kubernetes-minion-1] $ sudo journalctl -r -u kubelet +``` + +### Interacting with your Kubernetes cluster with Vagrant. + +With your Kubernetes cluster up, you can manage the nodes in your cluster with the regular Vagrant commands. + +To push updates to new Kubernetes code after making source changes: + +```shell +./cluster/kube-push.sh +``` + +To stop and then restart the cluster: + +```shell +vagrant halt +./cluster/kube-up.sh +``` + +To destroy the cluster: + +```shell +vagrant destroy +``` + +Once your Vagrant machines are up and provisioned, the first thing to do is to check that you can use the `kubectl.sh` script. + +You may need to build the binaries first, you can do this with `make` + +```shell +$ ./cluster/kubectl.sh get nodes + +NAME LABELS STATUS +kubernetes-minion-0whl kubernetes.io/hostname=kubernetes-minion-0whl Ready +kubernetes-minion-4jdf kubernetes.io/hostname=kubernetes-minion-4jdf Ready +kubernetes-minion-epbe kubernetes.io/hostname=kubernetes-minion-epbe Ready +``` + +### Interacting with your Kubernetes cluster with the `kube-*` scripts. + +Alternatively to using the vagrant commands, you can also use the `cluster/kube-*.sh` scripts to interact with the vagrant based provider just like any other hosting platform for kubernetes. 
+ +All of these commands assume you have set `KUBERNETES_PROVIDER` appropriately: + +```shell +export KUBERNETES_PROVIDER=vagrant +``` + +Bring up a vagrant cluster + +```shell +./cluster/kube-up.sh +``` + +Destroy the vagrant cluster + +```shell +./cluster/kube-down.sh +``` + +Update the vagrant cluster after you make changes (only works when building your own releases locally): + +```shell +./cluster/kube-push.sh +``` + +Interact with the cluster + +```shell +./cluster/kubectl.sh +``` + +### Authenticating with your master + +When using the vagrant provider in Kubernetes, the `cluster/kubectl.sh` script will cache your credentials in a `~/.kubernetes_vagrant_auth` file so you will not be prompted for them in the future. + +```shell +$ cat ~/.kubernetes_vagrant_auth +{ "User": "vagrant", + "Password": "vagrant" + "CAFile": "/home/k8s_user/.kubernetes.vagrant.ca.crt", + "CertFile": "/home/k8s_user/.kubecfg.vagrant.crt", + "KeyFile": "/home/k8s_user/.kubecfg.vagrant.key" +} +``` + +You should now be set to use the `cluster/kubectl.sh` script. For example try to list the nodes that you have started with: + +```shell +./cluster/kubectl.sh get nodes +``` + +### Running containers + +Your cluster is running, you can list the nodes in your cluster: + +```shell +$ ./cluster/kubectl.sh get nodes + +NAME LABELS STATUS +kubernetes-minion-0whl kubernetes.io/hostname=kubernetes-minion-0whl Ready +kubernetes-minion-4jdf kubernetes.io/hostname=kubernetes-minion-4jdf Ready +kubernetes-minion-epbe kubernetes.io/hostname=kubernetes-minion-epbe Ready +``` + +Now start running some containers! + +You can now use any of the cluster/kube-*.sh commands to interact with your VM machines. +Before starting a container there will be no pods, services and replication controllers. 
+ +```shell +$ cluster/kubectl.sh get pods +NAME READY STATUS RESTARTS AGE + +$ cluster/kubectl.sh get services +NAME LABELS SELECTOR IP(S) PORT(S) + +$ cluster/kubectl.sh get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +``` + +Start a container running nginx with a replication controller and three replicas + +```shell +$ cluster/kubectl.sh run my-nginx --image=nginx --replicas=3 --port=80 +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +my-nginx my-nginx nginx run=my-nginx 3 +``` + +When listing the pods, you will see that three containers have been started and are in Waiting state: + +```shell +$ cluster/kubectl.sh get pods +NAME READY STATUS RESTARTS AGE +my-nginx-389da 1/1 Waiting 0 33s +my-nginx-kqdjk 1/1 Waiting 0 33s +my-nginx-nyj3x 1/1 Waiting 0 33s +``` + +You need to wait for the provisioning to complete, you can monitor the minions by doing: + +```shell +$ sudo salt '*minion-1' cmd.run 'docker images' +kubernetes-minion-1: + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + 96864a7d2df3 26 hours ago 204.4 MB + kubernetes/pause latest 6c4579af347b 8 weeks ago 239.8 kB +``` + +Once the docker image for nginx has been downloaded, the container will start and you can list it: + +```shell +$ sudo salt '*minion-1' cmd.run 'docker ps' +kubernetes-minion-1: + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + dbe79bf6e25b nginx:latest "nginx" 21 seconds ago Up 19 seconds k8s--mynginx.8c5b8a3a--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1.etcd--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1--fcfa837f + fa0e29c94501 kubernetes/pause:latest "/pause" 8 minutes ago Up 8 minutes 0.0.0.0:8080->80/tcp k8s--net.a90e7ce4--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1.etcd--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1--baf5b21b +``` + +Going back to listing the pods, services and replicationcontrollers, you now have: + +```shell +$ cluster/kubectl.sh get pods +NAME READY STATUS RESTARTS AGE +my-nginx-389da 1/1 Running 0 33s +my-nginx-kqdjk 1/1 Running 0 33s +my-nginx-nyj3x 1/1 Running 0 33s + +$ cluster/kubectl.sh get services +NAME LABELS SELECTOR IP(S) PORT(S) + +$ cluster/kubectl.sh get rc +NAME IMAGE(S) SELECTOR REPLICAS +my-nginx nginx run=my-nginx 3 +``` + +We did not start any services, hence there are none listed. But we see three replicas displayed properly. +Check the [guestbook](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/) application to learn how to create a service. +You can already play with scaling the replicas with: + +```shell +$ ./cluster/kubectl.sh scale rc my-nginx --replicas=2 +$ ./cluster/kubectl.sh get pods +NAME READY STATUS RESTARTS AGE +my-nginx-kqdjk 1/1 Running 0 13m +my-nginx-nyj3x 1/1 Running 0 13m +``` + +Congratulations! + +### Testing + +The following will run all of the end-to-end testing scenarios assuming you set your environment in `cluster/kube-env.sh`: + +```shell +NUM_MINIONS=3 hack/e2e-test.sh +``` + +### Troubleshooting + +#### I keep downloading the same (large) box all the time! + +By default the Vagrantfile will download the box from S3. You can change this (and cache the box locally) by providing a name and an alternate URL when calling `kube-up.sh` + +```shell +export KUBERNETES_BOX_NAME=choose_your_own_name_for_your_kuber_box +export KUBERNETES_BOX_URL=path_of_your_kuber_box +export KUBERNETES_PROVIDER=vagrant +./cluster/kube-up.sh +``` + +#### I just created the cluster, but I am getting authorization errors! 
+ +You probably have an incorrect ~/.kubernetes_vagrant_auth file for the cluster you are attempting to contact. + +```shell +rm ~/.kubernetes_vagrant_auth +``` + +After using kubectl.sh make sure that the correct credentials are set: + +```shell +$ cat ~/.kubernetes_vagrant_auth +{ + "User": "vagrant", + "Password": "vagrant" +} +``` + +#### I just created the cluster, but I do not see my container running! + +If this is your first time creating the cluster, the kubelet on each node schedules a number of docker pull requests to fetch prerequisite images. This can take some time and as a result may delay your initial pod getting provisioned. + +#### I changed Kubernetes code, but it's not running! + +Are you sure there was no build error? After running `$ vagrant provision`, scroll up and ensure that each Salt state was completed successfully on each box in the cluster. +It's very likely you see a build error due to an error in your source files! + +#### I have brought Vagrant up but the nodes won't validate! + +Are you sure you built a release first? Did you install `net-tools`? For more clues, login to one of the nodes (`vagrant ssh minion-1`) and inspect the salt minion log (`sudo cat /var/log/salt/minion`). + +#### I want to change the number of nodes! + +You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_MINIONS` to 1 like so: + +```shell +export NUM_MINIONS=1 +``` + +#### I want my VMs to have more memory! + +You can control the memory allotted to virtual machines with the `KUBERNETES_MEMORY` environment variable. +Just set it to the number of megabytes you would like the machines to have. For example: + +```shell +export KUBERNETES_MEMORY=2048 +``` + +If you need more granular control, you can set the amount of memory for the master and nodes independently. For example: + +```shell +export KUBERNETES_MASTER_MEMORY=1536 +export KUBERNETES_MINION_MEMORY=2048 + +``` +#### I ran vagrant suspend and nothing works! + +`vagrant suspend` seems to mess up the network. It's not supported at this time. \ No newline at end of file diff --git a/_includes/docs/docs/devel/development.md b/_includes/docs/docs/devel/development.md new file mode 100644 index 0000000000..d3ffad2a34 --- /dev/null +++ b/_includes/docs/docs/devel/development.md @@ -0,0 +1,315 @@ + +# Releases and Official Builds + +Official releases are built in Docker containers. Details are [here](http://releases.k8s.io/{{page.githubbranch}}/build/README.md). You can do simple builds and development with just a local Docker installation. If want to build go locally outside of docker, please continue below. + +## Go development environment + +Kubernetes is written in [Go](http://golang.org) programming language. If you haven't set up Go development environment, please follow [this instruction](http://golang.org/doc/code) to install go tool and set up GOPATH. Ensure your version of Go is at least 1.3. + +## Git Setup + +Below, we outline one of the more common git workflows that core developers use. Other git workflows are also valid. + +### Visual overview + +![Git workflow](/images/docs/git_workflow.png) + +### Fork the main repository + +1. Go to https://github.com/kubernetes/kubernetes +2. 
Click the "Fork" button (at the top right) + +### Clone your fork + +The commands below require that you have $GOPATH set ([$GOPATH docs](https://golang.org/doc/code/#GOPATH)). We highly recommend you put Kubernetes' code into your GOPATH. Note: the commands below will not work if there is more than one directory in your `$GOPATH`. + +```shell +mkdir -p $GOPATH/src/k8s.io +cd $GOPATH/src/k8s.io +# Replace "$YOUR_GITHUB_USERNAME" below with your github username +git clone https://github.com/$YOUR_GITHUB_USERNAME/kubernetes.git +cd kubernetes +git remote add upstream 'https://github.com/kubernetes/kubernetes.git' +``` + +### Create a branch and make changes + +```shell +git checkout -b myfeature +# Make your code changes +``` + +### Keeping your development fork in sync + +```shell +git fetch upstream +git rebase upstream/master +``` + +Note: If you have write access to the main repository at github.com/kubernetes/kubernetes, you should modify your git configuration so that you can't accidentally push to upstream: + +```shell +git remote set-url --push upstream no_push +``` + +### Committing changes to your fork + +```shell +git commit +git push -f origin myfeature +``` + +### Creating a pull request + +1. Visit https://github.com/$YOUR_GITHUB_USERNAME/kubernetes +2. Click the "Compare and pull request" button next to your "myfeature" branch. +3. Check out the pull request [process](/{{page.version}}/docs/devel/pull-requests) for more details + +### When to retain commits and when to squash + +Upon merge, all git commits should represent meaningful milestones or units of +work. Use commits to add clarity to the development and review process. + +Before merging a PR, squash any "fix review feedback", "typo", and "rebased" +sorts of commits. It is not imperative that every commit in a PR compile and +pass tests independently, but it is worth striving for. For mass automated +fixups (e.g. automated doc formatting), use one or more commits for the +changes to tooling and a final commit to apply the fixup en masse. This makes +reviews much easier. + +See [Faster Reviews](/{{page.version}}/docs/devel/faster_reviews) for more details. + +## godep and dependency management + +Kubernetes uses [godep](https://github.com/tools/godep) to manage dependencies. It is not strictly required for building Kubernetes but it is required when managing dependencies under the Godeps/ tree, and is required by a number of the build and test scripts. Please make sure that ``godep`` is installed and in your ``$PATH``. + +### Installing godep + +There are many ways to build and host go binaries. Here is an easy way to get utilities like `godep` installed: + +1) Ensure that [mercurial](http://mercurial.selenic.com/wiki/Download) is installed on your system. (some of godep's dependencies use the mercurial +source control system). Use `apt-get install mercurial` or `yum install mercurial` on Linux, or [brew.sh](http://brew.sh) on OS X, or download +directly from mercurial. + +2) Create a new GOPATH for your tools and install godep: + +```shell +export GOPATH=$HOME/go-tools +mkdir -p $GOPATH +go get github.com/tools/godep +``` + +3) Add $GOPATH/bin to your path. Typically you'd add this to your ~/.profile: + +```shell +export GOPATH=$HOME/go-tools +export PATH=$PATH:$GOPATH/bin +``` + +### Using godep + +Here's a quick walkthrough of one way to use godeps to add or update a Kubernetes dependency into Godeps/_workspace. For more details, please see the instructions in [godep's documentation](https://github.com/tools/godep). 
+ +1) Devote a directory to this endeavor: + +_Devoting a separate directory is not required, but it is helpful to separate dependency updates from other changes._ + +```shell +export KPATH=$HOME/code/kubernetes +mkdir -p $KPATH/src/k8s.io/kubernetes +cd $KPATH/src/k8s.io/kubernetes +git clone https://path/to/your/fork . +# Or copy your existing local repo here. IMPORTANT: making a symlink doesn't work. +``` + +2) Set up your GOPATH. + +```shell +# Option A: this will let your builds see packages that exist elsewhere on your system. +export GOPATH=$KPATH:$GOPATH +# Option B: This will *not* let your local builds see packages that exist elsewhere on your system. +export GOPATH=$KPATH +# Option B is recommended if you're going to mess with the dependencies. +``` + +3) Populate your new GOPATH. + +```shell +cd $KPATH/src/k8s.io/kubernetes +godep restore +``` + +4) Next, you can either add a new dependency or update an existing one. + +```shell +# To add a new dependency, do: +cd $KPATH/src/k8s.io/kubernetes +go get path/to/dependency +# Change code in Kubernetes to use the dependency. +godep save ./... +# To update an existing dependency, do: +cd $KPATH/src/k8s.io/kubernetes +go get -u path/to/dependency +# Change code in Kubernetes accordingly if necessary. +godep update path/to/dependency/... +``` + +_If `go get -u path/to/dependency` fails with compilation errors, instead try `go get -d -u path/to/dependency` +to fetch the dependencies without compiling them. This can happen when updating the cadvisor dependency._ + + +5) Before sending your PR, it's a good idea to sanity check that your Godeps.json file is ok by running hack/verify-godeps.sh + +_If hack/verify-godeps.sh fails after a `godep update`, it is possible that a transitive dependency was added or removed but not +updated by godeps. It then may be necessary to perform a `godep save ./...` to pick up the transitive dependency changes._ + +It is sometimes expedient to manually fix the /Godeps/godeps.json file to minimize the changes. + +Please send dependency updates in separate commits within your PR, for easier reviewing. + +## Hooks + +Before committing any changes, please link/copy these hooks into your .git +directory. This will keep you from accidentally committing non-gofmt'd go code. + +```shell +cd kubernetes/.git/hooks/ +ln -s ../../hooks/pre-commit . +``` + +## Unit tests + +```shell +cd kubernetes +hack/test-go.sh +``` + +Alternatively, you could also run: + +```shell +cd kubernetes +godep go test ./... +``` + +If you only want to run unit tests in one package, you could run ``godep go test`` under the package directory. For example, the following commands will run all unit tests in package kubelet: + +```shell +$ cd kubernetes # step into the kubernetes directory. +$ cd pkg/kubelet +$ godep go test +# some output from unit tests +PASS +ok k8s.io/kubernetes/pkg/kubelet 0.317s +``` + +## Coverage + +Currently, collecting coverage is only supported for the Go unit tests. + +To run all unit tests and generate an HTML coverage report, run the following: + +```shell +cd kubernetes +KUBE_COVER=y hack/test-go.sh +``` + +At the end of the run, an the HTML report will be generated with the path printed to stdout. 
+ +To run tests and collect coverage in only one package, pass its relative path under the `kubernetes` directory as an argument, for example: + +```shell +cd kubernetes +KUBE_COVER=y hack/test-go.sh pkg/kubectl +``` + +Multiple arguments can be passed, in which case the coverage results will be combined for all tests run. + +Coverage results for the project can also be viewed on [Coveralls](https://coveralls.io/r/kubernetes/kubernetes), and are continuously updated as commits are merged. Additionally, all pull requests which spawn a Travis build will report unit test coverage results to Coveralls. Coverage reports from before the Kubernetes Github organization was created can be found [here](https://coveralls.io/r/GoogleCloudPlatform/kubernetes). + +## Integration tests + +You need an [etcd](https://github.com/coreos/etcd/releases/tag/v2.0.0) in your path, please make sure it is installed and in your ``$PATH``. + +```shell +cd kubernetes +hack/test-integration.sh +``` + +## End-to-End tests + +You can run an end-to-end test which will bring up a master and two nodes, perform some tests, and then tear everything down. Make sure you have followed the getting started steps for your chosen cloud platform (which might involve changing the `KUBERNETES_PROVIDER` environment variable to something other than "gce". + +```shell +cd kubernetes +hack/e2e-test.sh +``` + +Pressing control-C should result in an orderly shutdown but if something goes wrong and you still have some VMs running you can force a cleanup with this command: + +```shell +go run hack/e2e.go --down +``` + +### Flag options + +See the flag definitions in `hack/e2e.go` for more options, such as reusing an existing cluster, here is an overview: + +```shell +# Build binaries for testing +go run hack/e2e.go --build +# Create a fresh cluster. Deletes a cluster first, if it exists +go run hack/e2e.go --up +# Create a fresh cluster at a specific release version. +go run hack/e2e.go --up --version=0.7.0 +# Test if a cluster is up. +go run hack/e2e.go --isup +# Push code to an existing cluster +go run hack/e2e.go --push +# Push to an existing cluster, or bring up a cluster if it's down. +go run hack/e2e.go --pushup +# Run all tests +go run hack/e2e.go --test +# Run tests matching the regex "Pods.*env" +go run hack/e2e.go -v -test --test_args="--ginkgo.focus=Pods.*env" +# Alternately, if you have the e2e cluster up and no desire to see the event stream, you can run ginkgo-e2e.sh directly: +hack/ginkgo-e2e.sh --ginkgo.focus=Pods.*env +``` + +### Combining flags + +```shell +# Flags can be combined, and their actions will take place in this order: +# -build, -push|-up|-pushup, -test|-tests=..., -down +# e.g.: +go run hack/e2e.go -build -pushup -test -down +# -v (verbose) can be added if you want streaming output instead of only +# seeing the output of failed commands. +# -ctl can be used to quickly call kubectl against your e2e cluster. Useful for +# cleaning up after a failed test or viewing logs. Use -v to avoid suppressing +# kubectl output. +go run hack/e2e.go -v -ctl='get events' +go run hack/e2e.go -v -ctl='delete pod foobar' +``` + +## Conformance testing + +End-to-end testing, as described above, is for [development +distributions](/{{page.version}}/docs/devel/writing-a-getting-started-guide). A conformance test is used on +a [versioned distro](/{{page.version}}/docs/devel/writing-a-getting-started-guide). + +The conformance test runs a subset of the e2e-tests against a manually-created cluster. 
It does not +require support for up/push/down and other operations. To run a conformance test, you need to know the +IP of the master for your cluster and the authorization arguments to use. The conformance test is +intended to run against a cluster at a specific binary release of Kubernetes. +See [conformance-test.sh](http://releases.k8s.io/{{page.githubbranch}}/hack/conformance-test.sh). + +## Testing out flaky tests + +[Instructions here](/{{page.version}}/docs/devel/flaky-tests) + +## Regenerating the CLI documentation + +```shell +hack/update-generated-docs.sh +``` \ No newline at end of file diff --git a/_includes/docs/docs/devel/e2e-tests.md b/_includes/docs/docs/devel/e2e-tests.md new file mode 100644 index 0000000000..3855af548d --- /dev/null +++ b/_includes/docs/docs/devel/e2e-tests.md @@ -0,0 +1,110 @@ + +The end-2-end tests for kubernetes provide a mechanism to test behavior of the system, and to ensure end user operations match developer specifications. In distributed systems it is not uncommon that a minor change may pass all unit tests, but cause unforeseen changes at the system level. Thus, the primary objectives of the end-2-end tests are to ensure consistent and reliable behavior of the kubernetes code base, and to catch bugs early. + +The end-2-end tests in kubernetes are built atop [ginkgo](http://onsi.github.io/ginkgo/) and [gomega](http://onsi.github.io/gomega/). There are a host of features that this BDD testing framework provides, and it is recommended that the developer read the documentation prior to diving into the tests. + +The purpose of *this* document is to serve as a primer for developers who are looking to execute or add tests using a local development environment. + +## Building and Running the Tests + +**NOTE:** The tests have an array of options. For simplicity, the examples will focus on leveraging the tests on a local cluster using `sudo ./hack/local-up-cluster.sh`. + +### Building the Tests + +The tests are built into a single binary which can be run against any deployed kubernetes system. To build the tests, navigate to your source directory and execute: + +`$ make all` + +The output for the end-2-end tests will be a single binary called `e2e.test` under the default output directory, which is typically `_output/local/bin/linux/amd64/`. Within the repository there are scripts provided under the `./hack` directory that are helpful for automation, but they may not apply for local development purposes. Instead, we recommend familiarizing yourself with the executable options. To obtain the full list of options, run the following: + +`$ ./e2e.test --help` + +### Running the Tests + +For brevity, we will look at a subset of the options, which are listed below: + +```shell +-ginkgo.dryRun=false: If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v. +-ginkgo.failFast=false: If set, ginkgo will stop running a test suite after a failure occurs. +-ginkgo.failOnPending=false: If set, ginkgo will mark the test suite as failed if any specs are pending. +-ginkgo.focus="": If set, ginkgo will only run specs that match this regular expression. +-ginkgo.skip="": If set, ginkgo will only run specs that do not match this regular expression. +-ginkgo.trace=false: If set, default reporter prints out the full stack trace when a failure occurs +-ginkgo.v=false: If set, default reporter prints out all specs as they begin.
+-host="": The host, or api-server, to connect to +-kubeconfig="": Path to kubeconfig containing embedded authinfo. +-prom-push-gateway="": The URL to prometheus gateway, so that metrics can be pushed during e2es and scraped by prometheus. Typically something like 127.0.0.1:9091. +-provider="": The name of the Kubernetes provider (gce, gke, local, vagrant, etc.) +-repo-root="../../": Root directory of kubernetes repository, for finding test files. +``` + +Prior to running the tests, it is recommended that you first create a simple auth file in your home directory, e.g. `$HOME/.kubernetes_auth` , with the following: + +```json +{ + "User": "root", + "Password": "" +} +``` + +Next, you will need a cluster that you can test against. As mentioned earlier, you will want to execute `sudo ./hack/local-up-cluster.sh`. To get a sense of what tests exist, you may want to run: + +```shell +e2e.test --host="127.0.0.1:8080" --provider="local" --ginkgo.v=true -ginkgo.dryRun=true --kubeconfig="$HOME/.kubernetes_auth" --repo-root="$KUBERNETES_SRC_PATH" +``` + +If you wish to execute a specific set of tests you can use the `-ginkgo.focus=` regex, e.g.: + +```shell +e2e.test ... --ginkgo.focus="DNS|(?i)nodeport(?-i)|kubectl guestbook" +``` + +Conversely, if you wish to exclude a set of tests, you can run: + +```shell +e2e.test ... --ginkgo.skip="Density|Scale" +``` + +As mentioned earlier there are a host of other options that are available, but are left to the developer + +**NOTE:** If you are running tests on a local cluster repeatedly, you may need to periodically perform some manual cleanup. +- `rm -rf /var/run/kubernetes`, clear kube generated credentials, sometimes stale permissions can cause problems. +- `sudo iptables -F`, clear ip tables rules left by the kube-proxy. + +## Adding a New Test + +As mentioned above, prior to adding a new test, it is a good idea to perform a `-ginkgo.dryRun=true` on the system, in order to see if a behavior is already being tested, or to determine if it may be possible to augment an existing set of tests for a specific use case. + +If a behavior does not currently have coverage and a developer wishes to add a new e2e test, navigate to the ./test/e2e directory and create a new test using the existing suite as a guide. + +**TODO:** Create a self-documented example which has been disabled, but can be copied to create new tests and outlines the capabilities and libraries used. + +## Performance Evaluation + +Another benefit of the end-2-end tests is the ability to create reproducible loads on the system, which can then be used to determine the responsiveness, or analyze other characteristics of the system. For example, the density tests load the system to 30,50,100 pods per/node and measures the different characteristics of the system, such as throughput, api-latency, etc. + +For a good overview of how we analyze performance data, please read the following [post](http://blog.kubernetes.io/2015/09/kubernetes-performance-measurements-and) + +For developers who are interested in doing their own performance analysis, we recommend setting up [prometheus](http://prometheus.io/) for data collection, and using [promdash](http://prometheus.io/docs/visualization/promdash/) to visualize the data. There also exists the option of pushing your own metrics in from the tests using a [prom-push-gateway](http://prometheus.io/docs/instrumenting/pushing/). Containers for all of these components can be found [here](https://hub.docker.com/u/prom/). 
+ +For more accurate measurements, you may wish to set up prometheus external to kubernetes in an environment where it can access the major system components (api-server, controller-manager, scheduler). This is especially useful when attempting to gather metrics in a load-balanced api-server environment, because all api-servers can be analyzed independently as well as collectively. On startup, configuration file is passed to prometheus that specifies the endpoints that prometheus will scrape, as well as the sampling interval. + +**prometheus.conf** + +```conf +job: { + name: "kubernetes" + scrape_interval: "1s" + target_group: { + # apiserver(s) + target: "http://localhost:8080/metrics" + # scheduler + target: "http://localhost:10251/metrics" + # controller-manager + target: "http://localhost:10252/metrics" + } +``` + +Once prometheus is scraping the kubernetes endpoints, that data can then be plotted using promdash, and alerts can be created against the assortment of metrics that kubernetes provides. + +**HAPPY TESTING!** diff --git a/_includes/docs/docs/devel/faster_reviews.md b/_includes/docs/docs/devel/faster_reviews.md new file mode 100644 index 0000000000..f5d82cb4c6 --- /dev/null +++ b/_includes/docs/docs/devel/faster_reviews.md @@ -0,0 +1,200 @@ + +Most of what is written here is not at all specific to Kubernetes, but it bears +being written down in the hope that it will occasionally remind people of "best +practices" around code reviews. + +You've just had a brilliant idea on how to make Kubernetes better. Let's call +that idea "FeatureX". Feature X is not even that complicated. You have a +pretty good idea of how to implement it. You jump in and implement it, fixing a +bunch of stuff along the way. You send your PR - this is awesome! And it sits. +And sits. A week goes by and nobody reviews it. Finally someone offers a few +comments, which you fix up and wait for more review. And you wait. Another +week or two goes by. This is horrible. + +What went wrong? One particular problem that comes up frequently is this - your +PR is too big to review. You've touched 39 files and have 8657 insertions. +When your would-be reviewers pull up the diffs they run away - this PR is going +to take 4 hours to review and they don't have 4 hours right now. They'll get to it +later, just as soon as they have more free time (ha!). + +Let's talk about how to avoid this. + +## 0. Familiarize yourself with project conventions + +* [Development guide](/{{page.version}}/docs/devel/development) +* [Coding conventions](/{{page.version}}/docs/devel/coding-conventions) +* [API conventions](/{{page.version}}/docs/devel/api-conventions) +* [Kubectl conventions](/{{page.version}}/docs/devel/kubectl-conventions) + +## 1. Don't build a cathedral in one PR + +Are you sure FeatureX is something the Kubernetes team wants or will accept, or +that it is implemented to fit with other changes in flight? Are you willing to +bet a few days or weeks of work on it? If you have any doubt at all about the +usefulness of your feature or the design - make a proposal doc (in docs/proposals; +for example [the QoS proposal](http://prs.k8s.io/11713)) or a sketch PR (e.g., just +the API or Go interface) or both. Write or code up just enough to express the idea +and the design and why you made those choices, then get feedback on this. Be clear +about what type of feedback you are asking for. Now, if we ask you to change a +bunch of facets of the design, you won't have to re-write it all. + +## 2. 
Smaller diffs are exponentially better + +Small PRs get reviewed faster and are more likely to be correct than big ones. +Let's face it - attention wanes over time. If your PR takes 60 minutes to +review, I almost guarantee that the reviewer's eye for details is not as keen in +the last 30 minutes as it was in the first. This leads to multiple rounds of +review when one might have sufficed. In some cases the review is delayed in its +entirety by the need for a large contiguous block of time to sit and read your +code. + +Whenever possible, break up your PRs into multiple commits. Making a series of +discrete commits is a powerful way to express the evolution of an idea or the +different ideas that make up a single feature. There's a balance to be struck, +obviously. If your commits are too small they become more cumbersome to deal +with. Strive to group logically distinct ideas into commits. + +For example, if you found that FeatureX needed some "prefactoring" to fit in, +make a commit that JUST does that prefactoring. Then make a new commit for +FeatureX. Don't lump unrelated things together just because you didn't think +about prefactoring. If you need to, fork a new branch, do the prefactoring +there and send a PR for that. If you can explain why you are doing seemingly +no-op work ("it makes the FeatureX change easier, I promise") we'll probably be +OK with it. + +Obviously, a PR with 25 commits is still very cumbersome to review, so use +common sense. + +## 3. Multiple small PRs are often better than multiple commits + +If you can extract whole ideas from your PR and send those as PRs of their own, +you can avoid the painful problem of continually rebasing. Kubernetes is a +fast-moving codebase - lock in your changes ASAP, and make merges be someone +else's problem. + +Obviously, we want every PR to be useful on its own, so you'll have to use +common sense in deciding what can be a PR vs what should be a commit in a larger +PR. Rule of thumb - if this commit or set of commits is directly related to +FeatureX and nothing else, it should probably be part of the FeatureX PR. If +you can plausibly imagine someone finding value in this commit outside of +FeatureX, try it as a PR. + +Don't worry about flooding us with PRs. We'd rather have 100 small, obvious PRs +than 10 unreviewable monoliths. + +## 4. Don't rename, reformat, comment, etc in the same PR + +Often, as you are implementing FeatureX, you find things that are just wrong. +Bad comments, poorly named functions, bad structure, weak type-safety. You +should absolutely fix those things (or at least file issues, please) - but not +in this PR. See the above points - break unrelated changes out into different +PRs or commits. Otherwise your diff will have WAY too many changes, and your +reviewer won't see the forest because of all the trees. + +## 5. Comments matter + +Read up on GoDoc - follow those general rules. If you're writing code and you +think there is any possible chance that someone might not understand why you did +something (or that you won't remember what you yourself did), comment it. If +you think there's something pretty obvious that we could follow up on, add a +TODO. Many code-review comments are about this exact issue. + +## 5. Tests are almost always required + +Nothing is more frustrating than doing a review, only to find that the tests are +inadequate or even entirely absent. Very few PRs can touch code and NOT touch +tests. If you don't know how to test FeatureX - ask! 
We'll be happy to help +you design things for easy testing or to suggest appropriate test cases. + +## 6. Look for opportunities to generify + +If you find yourself writing something that touches a lot of modules, think hard +about the dependencies you are introducing between packages. Can some of what +you're doing be made more generic and moved up and out of the FeatureX package? +Do you need to use a function or type from an otherwise unrelated package? If +so, promote! We have places specifically for hosting more generic code. + +Likewise if FeatureX is similar in form to FeatureW which was checked in last +month and it happens to exactly duplicate some tricky stuff from FeatureW, +consider prefactoring core logic out and using it in both FeatureW and FeatureX. +But do that in a different commit or PR, please. + +## 7. Fix feedback in a new commit + +Your reviewer has finally sent you some feedback on FeatureX. You make a bunch +of changes and ... what? You could patch those into your commits with git +"squash" or "fixup" logic. But that makes your changes hard to verify. Unless +your whole PR is pretty trivial, you should instead put your fixups into a new +commit and re-push. Your reviewer can then look at that commit on its own - so +much faster to review than starting over. + +We might still ask you to clean up your commits at the very end, for the sake +of a more readable history, but don't do this until asked, typically at the point +where the PR would otherwise be tagged LGTM. + +General squashing guidelines: + +* Sausage => squash + + When there are several commits to fix bugs in the original commit(s), address reviewer feedback, etc. Really we only want to see the end state and commit message for the whole PR. + +* Layers => don't squash + + When there are independent changes layered upon each other to achieve a single goal. For instance, writing a code munger could be one commit, applying it could be another, and adding a precommit check could be a third. One could argue they should be separate PRs, but there's really no way to test/review the munger without seeing it applied, and there needs to be a precommit check to ensure the munged output doesn't immediately get out of date. + +A commit, as much as possible, should be a single logical change. Each commit should always have a good title line (<70 characters) and include an additional description paragraph describing in more detail the change intended. Do not link pull requests by `#` in a commit description, because GitHub creates lots of spam. Instead, reference other PRs via the PR your commit is in. + +## 8. KISS, YAGNI, MVP, etc + +Sometimes we need to remind each other of core tenets of software design - Keep +It Simple, You Aren't Gonna Need It, Minimum Viable Product, and so on. Adding +features "because we might need it later" is antithetical to software that +ships. Add the things you need NOW and (ideally) leave room for things you +might need later - but don't implement them now. + +## 9. Push back + +We understand that it is hard to imagine, but sometimes we make mistakes. It's +OK to push back on changes requested during a review. If you have a good reason +for doing something a certain way, you are absolutely allowed to debate the +merits of a requested change. You might be overruled, but you might also +prevail. We're mostly pretty reasonable people. Mostly. + +## 10. I'm still getting stalled - help?! + +So, you've done all that and you still aren't getting any PR love? 
Here's some +things you can do that might help kick a stalled process along: + + * Make sure that your PR has an assigned reviewer (assignee in GitHub). If + this is not the case, reply to the PR comment stream asking for one to be + assigned. + + * Ping the assignee (@username) on the PR comment stream asking for an + estimate of when they can get to it. + + * Ping the assignee by email (many of us have email addresses that are well + published or are the same as our GitHub handle @google.com or @redhat.com). + + * Ping the [team](https://github.com/orgs/kubernetes/teams) (via @team-name) + that works in the area you're submitting code. + +If you think you have fixed all the issues in a round of review, and you haven't +heard back, you should ping the reviewer (assignee) on the comment stream with a +"please take another look" (PTAL) or similar comment indicating you are done and +you think it is ready for re-review. In fact, this is probably a good habit for +all PRs. + +One phenomenon of open-source projects (where anyone can comment on any issue) +is the dog-pile - your PR gets so many comments from so many people it becomes +hard to follow. In this situation you can ask the primary reviewer +(assignee) whether they want you to fork a new PR to clear out all the comments. +Remember: you don't HAVE to fix every issue raised by every person who feels +like commenting, but you should at least answer reasonable comments with an +explanation. + +## Final: Use common sense + +Obviously, none of these points are hard rules. There is no document that can +take the place of common sense and good taste. Use your best judgment, but put +a bit of thought into how your work can be made easier to review. If you do +these things your PRs will flow much more easily. \ No newline at end of file diff --git a/_includes/docs/docs/devel/flaky-tests.md b/_includes/docs/docs/devel/flaky-tests.md new file mode 100644 index 0000000000..596639f0d0 --- /dev/null +++ b/_includes/docs/docs/devel/flaky-tests.md @@ -0,0 +1,64 @@ + +Sometimes unit tests are flaky. This means that due to (usually) race conditions, they will occasionally fail, even though most of the time they pass. + +We have a goal of 99.9% flake free tests. This means that there is only one flake in one thousand runs of a test. + +Running a test 1000 times on your own machine can be tedious and time consuming. Fortunately, there is a better way to achieve this using Kubernetes. + +_Note: these instructions are mildly hacky for now, as we get run once semantics and logging they will get better_ + +There is a testing image `brendanburns/flake` up on the docker hub. We will use this image to test our fix. + +Create a replication controller with the following config: + +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: flakecontroller +spec: + replicas: 24 + template: + metadata: + labels: + name: flake + spec: + containers: + - name: flake + image: brendanburns/flake + env: + - name: TEST_PACKAGE + value: pkg/tools + - name: REPO_SPEC + value: https://github.com/kubernetes/kubernetes +``` + +Note that we omit the labels and the selector fields of the replication controller, because they will be populated from the labels field of the pod template by default. + +```shell +kubectl create -f ./controller.yaml +``` + +This will spin up 24 instances of the test. They will run to completion, then exit, and the kubelet will restart them, accumulating more and more runs of the test. 
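+
+If you just want a rough, client-side view of progress, the pod restart count is a reasonable proxy for the number of completed runs. This is only a sketch - the `name=flake` label comes from the pod template above, and the exact output columns depend on your kubectl version:
+
+```shell
+# Each completed run of the test container shows up as one restart.
+kubectl get pods -l name=flake
+```
+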
+You can examine the recent runs of the test by calling `docker ps -a` and looking for tasks that exited with non-zero exit codes. Unfortunately, `docker ps -a` only keeps around the exit status of the last 15-20 containers with the same image, so you have to check them frequently.
+You can use this script to automate checking for failures, assuming your cluster is running on GCE and has four nodes:
+
+```shell
+echo "" > output.txt
+for i in {1..4}; do
+  echo "Checking kubernetes-minion-${i}"
+  echo "kubernetes-minion-${i}:" >> output.txt
+  gcloud compute ssh "kubernetes-minion-${i}" --command="sudo docker ps -a" >> output.txt
+done
+grep "Exited ([^0])" output.txt
+```
+
+Eventually you will have sufficient runs for your purposes. At that point you can stop and delete the replication controller by running:
+
+```shell
+kubectl stop replicationcontroller flakecontroller
+```
+
+If you do a final check for flakes with `docker ps -a`, ignore tasks that exited -1, since that's what happens when you stop the replication controller.
+
+Happy flake hunting!
\ No newline at end of file diff --git a/_includes/docs/docs/devel/getting-builds.md b/_includes/docs/docs/devel/getting-builds.md new file mode 100644 index 0000000000..56e57d7f88 --- /dev/null +++ b/_includes/docs/docs/devel/getting-builds.md @@ -0,0 +1,31 @@
+
+You can use [hack/get-build.sh](http://releases.k8s.io/{{page.githubbranch}}/hack/get-build.sh) to get the most recent builds, or use it as a reference for how to fetch them yourself with curl. With `get-build.sh` you can grab the most recent stable build, the most recent release candidate, or the most recent build to pass our CI and GCE e2e tests (essentially a nightly build).
+
+Run `./hack/get-build.sh -h` for its usage.
+
+For example, to get a build at a specific version (v1.0.2):
+
+```shell
+./hack/get-build.sh v1.0.2
+```
+
+Alternatively, to get the latest stable release:
+
+```shell
+./hack/get-build.sh release/stable
+```
+
+Finally, you can just print the latest or stable version:
+
+```shell
+./hack/get-build.sh -v ci/latest
+```
+
+You can also use the gsutil tool to explore the Google Cloud Storage release buckets. Here are some examples:
+
+```shell
+gsutil cat gs://kubernetes-release/ci/latest.txt # output the latest ci version number
+gsutil cat gs://kubernetes-release/ci/latest-green.txt # output the latest ci version number that passed gce e2e
+gsutil ls gs://kubernetes-release/ci/v0.20.0-29-g29a55cc/ # list the contents of a ci release
+gsutil ls gs://kubernetes-release/release # list all official releases and RCs
+``` diff --git a/_includes/docs/docs/devel/index.md b/_includes/docs/docs/devel/index.md new file mode 100644 index 0000000000..e2683f07dc --- /dev/null +++ b/_includes/docs/docs/devel/index.md @@ -0,0 +1,76 @@
+
+The developer guide is for anyone who wants to either write code which directly accesses the
+Kubernetes API, or contribute directly to the Kubernetes project.
+It assumes some familiarity with concepts in the [User Guide](/{{page.version}}/docs/user-guide/) and the [Cluster Admin
+Guide](/{{page.version}}/docs/admin/).
+
+
+## The process of developing and contributing code to the Kubernetes project
+
+* **On Collaborative Development** ([collab.md](/{{page.version}}/docs/devel/collab)): Info on pull requests and code reviews.
+
+* **GitHub Issues** ([issues.md](/{{page.version}}/docs/devel/issues)): How incoming issues are reviewed and prioritized.
+
+* **Pull Request Process** ([pull-requests.md](/{{page.version}}/docs/devel/pull-requests)): When and why pull requests are closed.
+
+* **Faster PR reviews** ([faster_reviews.md](/{{page.version}}/docs/devel/faster_reviews)): How to get faster PR reviews.
+
+* **Getting Recent Builds** ([getting-builds.md](/{{page.version}}/docs/devel/getting-builds)): How to get recent builds, including the latest builds that pass CI.
+
+* **Automated Tools** ([automation.md](/{{page.version}}/docs/devel/automation)): Descriptions of the automation that is running on our GitHub repository.
+
+
+## Setting up your dev environment, coding, and debugging
+
+* **Development Guide** ([development.md](/{{page.version}}/docs/devel/development)): Setting up your development environment.
+
+* **Hunting flaky tests** ([flaky-tests.md](/{{page.version}}/docs/devel/flaky-tests)): We have a goal of 99.9% flake-free tests.
+  Here's how to run your tests many times.
+
+* **Logging Conventions** ([logging.md](/{{page.version}}/docs/devel/logging)): Glog levels.
+
+* **Profiling Kubernetes** ([profiling.md](/{{page.version}}/docs/devel/profiling)): How to plug the Go pprof profiler into Kubernetes.
+
+* **Instrumenting Kubernetes with a new metric**
+  ([instrumentation.md](/{{page.version}}/docs/devel/instrumentation)): How to add a new metric to the
+  Kubernetes code base.
+
+* **Coding Conventions** ([coding-conventions.md](/{{page.version}}/docs/devel/coding-conventions)):
+  Coding style advice for contributors.
+
+
+## Developing against the Kubernetes API
+
+* API objects are explained at [http://kubernetes.io/third_party/swagger-ui/](http://kubernetes.io/third_party/swagger-ui/).
+
+* **Annotations** ([docs/user-guide/annotations.md](/{{page.version}}/docs/user-guide/annotations)): Attaching arbitrary non-identifying metadata to objects.
+  Programs that automate Kubernetes objects may use annotations to store small amounts of their state.
+
+* **API Conventions** ([api-conventions.md](/{{page.version}}/docs/devel/api-conventions)):
+  Defining the verbs and resources used in the Kubernetes API.
+
+* **API Client Libraries** ([client-libraries.md](/{{page.version}}/docs/devel/client-libraries)):
+  A list of existing client libraries, both supported and user-contributed.
+
+
+## Writing plugins
+
+* **Authentication Plugins** ([docs/admin/authentication.md](/{{page.version}}/docs/admin/authentication)):
+  The current and planned states of authentication tokens.
+
+* **Authorization Plugins** ([docs/admin/authorization.md](/{{page.version}}/docs/admin/authorization)):
+  Authorization applies to all HTTP requests on the main apiserver port.
+  This doc explains the available authorization implementations.
+
+* **Admission Control Plugins** ([admission_control](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/admission_control.md))
+
+
+## Building releases
+
+* **Making release notes** ([making-release-notes.md](/{{page.version}}/docs/devel/making-release-notes)): Generating release notes for a new release.
+
+* **Releasing Kubernetes** ([releasing.md](/{{page.version}}/docs/devel/releasing)): How to create a Kubernetes release (as in version)
+  and how the version information gets embedded into the built binaries.
+ + + diff --git a/_includes/docs/docs/devel/instrumentation.md b/_includes/docs/docs/devel/instrumentation.md new file mode 100644 index 0000000000..82ebe4329b --- /dev/null +++ b/_includes/docs/docs/devel/instrumentation.md @@ -0,0 +1,34 @@ + +The following is a step-by-step guide for adding a new metric to the Kubernetes code base. + +We use the Prometheus monitoring system's golang client library for instrumenting our code. Once you've picked out a file that you want to add a metric to, you should: + +1. Import "github.com/prometheus/client_golang/prometheus". + +2. Create a top-level var to define the metric. For this, you have to: + 1. Pick the type of metric. Use a Gauge for things you want to set to a particular value, a Counter for things you want to increment, or a Histogram or Summary for histograms/distributions of values (typically for latency). Histograms are better if you're going to aggregate the values across jobs, while summaries are better if you just want the job to give you a useful summary of the values. + 2. Give the metric a name and description. + 3. Pick whether you want to distinguish different categories of things using labels on the metric. If so, add "Vec" to the name of the type of metric you want and add a slice of the label names to the definition. + + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/apiserver/apiserver.go#L53 + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/kubelet/metrics/metrics.go#L31 + +3. Register the metric so that prometheus will know to export it. + + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/kubelet/metrics/metrics.go#L74 + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/apiserver/apiserver.go#L78 + +4. Use the metric by calling the appropriate method for your metric type (Set, Inc/Add, or Observe, respectively for Gauge, Counter, or Histogram/Summary), first calling WithLabelValues if your metric has any labels + + https://github.com/kubernetes/kubernetes/blob/3ce7fe8310ff081dbbd3d95490193e1d5250d2c9/pkg/kubelet/kubelet.go#L1384 + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/apiserver/apiserver.go#L87 + + +These are the metric type definitions if you're curious to learn about them or need more information: +https://github.com/prometheus/client_golang/blob/master/prometheus/gauge.go +https://github.com/prometheus/client_golang/blob/master/prometheus/counter.go +https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go +https://github.com/prometheus/client_golang/blob/master/prometheus/summary.go + + + diff --git a/_includes/docs/docs/devel/issues.md b/_includes/docs/docs/devel/issues.md new file mode 100644 index 0000000000..8ade1ca424 --- /dev/null +++ b/_includes/docs/docs/devel/issues.md @@ -0,0 +1,16 @@ + +A list quick overview of how we will review and prioritize incoming issues at https://github.com/kubernetes/kubernetes/issues + +## Priorities + +We will use GitHub issue labels for prioritization. The absence of a priority label means the bug has not been reviewed and prioritized yet. + +## Definitions + +* P0 - something broken for users, build broken, or critical security issue. Someone must drop everything and work on it. 
+* P1 - must fix for earliest possible binary release (every two weeks) +* P2 - should be fixed in next major release version +* P3 - default priority for lower importance bugs that we still want to track and plan to fix at some point +* design - priority/design is for issues that are used to track design discussions +* support - priority/support is used for issues tracking user support requests +* untriaged - anything without a priority/X label will be considered untriaged \ No newline at end of file diff --git a/_includes/docs/docs/devel/kubectl-conventions.md b/_includes/docs/docs/devel/kubectl-conventions.md new file mode 100644 index 0000000000..1dcdf06db5 --- /dev/null +++ b/_includes/docs/docs/devel/kubectl-conventions.md @@ -0,0 +1,96 @@ + +Updated: 8/27/2015 + +* TOC +{:toc} + + +## Principles + +* Strive for consistency across commands +* Explicit should always override implicit + * Environment variables should override default values + * Command-line flags should override default values and environment variables + * --namespace should also override the value specified in a specified resource + +## Command conventions + +* Command names are all lowercase, and hyphenated if multiple words. +* kubectl VERB NOUNs for commands that apply to multiple resource types +* NOUNs may be specified as TYPE name1 name2 ... or TYPE/name1 TYPE/name2; TYPE is omitted when only a single type is expected +* Resource types are all lowercase, with no hyphens; both singular and plural forms are accepted +* NOUNs may also be specified by one or more file arguments: -f file1 -f file2 ... +* Resource types may have 2- or 3-letter aliases. +* Business logic should be decoupled from the command framework, so that it can be reused independently of kubectl, cobra, etc. 
+ * Ideally, commonly needed functionality would be implemented server-side in order to avoid problems typical of "fat" clients and to make it readily available to non-Go clients +* Commands that generate resources, such as `run` or `expose`, should obey the following conventions: + * Flags should be converted to a parameter Go map or json map prior to invoking the generator + * The generator must be versioned so that users depending on a specific behavior may pin to that version, via `--generator=` + * Generation should be decoupled from creation + * `--dry-run` should output the resource that would be created, without creating it +* A command group (e.g., `kubectl config`) may be used to group related non-standard commands, such as custom generators, mutations, and computations + +## Flag conventions + +* Flags are all lowercase, with words separated by hyphens +* Flag names and single-character aliases should have the same meaning across all commands +* Command-line flags corresponding to API fields should accept API enums exactly (e.g., --restart=Always) +* Do not reuse flags for different semantic purposes, and do not use different flag names for the same semantic purpose -- grep for `"Flags()"` before adding a new flag +* Use short flags sparingly, only for the most frequently used options, prefer lowercase over uppercase for the most common cases, try to stick to well known conventions for UNIX commands and/or Docker, where they exist, and update this list when adding new short flags + * `-f`: Resource file + * also used for `--follow` in `logs`, but should be deprecated in favor of `-F` + * `-l`: Label selector + * also used for `--labels` in `expose`, but should be deprecated + * `-L`: Label columns + * `-c`: Container + * also used for `--client` in `version`, but should be deprecated + * `-i`: Attach stdin + * `-t`: Allocate TTY + * also used for `--template`, but deprecated + * `-w`: Watch (currently also used for `--www` in `proxy`, but should be deprecated) + * `-p`: Previous + * also used for `--pod` in `exec`, but deprecated + * also used for `--patch` in `patch`, but should be deprecated + * also used for `--port` in `proxy`, but should be deprecated + * `-P`: Static file prefix in `proxy`, but should be deprecated + * `-r`: Replicas + * `-u`: Unix socket + * `-v`: Verbose logging level +* `--dry-run`: Don't modify the live state; simulate the mutation and display the output +* `--local`: Don't contact the server; just do local read, transformation, generation, etc. and display the output +* `--output-version=...`: Convert the output to a different API group/version +* `--validate`: Validate the resource schema + +## Output conventions + +* By default, output is intended for humans rather than programs + * However, affordances are made for simple parsing of `get` output +* Only errors should be directed to stderr +* `get` commands should output one row per resource, and one resource per row + * Column titles and values should not contain spaces in order to facilitate commands that break lines into fields: cut, awk, etc. 
+ * By default, `get` output should fit within about 80 columns + * Eventually we could perhaps auto-detect width + * `-o wide` may be used to display additional columns + * The first column should be the resource name, titled `NAME` (may change this to an abbreviation of resource type) + * NAMESPACE should be displayed as the first column when --all-namespaces is specified + * The last default column should be time since creation, titled `AGE` + * `-Lkey` should append a column containing the value of label with key `key`, with `` if not present + * json, yaml, Go template, and jsonpath template formats should be supported and encouraged for subsequent processing + * Users should use --api-version or --output-version to ensure the output uses the version they expect +* `describe` commands may output on multiple lines and may include information from related resources, such as events. Describe should add additional information from related resources that a normal user may need to know - if a user would always run "describe resource1" and the immediately want to run a "get type2" or "describe resource2", consider including that info. Examples, persistent volume claims for pods that reference claims, events for most resources, nodes and the pods scheduled on them. When fetching related resources, a targeted field selector should be used in favor of client side filtering of related resources. +* Mutations should output TYPE/name verbed by default, where TYPE is singular; `-o name` may be used to just display TYPE/name, which may be used to specify resources in other commands + +## Documentation conventions + +* Commands are documented using Cobra; docs are then auto-generated by `hack/update-generated-docs.sh`. + * Use should contain a short usage string for the most common use case(s), not an exhaustive specification + * Short should contain a one-line explanation of what the command does + * Long may contain multiple lines, including additional information about input, output, commonly used flags, etc. + * Example should contain examples + * Start commands with `$` + * A comment should precede each example command, and should begin with `#` +* Use "FILENAME" for filenames +* Use "TYPE" for the particular flavor of resource type accepted by kubectl, rather than "RESOURCE" or "KIND" +* Use "NAME" for resource names + + diff --git a/_includes/docs/docs/devel/logging.md b/_includes/docs/docs/devel/logging.md new file mode 100644 index 0000000000..8f87d80621 --- /dev/null +++ b/_includes/docs/docs/devel/logging.md @@ -0,0 +1,27 @@ + +The following conventions for the glog levels to use. [glog](http://godoc.org/github.com/golang/glog) is globally preferred to [log](http://golang.org/pkg/log/) for better runtime control. + +* glog.Errorf() - Always an error +* glog.Warningf() - Something unexpected, but probably not an error +* glog.Infof() has multiple levels: + * glog.V(0) - Generally useful for this to ALWAYS be visible to an operator + * Programmer errors + * Logging extra info about a panic + * CLI argument handling + * glog.V(1) - A reasonable default log level if you don't want verbosity. + * Information about config (listening on X, watching Y) + * Errors that repeat frequently that relate to conditions that can be corrected (pod detected as unhealthy) + * glog.V(2) - Useful steady state information about the service and important log messages that may correlate to significant changes in the system. This is the recommended default log level for most systems. 
+    * Logging HTTP requests and their exit codes
+    * System state changing (killing pod)
+    * Controller state change events (starting pods)
+    * Scheduler log messages
+  * glog.V(3) - Extended information about changes
+    * More info about system state changes
+  * glog.V(4) - Debug-level verbosity (for now)
+    * Logging in particularly thorny parts of code where you may want to come back later and check it
+
+As per the comments, the practical default level is V(2). Developers and QE environments may wish to run at V(3) or V(4). If you wish to change the log level, you can pass in `-v=X` where X is the desired maximum level to log.
+
+
+ diff --git a/_includes/docs/docs/devel/making-release-notes.md b/_includes/docs/docs/devel/making-release-notes.md new file mode 100644 index 0000000000..9248509de9 --- /dev/null +++ b/_includes/docs/docs/devel/making-release-notes.md @@ -0,0 +1,41 @@
+
+This document describes the process for making release notes for a release.
+
+### 1) Note the PR number of the previous release
+
+Find the most-recent PR that was merged with the previous .0 release. Remember this as $LASTPR.
+_TODO_: Figure out a way to record this somewhere to save the next release engineer time.
+
+Find the most-recent PR that was merged with the current .0 release. Remember this as $CURRENTPR.
+
+### 2) Run the release-notes tool
+
+```shell
+${KUBERNETES_ROOT}/build/make-release-notes.sh $LASTPR $CURRENTPR
+```
+
+### 3) Trim the release notes
+
+This generates a list of the entire set of PRs merged since the last minor
+release. It is likely long, and many PRs aren't worth mentioning. If any of the
+PRs were cherrypicked into patches on the last minor release, you should exclude
+them from the current release's notes.
+
+Open up `candidate-notes.md` in your favorite editor.
+
+Remove, regroup, and organize to your heart's content.
+
+
+### 4) Update CHANGELOG.md
+
+With the final markdown all set, cut and paste it to the top of `CHANGELOG.md`.
+
+### 5) Update the Release page
+
+ * Switch to the [releases](https://github.com/kubernetes/kubernetes/releases) page.
+ * Open up the release you are working on.
+ * Cut and paste the final markdown from above into the release notes.
+ * Press Save.
+
+
+ diff --git a/_includes/docs/docs/devel/profiling.md b/_includes/docs/docs/devel/profiling.md new file mode 100644 index 0000000000..57d95cd66b --- /dev/null +++ b/_includes/docs/docs/devel/profiling.md @@ -0,0 +1,43 @@
+
+This document explains how to plug in the profiler and how to profile Kubernetes services.
+
+## Profiling library
+
+Go comes with the built-in 'net/http/pprof' profiling library and profiling web service. The service works by binding the debug/pprof/ subtree on a running webserver to the profiler. Reading from subpages of debug/pprof returns pprof-formatted profiles of the running binary. The output can be processed offline by the tool of choice, or used as an input to the handy 'go tool pprof', which can graphically represent the result.
+
+## Adding profiling to the APIserver
+
+TL;DR: Add these lines:
+
+```go
+m.mux.HandleFunc("/debug/pprof/", pprof.Index)
+m.mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
+m.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+```
+
+to the init(c *Config) method in 'pkg/master/master.go' and import the 'net/http/pprof' package.
+
+In most use cases it's enough to do 'import _ net/http/pprof', which automatically registers a handler in the default http.Server.
A slight inconvenience is that the APIserver uses the default server for intra-cluster communication, so plugging the profiler into it is not really useful. In 'pkg/master/server/server.go' more servers are created and started as separate goroutines. The one that usually serves external traffic is secureServer. The handler for this traffic is defined in 'pkg/master/master.go' and stored in the Handler variable. It is created from an HTTP multiplexer, so the only thing that needs to be done is adding the profiler handler functions to this multiplexer. This is exactly what the lines after the TL;DR above do.
+
+## Connecting to the profiler
+
+Even with the profiler running, it is not entirely straightforward to use 'go tool pprof' with it. The problem is that, at least for dev purposes, the certificates generated for the APIserver are not signed by anyone trusted, and because secureServer serves only secure traffic, it isn't straightforward to connect to the service. The best workaround found so far is to create an ssh tunnel from the kubernetes_master open unsecured port to some external server, and use this server as a proxy. To save everyone from looking up the correct ssh flags, it is done by running:
+
+```shell
+ssh kubernetes_master -L<local_port>:localhost:8080
+```
+
+or the analogous command for your cloud provider. Afterwards you can, for example, run
+
+```shell
+go tool pprof http://localhost:<local_port>/debug/pprof/profile
+```
+
+to get a 30-second CPU profile.
+
+## Contention profiling
+
+To enable contention profiling you need to add the line `rt.SetBlockProfileRate(1)` in addition to the `m.mux.HandleFunc(...)` lines added before (`rt` stands for `runtime` in `master.go`). This enables the 'debug/pprof/block' subpage, which can be used as an input to `go tool pprof`.
+
+
+ diff --git a/_includes/docs/docs/devel/pull-requests.md b/_includes/docs/docs/devel/pull-requests.md new file mode 100644 index 0000000000..39c45c990d --- /dev/null +++ b/_includes/docs/docs/devel/pull-requests.md @@ -0,0 +1,34 @@
+
+An overview of how we will manage old or out-of-date pull requests.
+
+## Process
+
+We will close any pull requests older than two weeks.
+
+Exceptions can be made for PRs that have active review comments, or that are awaiting other dependent PRs. Closed pull requests are easy to recreate, and little work is lost by closing a pull request that subsequently needs to be reopened.
+
+We want to limit the total number of PRs in flight to:
+* Maintain a clean project
+* Remove old PRs that would be difficult to rebase as the underlying code has changed over time
+* Encourage code velocity
+
+## Life of a Pull Request
+
+Unless we are in the last few weeks of a milestone, when we need to reduce churn and stabilize, we aim to always be accepting pull requests.
+
+PR merges are managed either manually by the [on call](https://github.com/kubernetes/kubernetes/wiki/Kubernetes-on-call-rotation) or automatically by the [submit queue](https://github.com/kubernetes/contrib/tree/master/submit-queue).
+
+There are several requirements for the submit queue to work:
+* The author must have signed the CLA ("cla: yes" label added to the PR)
+* No changes can have been made since the last lgtm label was applied
+* k8s-bot must have reported that the GCE E2E build and test steps passed (Travis, Shippable and Jenkins builds)
+
+Additionally, for infrequent or new contributors, we require the on call to apply the "ok-to-merge" label manually. This is gated by the [whitelist](https://github.com/kubernetes/contrib/tree/master/submit-queue/whitelist.txt).
+
+## Automation
+
+We use a variety of automation to manage pull requests.
This automation is described in detail
+[elsewhere](/{{page.version}}/docs/devel/automation).
+
+
+ diff --git a/_includes/docs/docs/devel/releasing.md b/_includes/docs/docs/devel/releasing.md new file mode 100644 index 0000000000..9d933cc00a --- /dev/null +++ b/_includes/docs/docs/devel/releasing.md @@ -0,0 +1,307 @@
+
+This document explains how to cut a release, and the theory behind it. If you
+just want to cut a release and move on with your life, you can stop reading
+after the first section.
+
+## How to cut a Kubernetes release
+
+Regardless of whether you are cutting a major or minor version, cutting a
+release breaks down into four pieces:
+
+1. Selecting release components.
+1. Tagging and merging the release in Git.
+1. Building and pushing the binaries.
+1. Writing release notes.
+
+You should progress in this strict order.
+
+### Building a New Major/Minor Version (`vX.Y.0`)
+
+#### Selecting Release Components
+
+When cutting a major/minor release, your first job is to find the branch
+point. We cut `vX.Y.0` releases directly from `master`, which is also the
+branch that we have the most continuous validation on. Go first to [the main GCE
+Jenkins end-to-end job](http://go/k8s-test/job/kubernetes-e2e-gce) and next to [the
+Critical Builds page](http://go/k8s-test/view/Critical%20Builds) and hopefully find a
+recent Git hash that looks stable across at least `kubernetes-e2e-gce` and
+`kubernetes-e2e-gke-ci`. First, glance through builds and look for nice solid
+rows of green builds, and then check temporally with the other Critical Builds
+to make sure they're solid around then as well. Once you find some greens, you
+can find the Git hash for a build by looking at the "Console Log", then look for
+`githash=`. You should see a line like:
+
+```shell
++ githash=v0.20.2-322-g974377b
+```
+
+Because Jenkins builds frequently, if you're looking between jobs
+(e.g. `kubernetes-e2e-gke-ci` and `kubernetes-e2e-gce`), there may be no single
+`githash` that's been run on both jobs. In that case, take a green
+`kubernetes-e2e-gce` build (but please check that it corresponds to a temporally
+similar build that's green on `kubernetes-e2e-gke-ci`). Lastly, if you're having
+trouble understanding why the GKE continuous integration clusters are failing
+and you're trying to cut a release, don't hesitate to contact the GKE
+oncall.
+
+Before proceeding to the next step:
+
+```shell
+export BRANCHPOINT=v0.20.2-322-g974377b
+```
+
+Where `v0.20.2-322-g974377b` is the git hash you decided on. This will become
+our (retroactive) branch point.
+
+#### Branching, Tagging and Merging
+
+Do the following:
+
+1. `export VER=x.y` (e.g. `0.20` for v0.20)
+1. cd to the base of the repo
+1. `git fetch upstream && git checkout -b release-${VER} ${BRANCHPOINT}` (you did set `${BRANCHPOINT}`, right?)
+1. Make sure you don't have any files you care about littering your repo (they
+   better be checked in or outside the repo, or the next step will delete them).
+1. `make clean && git reset --hard HEAD && git clean -xdf`
+1. `make` (TBD: you really shouldn't have to do this, but the swagger output step requires it right now)
+1. `./build/mark-new-version.sh v${VER}.0` to mark the new release and get further
+   instructions. This creates a series of commits on the branch you're working
+   on (`release-${VER}`), including forking our documentation for the release,
+   the release version commit (which is then tagged), and the post-release
+   version commit.
+1. Follow the instructions given to you by that script.
They are canon for the + remainder of the Git process. If you don't understand something in that + process, please ask! + +**TODO**: how to fix tags, etc., if you have to shift the release branchpoint. + +#### Building and Pushing Binaries + +In your git repo (you still have `${VER}` set from above right?): + +1. `git checkout upstream/master && build/build-official-release.sh v${VER}.0` (the `build-official-release.sh` script is version agnostic, so it's best to run it off `master` directly). +1. Follow the instructions given to you by that script. +1. At this point, you've done all the Git bits, you've got all the binary bits pushed, and you've got the template for the release started on GitHub. + +#### Writing Release Notes + +[This helpful guide](/{{page.version}}/docs/devel/making-release-notes) describes how to write release +notes for a major/minor release. In the release template on GitHub, leave the +last PR number that the tool finds for the `.0` release, so the next releaser +doesn't have to hunt. + +### Building a New Patch Release (`vX.Y.Z` for `Z > 0`) + +#### Selecting Release Components + +We cut `vX.Y.Z` releases from the `release-vX.Y` branch after all cherry picks +to the branch have been resolved. You should ensure all outstanding cherry picks +have been reviewed and merged and the branch validated on Jenkins (validation +TBD). See the [Cherry Picks](/{{page.version}}/docs/devel/cherry-picks) for more information on how to +manage cherry picks prior to cutting the release. + +#### Tagging and Merging + +1. `export VER=x.y` (e.g. `0.20` for v0.20) +1. `export PATCH=Z` where `Z` is the patch level of `vX.Y.Z` +1. cd to the base of the repo +1. `git fetch upstream && git checkout -b upstream/release-${VER} release-${VER}` +1. Make sure you don't have any files you care about littering your repo (they + better be checked in or outside the repo, or the next step will delete them). +1. `make clean && git reset --hard HEAD && git clean -xdf` +1. `make` (TBD: you really shouldn't have to do this, but the swagger output step requires it right now) +1. `./build/mark-new-version.sh v${VER}.${PATCH}` to mark the new release and get further + instructions. This creates a series of commits on the branch you're working + on (`release-${VER}`), including forking our documentation for the release, + the release version commit (which is then tagged), and the post-release + version commit. +1. Follow the instructions given to you by that script. They are canon for the + remainder of the Git process. If you don't understand something in that + process, please ask! When proposing PRs, you can pre-fill the body with + `hack/cherry_pick_list.sh upstream/release-${VER}` to inform people of what + is already on the branch. + +**TODO**: how to fix tags, etc., if the release is changed. + +#### Building and Pushing Binaries + +In your git repo (you still have `${VER}` and `${PATCH}` set from above right?): + +1. `git checkout upstream/master && build/build-official-release.sh + v${VER}.${PATCH}` (the `build-official-release.sh` script is version + agnostic, so it's best to run it off `master` directly). +1. Follow the instructions given to you by that script. At this point, you've + done all the Git bits, you've got all the binary bits pushed, and you've got + the template for the release started on GitHub. + +#### Writing Release Notes + +Run `hack/cherry_pick_list.sh ${VER}.${PATCH}~1` to get the release notes for +the patch release you just created. 
Feel free to prune anything internal, like
+you would for a major release, but typically for patch releases we tend to
+include everything in the release notes.
+
+## Origin of the Sources
+
+Kubernetes may be built from either a git tree (using `hack/build-go.sh`) or
+from a tarball (using either `hack/build-go.sh` or `go install`) or directly by
+the Go native build system (using `go get`).
+
+When building from git, we want to be able to insert specific information about
+the build tree at build time. In particular, we want to use the output of `git
+describe` to generate the version of Kubernetes and the status of the build
+tree (add a `-dirty` suffix if the tree was modified.)
+
+When building from a tarball or using the Go build system, we will not have
+access to the information about the git tree, but we still want to be able to
+tell whether this build corresponds to an exact release (e.g. v0.3) or is
+between releases (e.g. at some point in development between v0.3 and v0.4).
+
+## Version Number Format
+
+In order to account for these use cases, there are some specific formats that
+may end up representing the Kubernetes version. Here are a few examples:
+
+- **v0.5**: This is official version 0.5 and this version will only be used
+  when building from a clean git tree at the v0.5 git tag, or from a tree
+  extracted from the tarball corresponding to that specific release.
+- **v0.5-15-g0123abcd4567**: This is the `git describe` output and it indicates
+  that we are 15 commits past the v0.5 release and that the SHA1 of the commit
+  where the binaries were built was `0123abcd4567`. It is only possible to have
+  this level of detail in the version information when building from git, not
+  when building from a tarball.
+- **v0.5-15-g0123abcd4567-dirty** or **v0.5-dirty**: The extra `-dirty` suffix
+  means that the tree had local modifications or untracked files at the time of
+  the build, so there's no guarantee that the source code matches exactly the
+  state of the tree at the `0123abcd4567` commit or at the `v0.5` git tag
+  (resp.)
+- **v0.5-dev**: This means we are building from a tarball or using `go get` or,
+  if we have a git tree, we are using `go install` directly, so it is not
+  possible to inject the git version into the build information. Additionally,
+  this is not an official release, so the `-dev` suffix indicates that the
+  version we are building is after `v0.5` but before `v0.6`. (There is actually
+  an exception where a commit with `v0.5-dev` is not present on `v0.6`, see
+  later for details.)
+
+## Injecting Version into Binaries
+
+In order to cover the different build cases, we start by providing information
+that can be used when using only Go build tools or when we do not have the git
+version information available.
+
+To be able to provide a meaningful version in those cases, we set the contents
+of variables in a Go source file that will be used when no overrides are
+present.
+
+We are using `pkg/version/base.go` as the source of versioning in the absence of
+information from git. Here is a sample of that file's contents:
+
+```go
+var (
+    gitVersion string = "v0.4-dev" // version from git, output of $(git describe)
+    gitCommit  string = ""         // sha1 from git, output of $(git rev-parse HEAD)
+)
+```
+
+This means a build with `go install` or `go get` or a build from a tarball will
+yield binaries that will identify themselves as `v0.4-dev` and will not be able
+to provide you with a SHA1.
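+
+For reference, the underlying Go mechanism looks roughly like the sketch below. This is illustrative only: the exact flags are assembled by `hack/build-go.sh` (described next), and the `k8s.io/kubernetes` import prefix and `./cmd/kubectl` target here are assumptions rather than the precise values used by the build scripts.
+
+```shell
+# Sketch: derive a version string from git and override the variable at link time.
+VERSION=$(git describe --dirty)
+go build -ldflags "-X k8s.io/kubernetes/pkg/version.gitVersion=${VERSION}" ./cmd/kubectl
+```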
+ +To add the extra versioning information when building from git, the +`hack/build-go.sh` script will gather that information (using `git describe` and +`git rev-parse`) and then create a `-ldflags` string to pass to `go install` and +tell the Go linker to override the contents of those variables at build time. It +can, for instance, tell it to override `gitVersion` and set it to +`v0.4-13-g4567bcdef6789-dirty` and set `gitCommit` to `4567bcdef6789...` which +is the complete SHA1 of the (dirty) tree used at build time. + +## Handling Official Versions + +Handling official versions from git is easy, as long as there is an annotated +git tag pointing to a specific version then `git describe` will return that tag +exactly which will match the idea of an official version (e.g. `v0.5`). + +Handling it on tarballs is a bit harder since the exact version string must be +present in `pkg/version/base.go` for it to get embedded into the binaries. But +simply creating a commit with `v0.5` on its own would mean that the commits +coming after it would also get the `v0.5` version when built from tarball or `go +get` while in fact they do not match `v0.5` (the one that was tagged) exactly. + +To handle that case, creating a new release should involve creating two adjacent +commits where the first of them will set the version to `v0.5` and the second +will set it to `v0.5-dev`. In that case, even in the presence of merges, there +will be a single commit where the exact `v0.5` version will be used and all +others around it will either have `v0.4-dev` or `v0.5-dev`. + +The diagram below illustrates it. + +![Diagram of git commits involved in the release](/images/docs/releasing.svg) + +After working on `v0.4-dev` and merging PR 99 we decide it is time to release +`v0.5`. So we start a new branch, create one commit to update +`pkg/version/base.go` to include `gitVersion = "v0.5"` and `git commit` it. + +We test it and make sure everything is working as expected. + +Before sending a PR for it, we create a second commit on that same branch, +updating `pkg/version/base.go` to include `gitVersion = "v0.5-dev"`. That will +ensure that further builds (from tarball or `go install`) on that tree will +always include the `-dev` prefix and will not have a `v0.5` version (since they +do not match the official `v0.5` exactly.) + +We then send PR 100 with both commits in it. + +Once the PR is accepted, we can use `git tag -a` to create an annotated tag +*pointing to the one commit* that has `v0.5` in `pkg/version/base.go` and push +it to GitHub. (Unfortunately GitHub tags/releases are not annotated tags, so +this needs to be done from a git client and pushed to GitHub using SSH or +HTTPS.) + +## Parallel Commits + +While we are working on releasing `v0.5`, other development takes place and +other PRs get merged. For instance, in the example above, PRs 101 and 102 get +merged to the master branch before the versioning PR gets merged. + +This is not a problem, it is only slightly inaccurate that checking out the tree +at commit `012abc` or commit `345cde` or at the commit of the merges of PR 101 +or 102 will yield a version of `v0.4-dev` *but* those commits are not present in +`v0.5`. + +In that sense, there is a small window in which commits will get a +`v0.4-dev` or `v0.4-N-gXXX` label and while they're indeed later than `v0.4` +but they are not really before `v0.5` in that `v0.5` does not contain those +commits. + +Unfortunately, there is not much we can do about it. 
On the other hand, other +projects seem to live with that and it does not really become a large problem. + +As an example, Docker commit a327d9b91edf has a `v1.1.1-N-gXXX` label but it is +not present in Docker `v1.2.0`: + +```shell +$ git describe a327d9b91edf +v1.1.1-822-ga327d9b91edf + +$ git log --oneline v1.2.0..a327d9b91edf +a327d9b91edf Fix data space reporting from Kb/Mb to KB/MB + +(Non-empty output here means the commit is not present on v1.2.0.) +``` + +## Release Notes + +No official release should be made final without properly matching release notes. + +There should be made available, per release, a small summary, preamble, of the +major changes, both in terms of feature improvements/bug fixes and notes about +functional feature changes (if any) regarding the previous released version so +that the BOM regarding updating to it gets as obvious and trouble free as possible. + +After this summary, preamble, all the relevant PRs/issues that got in that +version should be listed and linked together with a small summary understandable +by plain mortals (in a perfect world PR/issue's title would be enough but often +it is just too cryptic/geeky/domain-specific that it isn't). + + + diff --git a/_includes/docs/docs/devel/scheduler.md b/_includes/docs/docs/devel/scheduler.md new file mode 100755 index 0000000000..589d463957 --- /dev/null +++ b/_includes/docs/docs/devel/scheduler.md @@ -0,0 +1,48 @@ + +The Kubernetes scheduler runs as a process alongside the other master +components such as the API server. Its interface to the API server is to watch +for Pods with an empty PodSpec.NodeName, and for each Pod, it posts a Binding +indicating where the Pod should be scheduled. + +## The scheduling process + +The scheduler tries to find a node for each Pod, one at a time, as it notices +these Pods via watch. There are three steps. First it applies a set of "predicates" that filter out +inappropriate nodes. For example, if the PodSpec specifies resource requests, then the scheduler +will filter out nodes that don't have at least that much resources available (computed +as the capacity of the node minus the sum of the resource requests of the containers that +are already running on the node). Second, it applies a set of "priority functions" +that rank the nodes that weren't filtered out by the predicate check. For example, +it tries to spread Pods across nodes while at the same time favoring the least-loaded +nodes (where "load" here is sum of the resource requests of the containers running on the node, +divided by the node's capacity). +Finally, the node with the highest priority is chosen +(or, if there are multiple such nodes, then one of them is chosen at random). The code +for this main scheduling loop is in the function `Schedule()` in +[plugin/pkg/scheduler/generic_scheduler.go](http://releases.k8s.io/{{page.githubbranch}}/plugin/pkg/scheduler/generic_scheduler.go) + +## Scheduler extensibility + +The scheduler is extensible: the cluster administrator can choose which of the pre-defined +scheduling policies to apply, and can add new ones. The built-in predicates and priorities are +defined in [plugin/pkg/scheduler/algorithm/predicates/predicates.go](http://releases.k8s.io/{{page.githubbranch}}/plugin/pkg/scheduler/algorithm/predicates/predicates.go) and +[plugin/pkg/scheduler/algorithm/priorities/priorities.go](http://releases.k8s.io/{{page.githubbranch}}/plugin/pkg/scheduler/algorithm/priorities/priorities.go), respectively. 
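+
+Conceptually, a predicate filters nodes and a priority scores the survivors. The Go sketch below is illustrative only; it does not use the real plugin interfaces or types from `plugin/pkg/scheduler/algorithm`, and the signatures are simplified assumptions:
+
+```go
+// Illustrative sketch only - not the actual scheduler plugin API.
+package sketch
+
+type pod struct{ name string }
+type node struct{ name string }
+
+// A predicate filters out nodes that cannot run the pod.
+type predicate func(p pod, n node) bool
+
+// A priority scores a node that survived filtering (higher is better).
+type priority func(p pod, n node) int
+
+// schedule applies every predicate, sums the priority scores of the
+// surviving nodes, and returns the best one. (Ties are broken by
+// first-seen here; the real scheduler picks among ties at random.)
+func schedule(p pod, nodes []node, preds []predicate, prios []priority) (node, bool) {
+	var best node
+	bestScore, found := -1, false
+	for _, n := range nodes {
+		feasible := true
+		for _, pred := range preds {
+			if !pred(p, n) {
+				feasible = false
+				break
+			}
+		}
+		if !feasible {
+			continue
+		}
+		score := 0
+		for _, prio := range prios {
+			score += prio(p, n)
+		}
+		if score > bestScore {
+			best, bestScore, found = n, score, true
+		}
+	}
+	return best, found
+}
+```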
+The policies that are applied when scheduling can be chosen in one of two ways. Normally, +the policies used are selected by the functions `defaultPredicates()` and `defaultPriorities()` in +[plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go](http://releases.k8s.io/{{page.githubbranch}}/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go). +However, the choice of policies +can be overridden by passing the command-line flag `--policy-config-file` to the scheduler, pointing to a JSON +file specifying which scheduling policies to use. See +[examples/scheduler-policy-config.json](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/scheduler-policy-config.json) for an example +config file. (Note that the config file format is versioned; the API is defined in +[plugin/pkg/scheduler/api](http://releases.k8s.io/{{page.githubbranch}}/plugin/pkg/scheduler/api/)). +Thus to add a new scheduling policy, you should modify predicates.go or priorities.go, +and either register the policy in `defaultPredicates()` or `defaultPriorities()`, or use a policy config file. + +## Exploring the code + +If you want to get a global picture of how the scheduler works, you can start in +[plugin/cmd/kube-scheduler/app/server.go](http://releases.k8s.io/{{page.githubbranch}}/plugin/cmd/kube-scheduler/app/server.go) + + + diff --git a/_includes/docs/docs/devel/scheduler_algorithm.md b/_includes/docs/docs/devel/scheduler_algorithm.md new file mode 100755 index 0000000000..1516114a10 --- /dev/null +++ b/_includes/docs/docs/devel/scheduler_algorithm.md @@ -0,0 +1,36 @@ + +For each unscheduled Pod, the Kubernetes scheduler tries to find a node across the cluster according to a set of rules. A general introduction to the Kubernetes scheduler can be found at [scheduler.md](/{{page.version}}/docs/devel/scheduler). In this document, the algorithm of how to select a node for the Pod is explained. There are two steps before a destination node of a Pod is chosen. The first step is filtering all the nodes and the second is ranking the remaining nodes to find a best fit for the Pod. + +## Filtering the nodes + +The purpose of filtering the nodes is to filter out the nodes that do not meet certain requirements of the Pod. For example, if the free resource on a node (measured by the capacity minus the sum of the resource requests of all the Pods that already run on the node) is less than the Pod's required resource, the node should not be considered in the ranking phase so it is filtered out. Currently, there are several "predicates" implementing different filtering policies, including: + +- `NoDiskConflict`: Evaluate if a pod can fit due to the volumes it requests, and those that are already mounted. +- `PodFitsResources`: Check if the free resource (CPU and Memory) meets the requirement of the Pod. The free resource is measured by the capacity minus the sum of requests of all Pods on the node. To learn more about the resource QoS in Kubernetes, please check [QoS proposal](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/proposals/resource-qos.md). +- `PodFitsHostPorts`: Check if any HostPort required by the Pod is already occupied on the node. +- `PodFitsHost`: Filter out all nodes except the one specified in the PodSpec's NodeName field. +- `PodSelectorMatches`: Check if the labels of the node match the labels specified in the Pod's `nodeSelector` field ([Here](/{{page.version}}/docs/user-guide/node-selection/) is an example of how to use `nodeSelector` field). 
+- `CheckNodeLabelPresence`: Check if all the specified labels exist on a node or not, regardless of the value.
+
+The details of the above predicates can be found in [plugin/pkg/scheduler/algorithm/predicates/predicates.go](http://releases.k8s.io/{{page.githubbranch}}/plugin/pkg/scheduler/algorithm/predicates/predicates.go). All predicates mentioned above can be used in combination to perform a sophisticated filtering policy. Kubernetes uses some, but not all, of these predicates by default. You can see which ones are used by default in [plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go](http://releases.k8s.io/{{page.githubbranch}}/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go).
+
+## Ranking the nodes
+
+The filtered nodes are considered suitable to host the Pod, and often more than one node remains. Kubernetes prioritizes the remaining nodes to find the "best" one for the Pod. The prioritization is performed by a set of priority functions. For each remaining node, a priority function gives a score on a scale of 0-10, with 10 representing "most preferred" and 0 "least preferred". Each priority function is weighted by a positive number, and the final score of each node is calculated by adding up all the weighted scores. For example, suppose there are two priority functions, `priorityFunc1` and `priorityFunc2`, with weighting factors `weight1` and `weight2` respectively; the final score of some NodeA is:
+
+    finalScoreNodeA = (weight1 * priorityFunc1) + (weight2 * priorityFunc2)
+
+After the scores of all nodes are calculated, the node with the highest score is chosen as the host of the Pod. If more than one node has the same highest score, one of them is chosen at random.
+
+Currently, the Kubernetes scheduler provides some practical priority functions, including:
+
+- `LeastRequestedPriority`: The node is prioritized based on the fraction of the node that would be free if the new Pod were scheduled onto the node. (In other words, (capacity - sum of requests of all Pods already on the node - request of Pod that is being scheduled) / capacity). CPU and memory are equally weighted. The node with the highest free fraction is the most preferred. Note that this priority function has the effect of spreading Pods across the nodes with respect to resource consumption.
+- `CalculateNodeLabelPriority`: Prefer nodes that have the specified label.
+- `BalancedResourceAllocation`: This priority function tries to put the Pod on a node such that the CPU and Memory utilization rate is balanced after the Pod is deployed.
+- `CalculateSpreadPriority`: Spread Pods by minimizing the number of Pods belonging to the same service on the same node.
+- `CalculateAntiAffinityPriority`: Spread Pods by minimizing the number of Pods belonging to the same service on nodes with the same value for a particular label.
+
+The details of the above priority functions can be found in [plugin/pkg/scheduler/algorithm/priorities](http://releases.k8s.io/{{page.githubbranch}}/plugin/pkg/scheduler/algorithm/priorities/). Kubernetes uses some, but not all, of these priority functions by default. You can see which ones are used by default in [plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go](http://releases.k8s.io/{{page.githubbranch}}/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go).
Similarly to the predicates, you can combine the above priority functions and assign weight factors (positive numbers) to them as you want (check [scheduler.md](/{{page.version}}/docs/devel/scheduler) for how to customize).
+
+
+ diff --git a/_includes/docs/docs/devel/writing-a-getting-started-guide.md b/_includes/docs/docs/devel/writing-a-getting-started-guide.md new file mode 100644 index 0000000000..47e475b5e7 --- /dev/null +++ b/_includes/docs/docs/devel/writing-a-getting-started-guide.md @@ -0,0 +1,97 @@
+
+This page gives some advice for anyone planning to write or update a Getting Started Guide for Kubernetes.
+It also gives some guidelines which reviewers should follow when reviewing a pull request for a
+guide.
+
+A Getting Started Guide provides instructions on how to create a Kubernetes cluster on top of a particular
+type(s) of infrastructure. Infrastructure includes: the IaaS provider for VMs;
+the node OS; inter-node networking; and the node Configuration Management system.
+A guide refers to scripts, Configuration Management files, and/or binary assets such as RPMs. We call
+the combination of all these things needed to run on a particular type of infrastructure a
+**distro**.
+
+[The Matrix](/{{page.version}}/docs/getting-started-guides/) lists the distros. If there is already a guide
+which is similar to the one you have planned, consider improving that one.
+
+
+Distros fall into two categories:
+
+  - **versioned distros** are tested to work with a particular binary release of Kubernetes. These
+    come in a wide variety, reflecting a wide range of ideas and preferences in how to run a cluster.
+  - **development distros** are tested to work with the latest Kubernetes source code. But, there are
+    relatively few of these and the bar is much higher for creating one. They must support
+    fully automated cluster creation, deletion, and upgrade.
+
+There are different guidelines for each.
+
+## Versioned Distro Guidelines
+
+These guidelines say *what* to do. See the Rationale section for *why*.
+
+  - Send us a PR.
+  - Put the instructions in `docs/getting-started-guides/...`. Scripts go there too. This helps devs easily
+    search for uses of flags by guides.
+  - We may ask that you host binary assets or large amounts of code in our `contrib` directory or on your
+    own repo.
+  - Add or update a row in [The Matrix](/{{page.version}}/docs/getting-started-guides/).
+  - Clearly state the binary version of Kubernetes that you tested in your guide doc.
+  - Set up a cluster and run the [conformance test](/{{page.version}}/docs/devel/development/#conformance-testing) against it, and report the
+    results in your PR.
+  - Versioned distros should typically not modify or add code in `cluster/`. That is just scripts for developer
+    distros.
+  - When a new major or minor release of Kubernetes comes out, we may also release a new
+    conformance test, and require a new conformance test run to earn a conformance checkmark.
+
+If you have a cluster partially working, but doing all the above steps seems like too much work,
+we still want to hear from you. We suggest you write a blog post or a Gist, and we will link to it on our wiki page.
+Just file an issue or chat with us on [Slack](/{{page.version}}/docs/troubleshooting/#slack) and one of the committers will link to it from the wiki.
+
+## Development Distro Guidelines
+
+These guidelines say *what* to do. See the Rationale section for *why*.
+
+  - The main reason to add a new development distro is to support a new IaaS provider (VM and
+    network management).
+   This means implementing a new `pkg/cloudprovider/providers/$IAAS_NAME`.
+ - Development distros should use Saltstack for Configuration Management.
+ - Development distros need to support automated cluster creation, deletion, upgrading, etc.
+   This means writing scripts in `cluster/$IAAS_NAME`.
+ - All commits to the tip of this repo must not break any of the development distros.
+ - The author of a change is responsible for making the changes necessary on all the cloud providers if the
+   change affects any of them, and for reverting the change if it breaks any of the CIs.
+ - A development distro needs to have an organization that owns it. This organization needs to:
+   - set up and maintain Continuous Integration that runs e2e frequently (multiple times per day) against the
+     distro at head, and that notifies all devs of breakage;
+   - be reasonably available for questions, and assist with
+     refactoring and feature additions that affect code for their IaaS.
+
+## Rationale
+
+ - We want people to create Kubernetes clusters with whatever IaaS, node OS,
+   configuration management tools, and so on, they are familiar with. The
+   guidelines for **versioned distros** are designed for flexibility.
+ - We want developers to be able to work without understanding all the permutations of
+   IaaS, node OS, and configuration management. The guidelines for **development distros** are designed
+   for consistency.
+ - We want users to have a uniform experience with Kubernetes whenever they follow instructions anywhere
+   in our GitHub repository. So, we ask that versioned distros pass a **conformance test** to make sure
+   they really work.
+ - We want to **limit the number of development distros** for several reasons. Developers should
+   only have to change a limited number of places to add a new feature. Also, since we will
+   gate commits on passing CI for all distros, and since end-to-end tests are typically somewhat
+   flaky, false positives and CI backlogs would be highly likely with many CI pipelines.
+ - We do not require versioned distros to do **CI** for several reasons. It is a steep
+   learning curve to understand our automated testing scripts. It is considerable effort
+   to fully automate setup and teardown of a cluster, which is needed for CI. And not everyone
+   has the time and money to run CI. We do not want to
+   discourage people from writing and sharing guides because of this.
+ - Versioned distro authors are free to run their own CI and let us know if there is breakage, but we
+   will not include them as commit hooks -- there cannot be so many commit checks that it is impossible
+   to pass them all.
+ - We prefer a single Configuration Management tool for development distros. If there were more
+   than one, the core developers would have to learn multiple tools and update config in multiple
+   places. **Saltstack** happens to be the one we picked when we started the project. We
+   welcome versioned distros that use any tool; there are already examples of
+   CoreOS Fleet, Ansible, and others.
+ - You can still run code from head or your own branch
+   if you use another Configuration Management tool -- you just have to do some manual steps
+   during testing and deployment.
\ No newline at end of file
diff --git a/_includes/docs/docs/getting-started-guides/aws.md b/_includes/docs/docs/getting-started-guides/aws.md
new file mode 100644
index 0000000000..bf760ba891
--- /dev/null
+++ b/_includes/docs/docs/getting-started-guides/aws.md
@@ -0,0 +1,101 @@
+
+* TOC
+{:toc}
+
+## Prerequisites
+
+1. You need an AWS account. Visit [http://aws.amazon.com](http://aws.amazon.com) to get started.
+2. Install and configure the [AWS Command Line Interface](http://aws.amazon.com/cli).
+3. You need an AWS [instance profile and role](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles) with EC2 full access.
+
+NOTE: This script uses the 'default' AWS profile by default.
+You can explicitly select a different profile with the `AWS_DEFAULT_PROFILE` environment variable:
+
+```shell
+export AWS_DEFAULT_PROFILE=myawsprofile
+```
+
+## Cluster turnup
+
+### Supported procedure: `get-kube`
+
+```shell
+# Using wget
+export KUBERNETES_PROVIDER=aws; wget -q -O - https://get.k8s.io | bash
+# Using cURL
+export KUBERNETES_PROVIDER=aws; curl -sS https://get.k8s.io | bash
+```
+
+NOTE: This script calls [cluster/kube-up.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/kube-up.sh),
+which in turn calls [cluster/aws/util.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/aws/util.sh)
+using [cluster/aws/config-default.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/aws/config-default.sh).
+
+This process takes about 5 to 10 minutes. Once the cluster is up, the IP addresses of your master and node(s) will be printed,
+as well as information about the default services running in the cluster (monitoring, logging, dns). User credentials and security
+tokens are written to `~/.kube/config`; you will need them to use the CLI or HTTP Basic Auth.
+
+By default, the script will provision a new VPC and a 4-node k8s cluster in us-west-2a (Oregon) with `t2.micro` instances running on Ubuntu.
+You can override the variables defined in [config-default.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/aws/config-default.sh) to change this behavior as follows:
+
+```shell
+export KUBE_AWS_ZONE=eu-west-1c
+export NUM_MINIONS=2
+export MINION_SIZE=m3.medium
+export AWS_S3_REGION=eu-west-1
+export AWS_S3_BUCKET=mycompany-kubernetes-artifacts
+export INSTANCE_PREFIX=k8s
+...
+```
+
+The script will also try to create or reuse a keypair called "kubernetes", and IAM profiles called "kubernetes-master" and "kubernetes-minion".
+If these already exist, make sure you want them to be used here.
+
+NOTE: If using an existing keypair named "kubernetes", then you must set the `AWS_SSH_KEY` key to point to your private key.
+
+### Alternatives
+
+A contributed [example](/{{page.version}}/docs/getting-started-guides/coreos/coreos_multinode_cluster) allows you to set up a Kubernetes cluster based on [CoreOS](http://www.coreos.com), using
+EC2 with user data (cloud-config).
+
+## Getting started with your cluster
+
+### Command line administration tool: `kubectl`
+
+The cluster startup script will leave you with a `kubernetes` directory on your workstation.
+Alternatively, you can download the latest Kubernetes release from [this page](https://github.com/kubernetes/kubernetes/releases).
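+If you take the download route instead, a minimal sketch of fetching and unpacking a release archive looks like the following (the release version shown is only an example -- pick the release you actually want from that page):
+
+```shell
+# Example only: substitute the desired release version
+wget https://github.com/kubernetes/kubernetes/releases/download/v1.0.1/kubernetes.tar.gz
+tar -xzf kubernetes.tar.gz
+cd kubernetes
+```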
+ +Next, add the appropriate binary folder to your `PATH` to access kubectl: + +```shell +# OS X +export PATH=/platforms/darwin/amd64:$PATH +# Linux +export PATH=/platforms/linux/amd64:$PATH +``` + +An up-to-date documentation page for this tool is available here: [kubectl manual](/{{page.version}}/docs/user-guide/kubectl/kubectl) + +By default, `kubectl` will use the `kubeconfig` file generated during the cluster startup for authenticating against the API. +For more information, please read [kubeconfig files](/{{page.version}}/docs/user-guide/kubeconfig-file) + +### Examples + +See [a simple nginx example](/{{page.version}}/docs/user-guide/simple-nginx) to try out your new cluster. + +The "Guestbook" application is another popular example to get started with Kubernetes: [guestbook example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/) + +For more complete applications, please look in the [examples directory](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/) + +## Tearing down the cluster + +Make sure the environment variables you used to provision your cluster are still exported, then call the following script inside the +`kubernetes` directory: + +```shell +cluster/kube-down.sh +``` + +## Further reading + +Please see the [Kubernetes docs](/{{page.version}}/docs/) for more details on administering +and using a Kubernetes cluster. \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/azure.md b/_includes/docs/docs/getting-started-guides/azure.md new file mode 100644 index 0000000000..00d137402a --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/azure.md @@ -0,0 +1,8 @@ + + +## Getting started on Microsoft Azure + +Checkout the [coreos azure getting started guide](/{{page.version}}/docs/getting-started-guides/coreos/azure/) + + + diff --git a/_includes/docs/docs/getting-started-guides/binary_release.md b/_includes/docs/docs/getting-started-guides/binary_release.md new file mode 100644 index 0000000000..9f75790189 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/binary_release.md @@ -0,0 +1,25 @@ + +You can either build a release from sources or download a pre-built release. If you do not plan on developing Kubernetes itself, we suggest a pre-built release. + +### Prebuilt Binary Release + +The list of binary releases is available for download from the [GitHub Kubernetes repo release page](https://github.com/kubernetes/kubernetes/releases). + +Download the latest release and unpack this tar file on Linux or OS X, cd to the created `kubernetes/` directory, and then follow the getting started guide for your cloud. + +### Building from source + +Get the Kubernetes source. If you are simply building a release from source there is no need to set up a full golang environment as all building happens in a Docker container. + +Building a release is simple. 
+ +```shell +git clone https://github.com/kubernetes/kubernetes.git +cd kubernetes +make release +``` + +For more details on the release process see the [`build/` directory](http://releases.k8s.io/{{page.githubbranch}}/build/) + + + diff --git a/_includes/docs/docs/getting-started-guides/centos/centos_manual_config.md b/_includes/docs/docs/getting-started-guides/centos/centos_manual_config.md new file mode 100644 index 0000000000..211efdfb74 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/centos/centos_manual_config.md @@ -0,0 +1,169 @@ + +* TOC +{:toc} + +## Prerequisites + +You need two machines with CentOS installed on them. + +## Starting a cluster + +This is a getting started guide for CentOS. It is a manual configuration so you understand all the underlying packages / services / ports, etc... + +This guide will only get ONE node working. Multiple nodes requires a functional [networking configuration](/{{page.version}}/docs/admin/networking) done outside of kubernetes. Although the additional Kubernetes configuration requirements should be obvious. + +The Kubernetes package provides a few services: kube-apiserver, kube-scheduler, kube-controller-manager, kubelet, kube-proxy. These services are managed by systemd and the configuration resides in a central location: /etc/kubernetes. We will break the services up between the hosts. The first host, centos-master, will be the Kubernetes master. This host will run the kube-apiserver, kube-controller-manager, and kube-scheduler. In addition, the master will also run _etcd_. The remaining host, centos-minion will be the node and run kubelet, proxy, cadvisor and docker. + +**System Information:** + +Hosts: + +```conf +centos-master = 192.168.121.9 +centos-minion = 192.168.121.65 +``` + +**Prepare the hosts:** + +* Create virt7-testing repo on all hosts - centos-{master,minion} with following information. + +```conf +[virt7-testing] +name=virt7-testing +baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/ +gpgcheck=0 +``` + +* Install Kubernetes on all hosts - centos-{master,minion}. This will also pull in etcd, docker, and cadvisor. + +```shell +yum -y install --enablerepo=virt7-testing kubernetes +``` + +* Note * Using etcd-0.4.6-7 (This is temporary update in documentation) + +If you do not get etcd-0.4.6-7 installed with virt7-testing repo, + +In the current virt7-testing repo, the etcd package is updated which causes service failure. 
To avoid this, + +```shell +yum erase etcd +``` + +It will uninstall the current available etcd package + +```shell +yum install http://cbs.centos.org/kojifiles/packages/etcd/0.4.6/7.el7.centos/x86_64/etcd-0.4.6-7.el7.centos.x86_64.rpm +yum -y install --enablerepo=virt7-testing kubernetes +``` + +* Add master and node to /etc/hosts on all machines (not needed if hostnames already in DNS) + +```shell +echo "192.168.121.9 centos-master +192.168.121.65 centos-minion" >> /etc/hosts +``` + +* Edit `/etc/kubernetes/config` which will be the same on all hosts to contain: + +```shell +# Comma separated list of nodes in the etcd cluster +KUBE_ETCD_SERVERS="--etcd-servers=http://centos-master:4001" + +# logging to stderr means we get it in the systemd journal +KUBE_LOGTOSTDERR="--logtostderr=true" + +# journal message level, 0 is debug +KUBE_LOG_LEVEL="--v=0" + +# Should this cluster be allowed to run privileged docker containers +KUBE_ALLOW_PRIV="--allow-privileged=false" +``` + +* Disable the firewall on both the master and node, as docker does not play well with other firewall rule managers + +```shell +systemctl disable iptables-services firewalld +systemctl stop iptables-services firewalld +``` + +**Configure the Kubernetes services on the master.** + +* Edit /etc/kubernetes/apiserver to appear as such: + +```shell +# The address on the local server to listen to. +KUBE_API_ADDRESS="--address=0.0.0.0" + +# The port on the local server to listen on. +KUBE_API_PORT="--port=8080" + +# How the replication controller and scheduler find the kube-apiserver +KUBE_MASTER="--master=http://centos-master:8080" + +# Port kubelets listen on +KUBELET_PORT="--kubelet-port=10250" + +# Address range to use for services +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" + +# Add your own! +KUBE_API_ARGS="" +``` + +* Start the appropriate services on master: + +```shell +for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler; do + systemctl restart $SERVICES + systemctl enable $SERVICES + systemctl status $SERVICES +done +``` + +**Configure the Kubernetes services on the node.** + +***We need to configure the kubelet and start the kubelet and proxy*** + +* Edit `/etc/kubernetes/kubelet` to appear as such: + +```shell +# The address for the info server to serve on +KUBELET_ADDRESS="--address=0.0.0.0" + +# The port for the info server to serve on +KUBELET_PORT="--port=10250" + +# You may leave this blank to use the actual hostname +KUBELET_HOSTNAME="--hostname-override=centos-minion" + +# Location of the api-server +KUBELET_API_SERVER="--api-servers=http://centos-master:8080" + +# Add your own! +KUBELET_ARGS="" +``` + +* Start the appropriate services on node (centos-minion). + +```shell +for SERVICES in kube-proxy kubelet docker; do + systemctl restart $SERVICES + systemctl enable $SERVICES + systemctl status $SERVICES +done +``` + +*You should be finished!* + +* Check to make sure the cluster can see the node (on centos-master) + +```shell +$ kubectl get nodes +NAME LABELS STATUS +centos-minion Ready +``` + +**The cluster should be running! Launch a test pod.** + +You should have a functional cluster, check out [101](/{{page.version}}/docs/user-guide/walkthrough/)! 
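+As a quick smoke test (a sketch -- the name and image here are arbitrary, and the first run will pause while the node pulls the image), you can ask the cluster to run a single nginx container and confirm that a pod appears on centos-minion:
+
+```shell
+kubectl run test-nginx --image=nginx
+kubectl get pods
+```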
\ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/cloudstack.md b/_includes/docs/docs/getting-started-guides/cloudstack.md new file mode 100644 index 0000000000..864037afe7 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/cloudstack.md @@ -0,0 +1,78 @@ + +CloudStack is a software to build public and private clouds based on hardware virtualization principles (traditional IaaS). To deploy Kubernetes on CloudStack there are several possibilities depending on the Cloud being used and what images are made available. [Exoscale](http://exoscale.ch) for instance makes a [CoreOS](http://coreos.com) template available, therefore instructions to deploy Kubernetes on coreOS can be used. CloudStack also has a vagrant plugin available, hence Vagrant could be used to deploy Kubernetes either using the existing shell provisioner or using new Salt based recipes. + +[CoreOS](http://coreos.com) templates for CloudStack are built [nightly](http://stable.release.core-os.net/amd64-usr/current/). CloudStack operators need to [register](http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/templates) this template in their cloud before proceeding with these Kubernetes deployment instructions. + +This guide uses an [Ansible playbook](https://github.com/runseb/ansible-kubernetes). +This is a completely automated, a single playbook deploys Kubernetes based on the coreOS [instructions](/{{page.version}}/docs/getting-started-guides/coreos/coreos_multinode_cluster). + + +This [Ansible](http://ansibleworks.com) playbook deploys Kubernetes on a CloudStack based Cloud using CoreOS images. The playbook, creates an ssh key pair, creates a security group and associated rules and finally starts coreOS instances configured via cloud-init. + + + +* TOC +{:toc} + +## Prerequisites + + $ sudo apt-get install -y python-pip + $ sudo pip install ansible + $ sudo pip install cs + +[_cs_](https://github.com/exoscale/cs) is a python module for the CloudStack API. + +Set your CloudStack endpoint, API keys and HTTP method used. + +You can define them as environment variables: `CLOUDSTACK_ENDPOINT`, `CLOUDSTACK_KEY`, `CLOUDSTACK_SECRET` and `CLOUDSTACK_METHOD`. + +Or create a `~/.cloudstack.ini` file: + + [cloudstack] + endpoint = + key = + secret = + method = post + +We need to use the http POST method to pass the _large_ userdata to the coreOS instances. + +### Clone the playbook + + $ git clone --recursive https://github.com/runseb/ansible-kubernetes.git + $ cd ansible-kubernetes + +The [ansible-cloudstack](https://github.com/resmo/ansible-cloudstack) module is setup in this repository as a submodule, hence the `--recursive`. + +### Create a Kubernetes cluster + +You simply need to run the playbook. + + $ ansible-playbook k8s.yml + +Some variables can be edited in the `k8s.yml` file. + + vars: + ssh_key: k8s + k8s_num_nodes: 2 + k8s_security_group_name: k8s + k8s_node_prefix: k8s2 + k8s_template: Linux CoreOS alpha 435 64-bit 10GB Disk + k8s_instance_type: Tiny + +This will start a Kubernetes master node and a number of compute nodes (by default 2). +The `instance_type` and `template` by default are specific to [exoscale](http://exoscale.ch), edit them to specify your CloudStack cloud specific template and instance type (i.e service offering). + +Check the tasks and templates in `roles/k8s` if you want to modify anything. 
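+If you prefer not to edit `k8s.yml` directly, the same variables can usually be overridden on the command line via Ansible's standard `--extra-vars` mechanism (the values below are only examples):
+
+    $ ansible-playbook k8s.yml --extra-vars "k8s_num_nodes=3 ssh_key=my-k8s-key"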
+ +Once the playbook as finished, it will print out the IP of the Kubernetes master: + + TASK: [k8s | debug msg='k8s master IP is {{ k8s_master.default_ip }}'] ******** + +SSH to it using the key that was created and using the _core_ user and you can list the machines in your cluster: + + $ ssh -i ~/.ssh/id_rsa_k8s core@ + $ fleetctl list-machines + MACHINE IP METADATA + a017c422... role=node + ad13bf84... role=master + e9af8293... role=node \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/coreos.md b/_includes/docs/docs/getting-started-guides/coreos.md new file mode 100644 index 0000000000..8182735ce1 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos.md @@ -0,0 +1,69 @@ + +* TOC +{:toc} + +There are multiple guides on running Kubernetes with [CoreOS](https://coreos.com/kubernetes/docs/latest/): + +### Official CoreOS Guides + +These guides are maintained by CoreOS and deploy Kubernetes the "CoreOS Way" with full TLS, the DNS add-on, and more. These guides pass Kubernetes conformance testing and we encourage you to [test this yourself](https://coreos.com/kubernetes/docs/latest/conformance-tests). + +[**Vagrant Multi-Node**](https://coreos.com/kubernetes/docs/latest/kubernetes-on-vagrant) + +Guide to setting up a multi-node cluster on Vagrant. The deployer can independently configure the number of etcd nodes, master nodes, and worker nodes to bring up a fully HA control plane. + +
+ +[**Vagrant Single-Node**](https://coreos.com/kubernetes/docs/latest/kubernetes-on-vagrant-single) + +The quickest way to set up a Kubernetes development environment locally. As easy as `git clone`, `vagrant up` and configuring `kubectl`. + +
+ +[**Full Step by Step Guide**](https://coreos.com/kubernetes/docs/latest/getting-started) + +A generic guide to setting up an HA cluster on any cloud or bare metal, with full TLS. Repeat the master or worker steps to configure more machines of that role. + +### Community Guides + +These guides are maintained by community members, cover specific platforms and use cases, and experiment with different ways of configuring Kubernetes on CoreOS. + +[**Multi-node Cluster**](/{{page.version}}/docs/getting-started-guides/coreos/coreos_multinode_cluster) + +Set up a single master, multi-worker cluster on your choice of platform: AWS, GCE, or VMware Fusion. + +
+ +[**Easy Multi-node Cluster on Google Compute Engine**](https://github.com/rimusz/coreos-multi-node-k8s-gce/blob/master/README.md) + +Scripted installation of a single master, multi-worker cluster on GCE. Kubernetes components are managed by [fleet](https://github.com/coreos/fleet). + +
+ +[**Multi-node cluster using cloud-config and Weave on Vagrant**](https://github.com/errordeveloper/weave-demos/blob/master/poseidon/README.md) + +Configure a Vagrant-based cluster of 3 machines with networking provided by Weave. + +
+ +[**Multi-node cluster using cloud-config and Vagrant**](https://github.com/pires/kubernetes-vagrant-coreos-cluster/blob/master/README.md) + +Configure a single master, multi-worker cluster locally, running on your choice of hypervisor: VirtualBox, Parallels, or VMware + +
+ +[**Multi-node cluster with Vagrant and fleet units using a small OS X App**](https://github.com/rimusz/coreos-osx-gui-kubernetes-cluster/blob/master/README.md) + +Guide to running a single master, multi-worker cluster controlled by an OS X menubar application. Uses Vagrant under the hood. + +
+ +[**Resizable multi-node cluster on Azure with Weave**](/{{page.version}}/docs/getting-started-guides/coreos/azure/) + +Guide to running an HA etcd cluster with a single master on Azure. Uses the Azure node.js CLI to resize the cluster. + +
+ +[**Multi-node cluster using cloud-config, CoreOS and VMware ESXi**](https://github.com/xavierbaude/VMware-coreos-multi-nodes-Kubernetes) + +Configure a single master, single worker cluster on VMware ESXi. \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/.gitignore b/_includes/docs/docs/getting-started-guides/coreos/azure/.gitignore new file mode 100644 index 0000000000..c2658d7d1b --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/.gitignore @@ -0,0 +1 @@ +node_modules/ diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml b/_includes/docs/docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml new file mode 100644 index 0000000000..00a20f3b4a --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/addons/skydns-rc.yaml @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-dns-v8 + namespace: kube-system + labels: + k8s-app: kube-dns + version: v8 + kubernetes.io/cluster-service: "true" +spec: + replicas: 3 + selector: + k8s-app: kube-dns + version: v8 + template: + metadata: + labels: + k8s-app: kube-dns + version: v8 + kubernetes.io/cluster-service: "true" + spec: + containers: + - name: etcd + image: gcr.io/google_containers/etcd:2.0.9 + resources: + limits: + cpu: 100m + memory: 50Mi + command: + - /usr/local/bin/etcd + - -data-dir + - /var/etcd/data + - -listen-client-urls + - http://127.0.0.1:2379,http://127.0.0.1:4001 + - -advertise-client-urls + - http://127.0.0.1:2379,http://127.0.0.1:4001 + - -initial-cluster-token + - skydns-etcd + volumeMounts: + - name: etcd-storage + mountPath: /var/etcd/data + - name: kube2sky + image: gcr.io/google_containers/kube2sky:1.11 + resources: + limits: + cpu: 100m + memory: 50Mi + args: + # command = "/kube2sky" + - -domain=kube.local + - -kube_master_url=http://kube-00:8080 + - name: skydns + image: gcr.io/google_containers/skydns:2015-03-11-001 + resources: + limits: + cpu: 100m + memory: 50Mi + args: + # command = "/skydns" + - -machines=http://localhost:4001 + - -addr=0.0.0.0:53 + - -domain=kube.local + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + - name: healthz + image: gcr.io/google_containers/exechealthz:1.0 + resources: + limits: + cpu: 10m + memory: 20Mi + args: + - -cmd=nslookup kubernetes.default.svc.kube.local localhost >/dev/null + - -port=8080 + ports: + - containerPort: 8080 + protocol: TCP + volumes: + - name: etcd-storage + emptyDir: {} + dnsPolicy: Default # Don't use cluster DNS. 
diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml b/_includes/docs/docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml new file mode 100644 index 0000000000..a0e979c266 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/addons/skydns-svc.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.1.0.3 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/azure-login.js b/_includes/docs/docs/getting-started-guides/coreos/azure/azure-login.js new file mode 100755 index 0000000000..624916b2b5 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/azure-login.js @@ -0,0 +1,3 @@ +#!/usr/bin/env node + +require('child_process').fork('node_modules/azure-cli/bin/azure', ['login'].concat(process.argv)); diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml b/_includes/docs/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml new file mode 100644 index 0000000000..4cbb480e53 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml @@ -0,0 +1,19 @@ +## This file is used as input to deployment script, which amends it as needed. +## More specifically, we need to add peer hosts for each but the elected peer. + +coreos: + units: + - name: etcd2.service + enable: true + command: start + etcd2: + name: '%H' + initial-cluster-token: 'etcd-cluster' + initial-advertise-peer-urls: 'http://%H:2380' + listen-peer-urls: 'http://%H:2380' + listen-client-urls: 'http://0.0.0.0:2379,http://0.0.0.0:4001' + advertise-client-urls: 'http://%H:2379,http://%H:4001' + initial-cluster-state: 'new' + update: + group: stable + reboot-strategy: off diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml b/_includes/docs/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml new file mode 100644 index 0000000000..340c804139 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml @@ -0,0 +1,339 @@ +## This file is used as input to deployment script, which amends it as needed. +## More specifically, we need to add environment files for as many nodes as we +## are going to deploy. 
+ +write_files: + - path: /opt/bin/curl-retry.sh + permissions: '0755' + owner: root + content: | + #!/bin/sh -x + until curl $@ + do sleep 1 + done + +coreos: + update: + group: stable + reboot-strategy: off + units: + - name: systemd-networkd-wait-online.service + drop-ins: + - name: 50-check-github-is-reachable.conf + content: | + [Service] + ExecStart=/bin/sh -x -c \ + 'until curl --silent --fail https://status.github.com/api/status.json | grep -q \"good\"; do sleep 2; done' + + - name: docker.service + drop-ins: + - name: 50-weave-kubernetes.conf + content: | + [Service] + Environment=DOCKER_OPTS='--bridge="weave" -r="false"' + + - name: weave-network.target + enable: true + content: | + [Unit] + Description=Weave Network Setup Complete + Documentation=man:systemd.special(7) + RefuseManualStart=no + After=network-online.target + [Install] + WantedBy=multi-user.target + WantedBy=kubernetes-master.target + WantedBy=kubernetes-node.target + + - name: kubernetes-master.target + enable: true + command: start + content: | + [Unit] + Description=Kubernetes Cluster Master + Documentation=http://kubernetes.io/ + RefuseManualStart=no + After=weave-network.target + Requires=weave-network.target + ConditionHost=kube-00 + Wants=kube-apiserver.service + Wants=kube-scheduler.service + Wants=kube-controller-manager.service + Wants=kube-proxy.service + [Install] + WantedBy=multi-user.target + + - name: kubernetes-node.target + enable: true + command: start + content: | + [Unit] + Description=Kubernetes Cluster Node + Documentation=http://kubernetes.io/ + RefuseManualStart=no + After=weave-network.target + Requires=weave-network.target + ConditionHost=!kube-00 + Wants=kube-proxy.service + Wants=kubelet.service + [Install] + WantedBy=multi-user.target + + - name: 10-weave.network + runtime: false + content: | + [Match] + Type=bridge + Name=weave* + [Network] + + - name: install-weave.service + enable: true + content: | + [Unit] + After=network-online.target + Before=weave.service + Before=weave-helper.service + Before=docker.service + Description=Install Weave + Documentation=http://docs.weave.works/ + Requires=network-online.target + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStartPre=/bin/mkdir -p /opt/bin/ + ExecStartPre=/opt/bin/curl-retry.sh \ + --silent \ + --location \ + https://github.com/weaveworks/weave/releases/download/latest_release/weave \ + --output /opt/bin/weave + ExecStartPre=/opt/bin/curl-retry.sh \ + --silent \ + --location \ + https://raw.github.com/errordeveloper/weave-demos/master/poseidon/weave-helper \ + --output /opt/bin/weave-helper + ExecStartPre=/usr/bin/chmod +x /opt/bin/weave + ExecStartPre=/usr/bin/chmod +x /opt/bin/weave-helper + ExecStart=/bin/echo Weave Installed + [Install] + WantedBy=weave-network.target + WantedBy=weave.service + + - name: weave-helper.service + enable: true + content: | + [Unit] + After=install-weave.service + After=docker.service + Description=Weave Network Router + Documentation=http://docs.weave.works/ + Requires=docker.service + Requires=install-weave.service + [Service] + ExecStart=/opt/bin/weave-helper + Restart=always + [Install] + WantedBy=weave-network.target + + - name: weave.service + enable: true + content: | + [Unit] + After=install-weave.service + After=docker.service + Description=Weave Network Router + Documentation=http://docs.weave.works/ + Requires=docker.service + Requires=install-weave.service + [Service] + TimeoutStartSec=0 + EnvironmentFile=/etc/weave.%H.env + ExecStartPre=/opt/bin/weave setup + 
ExecStartPre=/opt/bin/weave launch $WEAVE_PEERS + ExecStart=/usr/bin/docker attach weave + Restart=on-failure + Restart=always + ExecStop=/opt/bin/weave stop + [Install] + WantedBy=weave-network.target + + - name: weave-create-bridge.service + enable: true + content: | + [Unit] + After=network.target + After=install-weave.service + Before=weave.service + Before=docker.service + Requires=network.target + Requires=install-weave.service + [Service] + Type=oneshot + EnvironmentFile=/etc/weave.%H.env + ExecStart=/opt/bin/weave --local create-bridge + ExecStart=/usr/bin/ip addr add dev weave $BRIDGE_ADDRESS_CIDR + ExecStart=/usr/bin/ip route add $BREAKOUT_ROUTE dev weave scope link + ExecStart=/usr/bin/ip route add 224.0.0.0/4 dev weave + [Install] + WantedBy=multi-user.target + WantedBy=weave-network.target + + - name: install-kubernetes.service + enable: true + content: | + [Unit] + After=network-online.target + Before=kube-apiserver.service + Before=kube-controller-manager.service + Before=kubelet.service + Before=kube-proxy.service + Description=Download Kubernetes Binaries + Documentation=http://kubernetes.io/ + Requires=network-online.target + [Service] + Environment=KUBE_RELEASE_TARBALL=https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v1.0.1/kubernetes.tar.gz + ExecStartPre=/bin/mkdir -p /opt/ + ExecStart=/opt/bin/curl-retry.sh --silent --location $KUBE_RELEASE_TARBALL --output /tmp/kubernetes.tgz + ExecStart=/bin/tar xzvf /tmp/kubernetes.tgz -C /tmp/ + ExecStart=/bin/tar xzvf /tmp/kubernetes/server/kubernetes-server-linux-amd64.tar.gz -C /opt + ExecStartPost=/bin/chmod o+rx -R /opt/kubernetes + ExecStartPost=/bin/ln -s /opt/kubernetes/server/bin/kubectl /opt/bin/ + ExecStartPost=/bin/mv /tmp/kubernetes/examples/guestbook /home/core/guestbook-example + ExecStartPost=/bin/chown core. 
-R /home/core/guestbook-example + ExecStartPost=/bin/rm -rf /tmp/kubernetes + ExecStartPost=/bin/sed 's/# type: LoadBalancer/type: NodePort/' -i /home/core/guestbook-example/frontend-service.yaml + RemainAfterExit=yes + Type=oneshot + [Install] + WantedBy=kubernetes-master.target + WantedBy=kubernetes-node.target + + - name: kube-apiserver.service + enable: true + content: | + [Unit] + After=install-kubernetes.service + Before=kube-controller-manager.service + Before=kube-scheduler.service + ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-apiserver + Description=Kubernetes API Server + Documentation=http://kubernetes.io/ + Wants=install-kubernetes.service + ConditionHost=kube-00 + [Service] + ExecStart=/opt/kubernetes/server/bin/kube-apiserver \ + --address=0.0.0.0 \ + --port=8080 \ + $ETCD_SERVERS \ + --service-cluster-ip-range=10.1.0.0/16 \ + --logtostderr=true --v=3 + Restart=always + RestartSec=10 + [Install] + WantedBy=kubernetes-master.target + + - name: kube-scheduler.service + enable: true + content: | + [Unit] + After=kube-apiserver.service + After=install-kubernetes.service + ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-scheduler + Description=Kubernetes Scheduler + Documentation=http://kubernetes.io/ + Wants=kube-apiserver.service + ConditionHost=kube-00 + [Service] + ExecStart=/opt/kubernetes/server/bin/kube-scheduler \ + --logtostderr=true \ + --master=127.0.0.1:8080 + Restart=always + RestartSec=10 + [Install] + WantedBy=kubernetes-master.target + + - name: kube-controller-manager.service + enable: true + content: | + [Unit] + After=install-kubernetes.service + After=kube-apiserver.service + ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-controller-manager + Description=Kubernetes Controller Manager + Documentation=http://kubernetes.io/ + Wants=kube-apiserver.service + Wants=install-kubernetes.service + ConditionHost=kube-00 + [Service] + ExecStart=/opt/kubernetes/server/bin/kube-controller-manager \ + --master=127.0.0.1:8080 \ + --logtostderr=true + Restart=always + RestartSec=10 + [Install] + WantedBy=kubernetes-master.target + + - name: kubelet.service + enable: true + content: | + [Unit] + After=install-kubernetes.service + ConditionFileIsExecutable=/opt/kubernetes/server/bin/kubelet + Description=Kubernetes Kubelet + Documentation=http://kubernetes.io/ + Wants=install-kubernetes.service + ConditionHost=!kube-00 + [Service] + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests/ + ExecStart=/opt/kubernetes/server/bin/kubelet \ + --address=0.0.0.0 \ + --port=10250 \ + --hostname-override=%H \ + --api-servers=http://kube-00:8080 \ + --logtostderr=true \ + --cluster-dns=10.1.0.3 \ + --cluster-domain=kube.local \ + --config=/etc/kubernetes/manifests/ + Restart=always + RestartSec=10 + [Install] + WantedBy=kubernetes-node.target + + - name: kube-proxy.service + enable: true + content: | + [Unit] + After=install-kubernetes.service + ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-proxy + Description=Kubernetes Proxy + Documentation=http://kubernetes.io/ + Wants=install-kubernetes.service + [Service] + ExecStart=/opt/kubernetes/server/bin/kube-proxy \ + --master=http://kube-00:8080 \ + --logtostderr=true + Restart=always + RestartSec=10 + [Install] + WantedBy=kubernetes-master.target + WantedBy=kubernetes-node.target + + - name: kube-create-addons.service + enable: true + content: | + [Unit] + After=install-kubernetes.service + ConditionFileIsExecutable=/opt/kubernetes/server/bin/kubectl + 
ConditionPathIsDirectory=/etc/kubernetes/addons/ + ConditionHost=kube-00 + Description=Kubernetes Addons + Documentation=http://kubernetes.io/ + Wants=install-kubernetes.service + Wants=kube-apiserver.service + [Service] + Type=oneshot + RemainAfterExit=no + ExecStart=/opt/kubernetes/server/bin/kubectl create -f /etc/kubernetes/addons/ + SuccessExitStatus=1 + [Install] + WantedBy=kubernetes-master.target diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js b/_includes/docs/docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js new file mode 100755 index 0000000000..70248c596c --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js @@ -0,0 +1,15 @@ +#!/usr/bin/env node + +var azure = require('./lib/azure_wrapper.js'); +var kube = require('./lib/deployment_logic/kubernetes.js'); + +azure.create_config('kube', { 'etcd': 3, 'kube': 3 }); + +azure.run_task_queue([ + azure.queue_default_network(), + azure.queue_storage_if_needed(), + azure.queue_machines('etcd', 'stable', + kube.create_etcd_cloud_config), + azure.queue_machines('kube', 'stable', + kube.create_node_cloud_config), +]); diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/destroy-cluster.js b/_includes/docs/docs/getting-started-guides/coreos/azure/destroy-cluster.js new file mode 100755 index 0000000000..ce441e538a --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/destroy-cluster.js @@ -0,0 +1,7 @@ +#!/usr/bin/env node + +var azure = require('./lib/azure_wrapper.js'); + +azure.destroy_cluster(process.argv[2]); + +console.log('The cluster had been destroyed, you can delete the state file now.'); diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh b/_includes/docs/docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh new file mode 100755 index 0000000000..65dfaf5d3a --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +[ ! -z $1 ] || (echo Usage: $0 ssh_conf; exit 1) + +fe_port=$(ssh -F $1 kube-00 \ + "/opt/bin/kubectl get -o template --template='{{(index .spec.ports 0).nodePort}}' services frontend -L name=frontend" \ +) + +echo "Guestbook app is on port $fe_port, will map it to port 80 on kube-00" + +./node_modules/.bin/azure vm endpoint create kube-00 80 $fe_port + +./node_modules/.bin/azure vm endpoint show kube-00 tcp-80-${fe_port} diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/index.md b/_includes/docs/docs/getting-started-guides/coreos/azure/index.md new file mode 100644 index 0000000000..8954f58c21 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/index.md @@ -0,0 +1,225 @@ + +In this guide I will demonstrate how to deploy a Kubernetes cluster to Azure cloud. 
You will be using CoreOS with [Weave](http://weave.works), +which implements simple and secure networking, in a transparent, yet robust way. The purpose of this guide is to provide an out-of-the-box +implementation that can ultimately be taken into production with little change. It will demonstrate how to provision a dedicated Kubernetes +master and etcd nodes, and show how to scale the cluster with ease. + +* TOC +{:toc} + +### Prerequisites + +1. You need an Azure account. + +## Let's go! + +To get started, you need to checkout the code: + +```shell +git clone https://github.com/kubernetes/kubernetes +cd kubernetes/docs/getting-started-guides/coreos/azure/ +``` + +You will need to have [Node.js installed](http://nodejs.org/download/) on you machine. If you have previously used Azure CLI, you should have it already. + +First, you need to install some of the dependencies with + +```shell +npm install +``` + +Now, all you need to do is: + +```shell +./azure-login.js -u +./create-kubernetes-cluster.js +``` + +This script will provision a cluster suitable for production use, where there is a ring of 3 dedicated etcd nodes: 1 kubernetes master and 2 kubernetes nodes. +The `kube-00` VM will be the master, your work loads are only to be deployed on the nodes, `kube-01` and `kube-02`. Initially, all VMs are single-core, to +ensure a user of the free tier can reproduce it without paying extra. I will show how to add more bigger VMs later. + +![VMs in Azure](/images/docs/initial_cluster.png) + +Once the creation of Azure VMs has finished, you should see the following: + +```shell +... +azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kube_1c1496016083b4_ssh_conf ` +azure_wrapper/info: The hosts in this deployment are: + [ 'etcd-00', 'etcd-01', 'etcd-02', 'kube-00', 'kube-01', 'kube-02' ] +azure_wrapper/info: Saved state into `./output/kube_1c1496016083b4_deployment.yml` +``` + +Let's login to the master node like so: + +```shell +ssh -F ./output/kube_1c1496016083b4_ssh_conf kube-00 +``` + +> Note: config file name will be different, make sure to use the one you see. + +Check there are 2 nodes in the cluster: + +```shell +core@kube-00 ~ $ kubectl get nodes +NAME LABELS STATUS +kube-01 kubernetes.io/hostname=kube-01 Ready +kube-02 kubernetes.io/hostname=kube-02 Ready +``` + +## Deploying the workload + +Let's follow the Guestbook example now: + +```shell +kubectl create -f ~/guestbook-example +``` + +You need to wait for the pods to get deployed, run the following and wait for `STATUS` to change from `Pending` to `Running`. + +```shell +kubectl get pods --watch +``` + +> Note: the most time it will spend downloading Docker container images on each of the nodes. + +Eventually you should see: + +```shell +NAME READY STATUS RESTARTS AGE +frontend-0a9xi 1/1 Running 0 4m +frontend-4wahe 1/1 Running 0 4m +frontend-6l36j 1/1 Running 0 4m +redis-master-talmr 1/1 Running 0 4m +redis-slave-12zfd 1/1 Running 0 4m +redis-slave-3nbce 1/1 Running 0 4m +``` + +## Scaling + +Two single-core nodes are certainly not enough for a production system of today. Let's scale the cluster by adding a couple of bigger nodes. + +You will need to open another terminal window on your machine and go to the same working directory (e.g. `~/Workspace/kubernetes/docs/getting-started-guides/coreos/azure/`). 
+ +First, lets set the size of new VMs: + +```shell +export AZ_VM_SIZE=Large +``` + +Now, run scale script with state file of the previous deployment and number of nodes to add: + +```shell +core@kube-00 ~ $ ./scale-kubernetes-cluster.js ./output/kube_1c1496016083b4_deployment.yml 2 +... +azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kube_8f984af944f572_ssh_conf ` +azure_wrapper/info: The hosts in this deployment are: + [ 'etcd-00', + 'etcd-01', + 'etcd-02', + 'kube-00', + 'kube-01', + 'kube-02', + 'kube-03', + 'kube-04' ] +azure_wrapper/info: Saved state into `./output/kube_8f984af944f572_deployment.yml` +``` + +> Note: this step has created new files in `./output`. + +Back on `kube-00`: + +```shell +core@kube-00 ~ $ kubectl get nodes +NAME LABELS STATUS +kube-01 kubernetes.io/hostname=kube-01 Ready +kube-02 kubernetes.io/hostname=kube-02 Ready +kube-03 kubernetes.io/hostname=kube-03 Ready +kube-04 kubernetes.io/hostname=kube-04 Ready +``` + +You can see that two more nodes joined happily. Let's scale the number of Guestbook instances now. + +First, double-check how many replication controllers there are: + +```shell +core@kube-00 ~ $ kubectl get rc +ONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +frontend php-redis kubernetes/example-guestbook-php-redis:v2 name=frontend 3 +redis-master master redis name=redis-master 1 +redis-slave worker kubernetes/redis-slave:v2 name=redis-slave 2 +``` + +As there are 4 nodes, let's scale proportionally: + +```shell +core@kube-00 ~ $ kubectl scale --replicas=4 rc redis-slave +>>>>>>> coreos/azure: Updates for 1.0 +scaled +core@kube-00 ~ $ kubectl scale --replicas=4 rc frontend +scaled +``` + +Check what you have now: + +```shell +core@kube-00 ~ $ kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +frontend php-redis kubernetes/example-guestbook-php-redis:v2 name=frontend 4 +redis-master master redis name=redis-master 1 +redis-slave worker kubernetes/redis-slave:v2 name=redis-slave 4 +``` + +You now will have more instances of front-end Guestbook apps and Redis slaves; and, if you look up all pods labeled `name=frontend`, you should see one running on each node. + +```shell +core@kube-00 ~/guestbook-example $ kubectl get pods -l name=frontend +NAME READY STATUS RESTARTS AGE +frontend-0a9xi 1/1 Running 0 22m +frontend-4wahe 1/1 Running 0 22m +frontend-6l36j 1/1 Running 0 22m +frontend-z9oxo 1/1 Running 0 41s +``` + +## Exposing the app to the outside world + +There is no native Azure load-balancer support in Kubernetes 1.0, however here is how you can expose the Guestbook app to the Internet. + +```shell +./expose_guestbook_app_port.sh ./output/kube_1c1496016083b4_ssh_conf +Guestbook app is on port 31605, will map it to port 80 on kube-00 +info: Executing command vm endpoint create ++ Getting virtual machines ++ Reading network configuration ++ Updating network configuration +info: vm endpoint create command OK +info: Executing command vm endpoint show ++ Getting virtual machines +data: Name : tcp-80-31605 +data: Local port : 31605 +data: Protcol : tcp +data: Virtual IP Address : 137.117.156.164 +data: Direct server return : Disabled +info: vm endpoint show command OK +``` + +You then should be able to access it from anywhere via the Azure virtual IP for `kube-00` displayed above, i.e. `http://137.117.156.164/` in my case. + +## Next steps + +You now have a full-blow cluster running in Azure, congrats! 
+ +You should probably try deploy other [example apps](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/) or write your own ;) + +## Tear down... + +If you don't wish care about the Azure bill, you can tear down the cluster. It's easy to redeploy it, as you can see. + +```shell +./destroy-cluster.js ./output/kube_8f984af944f572_deployment.yml +``` + +> Note: make sure to use the _latest state file_, as after scaling there is a new one. + +By the way, with the scripts shown, you can deploy multiple clusters, if you like :) \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js b/_includes/docs/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js new file mode 100644 index 0000000000..d389efbea2 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js @@ -0,0 +1,271 @@ +var _ = require('underscore'); + +var fs = require('fs'); +var cp = require('child_process'); + +var yaml = require('js-yaml'); + +var openssl = require('openssl-wrapper'); + +var clr = require('colors'); +var inspect = require('util').inspect; + +var util = require('./util.js'); + +var coreos_image_ids = { + 'stable': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Stable-717.3.0', + 'beta': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Beta-723.3.0', // untested + 'alpha': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Alpha-745.1.0' // untested +}; + +var conf = {}; + +var hosts = { + collection: [], + ssh_port_counter: 2200, +}; + +var task_queue = []; + +exports.run_task_queue = function (dummy) { + var tasks = { + todo: task_queue, + done: [], + }; + + var pop_task = function() { + console.log(clr.yellow('azure_wrapper/task:'), clr.grey(inspect(tasks))); + var ret = {}; + ret.current = tasks.todo.shift(); + ret.remaining = tasks.todo.length; + return ret; + }; + + (function iter (task) { + if (task.current === undefined) { + if (conf.destroying === undefined) { + create_ssh_conf(); + save_state(); + } + return; + } else { + if (task.current.length !== 0) { + console.log(clr.yellow('azure_wrapper/exec:'), clr.blue(inspect(task.current))); + cp.fork('node_modules/azure-cli/bin/azure', task.current) + .on('exit', function (code, signal) { + tasks.done.push({ + code: code, + signal: signal, + what: task.current.join(' '), + remaining: task.remaining, + }); + if (code !== 0 && conf.destroying === undefined) { + console.log(clr.red('azure_wrapper/fail: Exiting due to an error.')); + save_state(); + console.log(clr.cyan('azure_wrapper/info: You probably want to destroy and re-run.')); + process.abort(); + } else { + iter(pop_task()); + } + }); + } else { + iter(pop_task()); + } + } + })(pop_task()); +}; + +var save_state = function () { + var file_name = util.join_output_file_path(conf.name, 'deployment.yml'); + try { + conf.hosts = hosts.collection; + fs.writeFileSync(file_name, yaml.safeDump(conf)); + console.log(clr.yellow('azure_wrapper/info: Saved state into `%s`'), file_name); + } catch (e) { + console.log(clr.red(e)); + } +}; + +var load_state = function (file_name) { + try { + conf = yaml.safeLoad(fs.readFileSync(file_name, 'utf8')); + console.log(clr.yellow('azure_wrapper/info: Loaded state from `%s`'), file_name); + return conf; + } catch (e) { + console.log(clr.red(e)); + } +}; + +var create_ssh_key = function (prefix) { + var opts = { + x509: true, + nodes: true, + newkey: 'rsa:2048', + subj: '/O=Weaveworks, Inc./L=London/C=GB/CN=weave.works', + keyout: util.join_output_file_path(prefix, 
'ssh.key'), + out: util.join_output_file_path(prefix, 'ssh.pem'), + }; + openssl.exec('req', opts, function (err, buffer) { + if (err) console.log(clr.red(err)); + fs.chmod(opts.keyout, '0600', function (err) { + if (err) console.log(clr.red(err)); + }); + }); + return { + key: opts.keyout, + pem: opts.out, + } +} + +var create_ssh_conf = function () { + var file_name = util.join_output_file_path(conf.name, 'ssh_conf'); + var ssh_conf_head = [ + "Host *", + "\tHostname " + conf.resources['service'] + ".cloudapp.net", + "\tUser core", + "\tCompression yes", + "\tLogLevel FATAL", + "\tStrictHostKeyChecking no", + "\tUserKnownHostsFile /dev/null", + "\tIdentitiesOnly yes", + "\tIdentityFile " + conf.resources['ssh_key']['key'], + "\n", + ]; + + fs.writeFileSync(file_name, ssh_conf_head.concat(_.map(hosts.collection, function (host) { + return _.template("Host <%= name %>\n\tPort <%= port %>\n")(host); + })).join('\n')); + console.log(clr.yellow('azure_wrapper/info:'), clr.green('Saved SSH config, you can use it like so: `ssh -F ', file_name, '`')); + console.log(clr.yellow('azure_wrapper/info:'), clr.green('The hosts in this deployment are:\n'), _.map(hosts.collection, function (host) { return host.name; })); +}; + +var get_location = function () { + if (process.env['AZ_AFFINITY']) { + return '--affinity-group=' + process.env['AZ_AFFINITY']; + } else if (process.env['AZ_LOCATION']) { + return '--location=' + process.env['AZ_LOCATION']; + } else { + return '--location=West Europe'; + } +} +var get_vm_size = function () { + if (process.env['AZ_VM_SIZE']) { + return '--vm-size=' + process.env['AZ_VM_SIZE']; + } else { + return '--vm-size=Small'; + } +} + +exports.queue_default_network = function () { + task_queue.push([ + 'network', 'vnet', 'create', + get_location(), + '--address-space=172.16.0.0', + conf.resources['vnet'], + ]); +} + +exports.queue_storage_if_needed = function() { + if (!process.env['AZURE_STORAGE_ACCOUNT']) { + conf.resources['storage_account'] = util.rand_suffix; + task_queue.push([ + 'storage', 'account', 'create', + '--type=LRS', + get_location(), + conf.resources['storage_account'], + ]); + process.env['AZURE_STORAGE_ACCOUNT'] = conf.resources['storage_account']; + } else { + // Preserve it for resizing, so we don't create a new one by accedent, + // when the environment variable is unset + conf.resources['storage_account'] = process.env['AZURE_STORAGE_ACCOUNT']; + } +}; + +exports.queue_machines = function (name_prefix, coreos_update_channel, cloud_config_creator) { + var x = conf.nodes[name_prefix]; + var vm_create_base_args = [ + 'vm', 'create', + get_location(), + get_vm_size(), + '--connect=' + conf.resources['service'], + '--virtual-network-name=' + conf.resources['vnet'], + '--no-ssh-password', + '--ssh-cert=' + conf.resources['ssh_key']['pem'], + ]; + + var cloud_config = cloud_config_creator(x, conf); + + var next_host = function (n) { + hosts.ssh_port_counter += 1; + var host = { name: util.hostname(n, name_prefix), port: hosts.ssh_port_counter }; + if (cloud_config instanceof Array) { + host.cloud_config_file = cloud_config[n]; + } else { + host.cloud_config_file = cloud_config; + } + hosts.collection.push(host); + return _.map([ + "--vm-name=<%= name %>", + "--ssh=<%= port %>", + "--custom-data=<%= cloud_config_file %>", + ], function (arg) { return _.template(arg)(host); }); + }; + + task_queue = task_queue.concat(_(x).times(function (n) { + if (conf.resizing && n < conf.old_size) { + return []; + } else { + return vm_create_base_args.concat(next_host(n), [ + 
coreos_image_ids[coreos_update_channel], 'core', + ]); + } + })); +}; + +exports.create_config = function (name, nodes) { + conf = { + name: name, + nodes: nodes, + weave_salt: util.rand_string(), + resources: { + vnet: [name, 'internal-vnet', util.rand_suffix].join('-'), + service: [name, util.rand_suffix].join('-'), + ssh_key: create_ssh_key(name), + } + }; + +}; + +exports.destroy_cluster = function (state_file) { + load_state(state_file); + if (conf.hosts === undefined) { + console.log(clr.red('azure_wrapper/fail: Nothing to delete.')); + process.abort(); + } + + conf.destroying = true; + task_queue = _.map(conf.hosts, function (host) { + return ['vm', 'delete', '--quiet', '--blob-delete', host.name]; + }); + + task_queue.push(['network', 'vnet', 'delete', '--quiet', conf.resources['vnet']]); + task_queue.push(['storage', 'account', 'delete', '--quiet', conf.resources['storage_account']]); + + exports.run_task_queue(); +}; + +exports.load_state_for_resizing = function (state_file, node_type, new_nodes) { + load_state(state_file); + if (conf.hosts === undefined) { + console.log(clr.red('azure_wrapper/fail: Nothing to look at.')); + process.abort(); + } + conf.resizing = true; + conf.old_size = conf.nodes[node_type]; + conf.old_state_file = state_file; + conf.nodes[node_type] += new_nodes; + hosts.collection = conf.hosts; + hosts.ssh_port_counter += conf.hosts.length; + process.env['AZURE_STORAGE_ACCOUNT'] = conf.resources['storage_account']; +} diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/lib/cloud_config.js b/_includes/docs/docs/getting-started-guides/coreos/azure/lib/cloud_config.js new file mode 100644 index 0000000000..d08b3f06ae --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/lib/cloud_config.js @@ -0,0 +1,58 @@ +var _ = require('underscore'); +var fs = require('fs'); +var yaml = require('js-yaml'); +var colors = require('colors/safe'); + +var write_cloud_config_from_object = function (data, output_file) { + try { + fs.writeFileSync(output_file, [ + '#cloud-config', + yaml.safeDump(data), + ].join("\n")); + return output_file; + } catch (e) { + console.log(colors.red(e)); + } +}; + +exports.generate_environment_file_entry_from_object = function (hostname, environ) { + var data = { + hostname: hostname, + environ_array: _.map(environ, function (value, key) { + return [key.toUpperCase(), JSON.stringify(value.toString())].join('='); + }), + }; + + return { + permissions: '0600', + owner: 'root', + content: _.template("<%= environ_array.join('\\n') %>\n")(data), + path: _.template("/etc/weave.<%= hostname %>.env")(data), + }; +}; + +exports.process_template = function (input_file, output_file, processor) { + var data = {}; + try { + data = yaml.safeLoad(fs.readFileSync(input_file, 'utf8')); + } catch (e) { + console.log(colors.red(e)); + } + return write_cloud_config_from_object(processor(_.clone(data)), output_file); +}; + +exports.write_files_from = function (local_dir, remote_dir) { + try { + return _.map(fs.readdirSync(local_dir), function (fn) { + return { + path: [remote_dir, fn].join('/'), + owner: 'root', + permissions: '0640', + encoding: 'base64', + content: fs.readFileSync([local_dir, fn].join('/')).toString('base64'), + }; + }); + } catch (e) { + console.log(colors.red(e)); + } +}; diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js b/_includes/docs/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js new file mode 100644 index 
0000000000..2002b43a53 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js @@ -0,0 +1,77 @@ +var _ = require('underscore'); +_.mixin(require('underscore.string').exports()); + +var util = require('../util.js'); +var cloud_config = require('../cloud_config.js'); + + +etcd_initial_cluster_conf_self = function (conf) { + var port = '2380'; + + var data = { + nodes: _(conf.nodes.etcd).times(function (n) { + var host = util.hostname(n, 'etcd'); + return [host, [host, port].join(':')].join('=http://'); + }), + }; + + return { + 'name': 'etcd2.service', + 'drop-ins': [{ + 'name': '50-etcd-initial-cluster.conf', + 'content': _.template("[Service]\nEnvironment=ETCD_INITIAL_CLUSTER=<%= nodes.join(',') %>\n")(data), + }], + }; +}; + +etcd_initial_cluster_conf_kube = function (conf) { + var port = '4001'; + + var data = { + nodes: _(conf.nodes.etcd).times(function (n) { + var host = util.hostname(n, 'etcd'); + return 'http://' + [host, port].join(':'); + }), + }; + + return { + 'name': 'kube-apiserver.service', + 'drop-ins': [{ + 'name': '50-etcd-initial-cluster.conf', + 'content': _.template("[Service]\nEnvironment=ETCD_SERVERS=--etcd-servers=<%= nodes.join(',') %>\n")(data), + }], + }; +}; + +exports.create_etcd_cloud_config = function (node_count, conf) { + var input_file = './cloud_config_templates/kubernetes-cluster-etcd-node-template.yml'; + var output_file = util.join_output_file_path('kubernetes-cluster-etcd-nodes', 'generated.yml'); + + return cloud_config.process_template(input_file, output_file, function(data) { + data.coreos.units.push(etcd_initial_cluster_conf_self(conf)); + return data; + }); +}; + +exports.create_node_cloud_config = function (node_count, conf) { + var elected_node = 0; + + var input_file = './cloud_config_templates/kubernetes-cluster-main-nodes-template.yml'; + var output_file = util.join_output_file_path('kubernetes-cluster-main-nodes', 'generated.yml'); + + var make_node_config = function (n) { + return cloud_config.generate_environment_file_entry_from_object(util.hostname(n, 'kube'), { + weave_password: conf.weave_salt, + weave_peers: n === elected_node ? 
"" : util.hostname(elected_node, 'kube'), + breakout_route: util.ipv4([10, 2, 0, 0], 16), + bridge_address_cidr: util.ipv4([10, 2, n, 1], 24), + }); + }; + + var write_files_extra = cloud_config.write_files_from('addons', '/etc/kubernetes/addons'); + return cloud_config.process_template(input_file, output_file, function(data) { + data.write_files = data.write_files.concat(_(node_count).times(make_node_config), write_files_extra); + data.coreos.units.push(etcd_initial_cluster_conf_kube(conf)); + return data; + }); +}; diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/lib/util.js b/_includes/docs/docs/getting-started-guides/coreos/azure/lib/util.js new file mode 100644 index 0000000000..2c88b8cff3 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/lib/util.js @@ -0,0 +1,33 @@ +var _ = require('underscore'); +_.mixin(require('underscore.string').exports()); + +exports.ipv4 = function (ocets, prefix) { + return { + ocets: ocets, + prefix: prefix, + toString: function () { + return [ocets.join('.'), prefix].join('/'); + } + } +}; + +exports.hostname = function hostname (n, prefix) { + return _.template("<%= pre %>-<%= seq %>")({ + pre: prefix || 'core', + seq: _.pad(n, 2, '0'), + }); +}; + +exports.rand_string = function () { + var crypto = require('crypto'); + var shasum = crypto.createHash('sha256'); + shasum.update(crypto.randomBytes(256)); + return shasum.digest('hex'); +}; + + +exports.rand_suffix = exports.rand_string().substring(50); + +exports.join_output_file_path = function(prefix, suffix) { + return './output/' + [prefix, exports.rand_suffix, suffix].join('_'); +}; diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/package.json b/_includes/docs/docs/getting-started-guides/coreos/azure/package.json new file mode 100644 index 0000000000..3269b418e0 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/package.json @@ -0,0 +1,19 @@ +{ + "name": "coreos-azure-weave", + "version": "1.0.0", + "description": "Small utility to bring up a woven CoreOS cluster", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "Ilya Dmitrichenko ", + "license": "Apache 2.0", + "dependencies": { + "azure-cli": "^0.9.5", + "colors": "^1.0.3", + "js-yaml": "^3.2.5", + "openssl-wrapper": "^0.2.1", + "underscore": "^1.7.0", + "underscore.string": "^3.0.2" + } +} diff --git a/_includes/docs/docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js b/_includes/docs/docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js new file mode 100755 index 0000000000..f606898874 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/azure/scale-kubernetes-cluster.js @@ -0,0 +1,10 @@ +#!/usr/bin/env node + +var azure = require('./lib/azure_wrapper.js'); +var kube = require('./lib/deployment_logic/kubernetes.js'); + +azure.load_state_for_resizing(process.argv[2], 'kube', parseInt(process.argv[3] || 1)); + +azure.run_task_queue([ + azure.queue_machines('kube', 'stable', kube.create_node_cloud_config), +]); diff --git a/_includes/docs/docs/getting-started-guides/coreos/bare_metal_calico.md b/_includes/docs/docs/getting-started-guides/coreos/bare_metal_calico.md new file mode 100644 index 0000000000..0f42d9c44b --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/bare_metal_calico.md @@ -0,0 +1,118 @@ + +This guide explains how to deploy a bare-metal Kubernetes cluster on CoreOS using [Calico networking](http://www.projectcalico.org). 
+ +Specifically, this guide will have you do the following: + +- Deploy a Kubernetes master node on CoreOS using cloud-config +- Deploy two Kubernetes compute nodes with Calico Networking using cloud-config + +## Prerequisites + +1. At least three bare-metal machines (or VMs) to work with. This guide will configure them as follows + - 1 Kubernetes Master + - 2 Kubernetes Nodes +2. Your nodes should have IP connectivity. + +## Cloud-config + +This guide will use [cloud-config](https://coreos.com/docs/cluster-management/setup/cloudinit-cloud-config/) to configure each of the nodes in our Kubernetes cluster. + +For ease of distribution, the cloud-config files required for this demonstration can be found on [GitHub](https://github.com/projectcalico/calico-kubernetes-coreos-demo). + +This repo includes two cloud config files: + +- `master-config.yaml`: Cloud-config for the Kubernetes master +- `node-config.yaml`: Cloud-config for each Kubernetes compute host + +In the next few steps you will be asked to configure these files and host them on an HTTP server where your cluster can access them. + +## Building Kubernetes + +To get the Kubernetes source, clone the GitHub repo, and build the binaries. + +```shell +git clone https://github.com/kubernetes/kubernetes.git +cd kubernetes +./build/release.sh +``` + +Once the binaries are built, host the entire `/_output/dockerized/bin///` folder on an accessible HTTP server so they can be accessed by the cloud-config. You'll point your cloud-config files at this HTTP server later. + +## Download CoreOS + +Let's download the CoreOS bootable ISO. We'll use this image to boot and install CoreOS on each server. + +```shell +wget http://stable.release.core-os.net/amd64-usr/current/coreos_production_iso_image.iso +``` + +You can also download the ISO from the [CoreOS website](https://coreos.com/docs/running-coreos/platforms/iso/). + +## Configure the Kubernetes Master + +Once you've downloaded the image, use it to boot your Kubernetes Master server. Once booted, you should be automatically logged in as the `core` user. + +Let's get the master-config.yaml and fill in the necessary variables. Run the following commands on your HTTP server to get the cloud-config files. + +```shell +git clone https://github.com/Metaswitch/calico-kubernetes-demo.git +cd calico-kubernetes-demo/coreos +``` + +You'll need to replace the following variables in the `master-config.yaml` file to match your deployment. + +- ``: The public key you will use for SSH access to this server. +- ``: The address used to get the kubernetes binaries over HTTP. + +> **Note:** The config will prepend `"http://"` and append `"/(kubernetes | kubectl | ...)"` to your `KUBERNETES_LOC` variable:, format accordingly + +Host the modified `master-config.yaml` file and pull it on to your Kubernetes Master server. + +The CoreOS bootable ISO comes with a tool called `coreos-install` which will allow us to install CoreOS to disk and configure the install using cloud-config. The following command will download and install stable CoreOS, using the master-config.yaml file for configuration. + +```shell +sudo coreos-install -d /dev/sda -C stable -c master-config.yaml +``` + +Once complete, eject the bootable ISO and restart the server. When it comes back up, you should have SSH access as the `core` user using the public key provided in the master-config.yaml file. + +## Configure the compute hosts + +>The following steps will set up a Kubernetes node for use as a compute host. 
This demo uses two compute hosts, so you should run the following steps on each. + +First, boot up your node using the bootable ISO we downloaded earlier. You should be automatically logged in as the `core` user. + +Let's modify the `node-config.yaml` cloud-config file on your HTTP server. Make a copy for this node, and fill in the necessary variables. + +You'll need to replace the following variables in the `node-config.yaml` file to match your deployment. + +- ``: Hostname for this node (e.g. kube-node1, kube-node2) +- ``: The public key you will use for SSH access to this server. +- ``: The IPv4 address of the Kubernetes master. +- ``: The address to use in order to get the kubernetes binaries over HTTP. +- ``: The IP and subnet to use for pods on this node. By default, this should fall within the 192.168.0.0/16 subnet. + +> Note: The DOCKER_BRIDGE_IP is the range used by this Kubernetes node to assign IP addresses to pods on this node. This subnet must not overlap with the subnets assigned to the other Kubernetes nodes in your cluster. Calico expects each DOCKER_BRIDGE_IP subnet to fall within 192.168.0.0/16 by default (e.g. 192.168.1.1/24 for node 1), but if you'd like to use pod IPs within a different subnet, simply run `calicoctl pool add ` and select DOCKER_BRIDGE_IP accordingly. + +Host the modified `node-config.yaml` file and pull it on to your Kubernetes node. + +```shell +wget http:///node-config.yaml +``` + +Install and configure CoreOS on the node using the following command. + +```shell +sudo coreos-install -d /dev/sda -C stable -c node-config.yaml +``` + +Once complete, restart the server. When it comes back up, you should have SSH access as the `core` user using the public key provided in the `node-config.yaml` file. It will take some time for the node to be fully configured. Once fully configured, you can check that the node is running with the following command on the Kubernetes master. + +```shell +/home/core/kubectl get nodes +``` + +## Testing the Cluster + +You should now have a functional bare-metal Kubernetes cluster with one master and two compute hosts. +Try running the [guestbook demo](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/) to test out your new cluster! \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/coreos/bare_metal_offline.md b/_includes/docs/docs/getting-started-guides/coreos/bare_metal_offline.md new file mode 100644 index 0000000000..d2371b2bbe --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/bare_metal_offline.md @@ -0,0 +1,692 @@ + +Deploy a CoreOS running Kubernetes environment. This particular guild is made to help those in an OFFLINE system, +whether for testing a POC before the real deal, or you are restricted to be totally offline for your applications. + +* TOC +{:toc} + +## Prerequisites + +1. Installed *CentOS 6* for PXE server +2. At least two bare metal nodes to work with + +## High Level Design + +1. Manage the tftp directory + * /tftpboot/(coreos)(centos)(RHEL) + * /tftpboot/pxelinux.0/(MAC) -> linked to Linux image config file +2. Update per install the link for pxelinux +3. Update the DHCP config to reflect the host needing deployment +4. Setup nodes to deploy CoreOS creating a etcd cluster. +5. Have no access to the public [etcd discovery tool](https://discovery.etcd.io/). +6. Installing the CoreOS slaves to become Kubernetes nodes. 
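Taken together, the design above results in a TFTP tree roughly like the sketch below. The exact files are created step by step in the following sections; this listing is only for orientation.

```shell
# Illustrative layout of the PXE server once the guide is complete
find /tftpboot
# /tftpboot/pxelinux.0                          <- syslinux bootloader
# /tftpboot/pxelinux.cfg/default                <- default boot menu
# /tftpboot/pxelinux.cfg/coreos-node-master     <- master boot target
# /tftpboot/pxelinux.cfg/coreos-node-slave      <- slave boot target
# /tftpboot/pxelinux.cfg/01-d0-00-67-13-0d-00   <- per-MAC symlink to a boot target
# /tftpboot/images/coreos/coreos_production_pxe.vmlinuz
# /tftpboot/images/coreos/coreos_production_pxe_image.cpio.gz
```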
+ +## This Guides variables + +| Node Description | MAC | IP | +| :---------------------------- | :---------------: | :---------: | +| CoreOS/etcd/Kubernetes Master | d0:00:67:13:0d:00 | 10.20.30.40 | +| CoreOS Slave 1 | d0:00:67:13:0d:01 | 10.20.30.41 | +| CoreOS Slave 2 | d0:00:67:13:0d:02 | 10.20.30.42 | + +## Setup PXELINUX CentOS + +To setup CentOS PXELINUX environment there is a complete [guide here](http://docs.fedoraproject.org/en-US/Fedora/7/html/Installation_Guide/ap-pxe-server). This section is the abbreviated version. + +1. Install packages needed on CentOS + +```shell +sudo yum install tftp-server dhcp syslinux +``` + +2. `vi /etc/xinetd.d/tftp` to enable tftp service and change disable to 'no' + +```conf +disable = no +``` + +3. Copy over the syslinux images we will need. + +```shell +su - +mkdir -p /tftpboot +cd /tftpboot +cp /usr/share/syslinux/pxelinux.0 /tftpboot +cp /usr/share/syslinux/menu.c32 /tftpboot +cp /usr/share/syslinux/memdisk /tftpboot +cp /usr/share/syslinux/mboot.c32 /tftpboot +cp /usr/share/syslinux/chain.c32 /tftpboot + +/sbin/service dhcpd start +/sbin/service xinetd start +/sbin/chkconfig tftp on +``` + +4. Setup default boot menu + +```shell +mkdir /tftpboot/pxelinux.cfg +touch /tftpboot/pxelinux.cfg/default +``` + +5. Edit the menu `vi /tftpboot/pxelinux.cfg/default` + +```conf +default menu.c32 +prompt 0 +timeout 15 +ONTIMEOUT local +display boot.msg + +MENU TITLE Main Menu + +LABEL local + MENU LABEL Boot local hard drive + LOCALBOOT 0 +``` + +Now you should have a working PXELINUX setup to image CoreOS nodes. You can verify the services by using VirtualBox locally or with bare metal servers. + +## Adding CoreOS to PXE + +This section describes how to setup the CoreOS images to live alongside a pre-existing PXELINUX environment. + +1. Find or create the TFTP root directory that everything will be based off of. + * For this document we will assume `/tftpboot/` is our root directory. +2. Once we know and have our tftp root directory we will create a new directory structure for our CoreOS images. +3. Download the CoreOS PXE files provided by the CoreOS team. + +```shell +MY_TFTPROOT_DIR=/tftpboot +mkdir -p $MY_TFTPROOT_DIR/images/coreos/ +cd $MY_TFTPROOT_DIR/images/coreos/ +wget http://stable.release.core-os.net/amd64-usr/current/coreos_production_pxe.vmlinuz +wget http://stable.release.core-os.net/amd64-usr/current/coreos_production_pxe.vmlinuz.sig +wget http://stable.release.core-os.net/amd64-usr/current/coreos_production_pxe_image.cpio.gz +wget http://stable.release.core-os.net/amd64-usr/current/coreos_production_pxe_image.cpio.gz.sig +gpg --verify coreos_production_pxe.vmlinuz.sig +gpg --verify coreos_production_pxe_image.cpio.gz.sig +``` + +4. Edit the menu `vi /tftpboot/pxelinux.cfg/default` again + +```conf +default menu.c32 +prompt 0 +timeout 300 +ONTIMEOUT local +display boot.msg + +MENU TITLE Main Menu + +LABEL local + MENU LABEL Boot local hard drive + LOCALBOOT 0 + +MENU BEGIN CoreOS Menu + + LABEL coreos-master + MENU LABEL CoreOS Master + KERNEL images/coreos/coreos_production_pxe.vmlinuz + APPEND initrd=images/coreos/coreos_production_pxe_image.cpio.gz cloud-config-url=http:///pxe-cloud-config-single-master.yml + + LABEL coreos-slave + MENU LABEL CoreOS Slave + KERNEL images/coreos/coreos_production_pxe.vmlinuz + APPEND initrd=images/coreos/coreos_production_pxe_image.cpio.gz cloud-config-url=http:///pxe-cloud-config-slave.yml +MENU END +``` + +This configuration file will now boot from local drive but have the option to PXE image CoreOS. 
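Before wiring up DHCP, it can be worth confirming that the TFTP service is actually serving the new files. A minimal check from another machine on the network (a sketch, assuming the `tftp` client package is installed and 10.20.30.242 is your PXE server) might look like:

```shell
# Fetch the CoreOS kernel over TFTP to confirm the paths used in the menu resolve
tftp 10.20.30.242 -c get images/coreos/coreos_production_pxe.vmlinuz coreos_production_pxe.vmlinuz
ls -lh coreos_production_pxe.vmlinuz
```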
+ +## DHCP configuration + +This section covers configuring the DHCP server to hand out our new images. In this case we are assuming that there are other servers that will boot alongside other images. + +1. Add the `filename` to the _host_ or _subnet_ sections. + +```conf +filename "/tftpboot/pxelinux.0"; +``` + +2. At this point we want to make pxelinux configuration files that will be the templates for the different CoreOS deployments. + +```conf +subnet 10.20.30.0 netmask 255.255.255.0 { + next-server 10.20.30.242; + option broadcast-address 10.20.30.255; + filename ""; + + ... + # http://www.syslinux.org/wiki/index.php/PXELINUX + host core_os_master { + hardware ethernet d0:00:67:13:0d:00; + option routers 10.20.30.1; + fixed-address 10.20.30.40; + option domain-name-servers 10.20.30.242; + filename "/pxelinux.0"; + } + host core_os_slave { + hardware ethernet d0:00:67:13:0d:01; + option routers 10.20.30.1; + fixed-address 10.20.30.41; + option domain-name-servers 10.20.30.242; + filename "/pxelinux.0"; + } + host core_os_slave2 { + hardware ethernet d0:00:67:13:0d:02; + option routers 10.20.30.1; + fixed-address 10.20.30.42; + option domain-name-servers 10.20.30.242; + filename "/pxelinux.0"; + } + ... +} +``` + +We will be specifying the node configuration later in the guide. + +## Kubernetes + +To deploy our configuration we need to create an `etcd` master. To do so we want to pxe CoreOS with a specific cloud-config.yml. There are two options we have here. +1. Is to template the cloud config file and programmatically create new static configs for different cluster setups. +2. Have a service discovery protocol running in our stack to do auto discovery. + +This demo we just make a static single `etcd` server to host our Kubernetes and `etcd` master servers. + +Since we are OFFLINE here most of the helping processes in CoreOS and Kubernetes are then limited. To do our setup we will then have to download and serve up our binaries for Kubernetes in our local environment. + +An easy solution is to host a small web server on the DHCP/TFTP host for all our binaries to make them available to the local CoreOS PXE machines. + +To get this up and running we are going to setup a simple `apache` server to serve our binaries needed to bootstrap Kubernetes. 
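If the PXE host does not already have Apache installed, a minimal setup on CentOS 6 might look like this (a sketch; it assumes the stock `httpd` package, which serves files from `/var/www/html/`):

```shell
sudo yum install -y httpd
sudo service httpd start
sudo chkconfig httpd on
```

With Apache running, download the binaries into its document root.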
+ +This is on the PXE server from the previous section: + +```shell +rm /etc/httpd/conf.d/welcome.conf +cd /var/www/html/ +wget -O kube-register https://github.com/kelseyhightower/kube-register/releases/download/v0.0.2/kube-register-0.0.2-linux-amd64 +wget -O setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/v1.0.0/setup-network-environment +wget https://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kubernetes --no-check-certificate +wget https://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kube-apiserver --no-check-certificate +wget https://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kube-controller-manager --no-check-certificate +wget https://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kube-scheduler --no-check-certificate +wget https://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kubectl --no-check-certificate +wget https://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kubecfg --no-check-certificate +wget https://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kubelet --no-check-certificate +wget https://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kube-proxy --no-check-certificate +wget -O flanneld https://storage.googleapis.com/k8s/flanneld --no-check-certificate +``` + +This sets up our binaries we need to run Kubernetes. This would need to be enhanced to download from the Internet for updates in the future. + +Now for the good stuff! + +## Cloud Configs + +The following config files are tailored for the OFFLINE version of a Kubernetes deployment. + +These are based on the work found here: [master.yml](/{{page.version}}/docs/getting-started-guides/coreos/cloud-configs/master.yaml), [node.yml](/{{page.version}}/docs/getting-started-guides/coreos/cloud-configs/node.yaml) + +To make the setup work, you need to replace a few placeholders: + + - Replace `` with your PXE server ip address (e.g. 10.20.30.242) + - Replace `` with the Kubernetes master ip address (e.g. 10.20.30.40) + - If you run a private docker registry, replace `rdocker.example.com` with your docker registry dns name. + - If you use a proxy, replace `rproxy.example.com` with your proxy server (and port) + - Add your own SSH public key(s) to the cloud config at the end + +### master.yml + +On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cloud-config-master.yml`. + +```yaml +#cloud-config +--- +write_files: + - path: /opt/bin/waiter.sh + owner: root + content: | + #! /usr/bin/bash + until curl http://127.0.0.1:4001/v2/machines; do sleep 2; done + - path: /opt/bin/kubernetes-download.sh + owner: root + permissions: 0755 + content: | + #! /usr/bin/bash + /usr/bin/wget -N -P "/opt/bin" "http:///kubectl" + /usr/bin/wget -N -P "/opt/bin" "http:///kubernetes" + /usr/bin/wget -N -P "/opt/bin" "http:///kubecfg" + chmod +x /opt/bin/* + - path: /etc/profile.d/opt-path.sh + owner: root + permissions: 0755 + content: | + #! 
/usr/bin/bash + PATH=$PATH/opt/bin +coreos: + units: + - name: 10-eno1.network + runtime: true + content: | + [Match] + Name=eno1 + [Network] + DHCP=yes + - name: 20-nodhcp.network + runtime: true + content: | + [Match] + Name=en* + [Network] + DHCP=none + - name: get-kube-tools.service + runtime: true + command: start + content: | + [Service] + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStart=/opt/bin/kubernetes-download.sh + RemainAfterExit=yes + Type=oneshot + - name: setup-network-environment.service + command: start + content: | + [Unit] + Description=Setup Network Environment + Documentation=https://github.com/kelseyhightower/setup-network-environment + Requires=network-online.target + After=network-online.target + [Service] + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///setup-network-environment + ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment + ExecStart=/opt/bin/setup-network-environment + RemainAfterExit=yes + Type=oneshot + - name: etcd.service + command: start + content: | + [Unit] + Description=etcd + Requires=setup-network-environment.service + After=setup-network-environment.service + [Service] + EnvironmentFile=/etc/network-environment + User=etcd + PermissionsStartOnly=true + ExecStart=/usr/bin/etcd \ + --name ${DEFAULT_IPV4} \ + --addr ${DEFAULT_IPV4}:4001 \ + --bind-addr 0.0.0.0 \ + --cluster-active-size 1 \ + --data-dir /var/lib/etcd \ + --http-read-timeout 86400 \ + --peer-addr ${DEFAULT_IPV4}:7001 \ + --snapshot true + Restart=always + RestartSec=10s + - name: fleet.socket + command: start + content: | + [Socket] + ListenStream=/var/run/fleet.sock + - name: fleet.service + command: start + content: | + [Unit] + Description=fleet daemon + Wants=etcd.service + After=etcd.service + Wants=fleet.socket + After=fleet.socket + [Service] + Environment="FLEET_ETCD_SERVERS=http://127.0.0.1:4001" + Environment="FLEET_METADATA=role=master" + ExecStart=/usr/bin/fleetd + Restart=always + RestartSec=10s + - name: etcd-waiter.service + command: start + content: | + [Unit] + Description=etcd waiter + Wants=network-online.target + Wants=etcd.service + After=etcd.service + After=network-online.target + Before=flannel.service + Before=setup-network-environment.service + [Service] + ExecStartPre=/usr/bin/chmod +x /opt/bin/waiter.sh + ExecStart=/usr/bin/bash /opt/bin/waiter.sh + RemainAfterExit=true + Type=oneshot + - name: flannel.service + command: start + content: | + [Unit] + Wants=etcd-waiter.service + After=etcd-waiter.service + Requires=etcd.service + After=etcd.service + After=network-online.target + Wants=network-online.target + Description=flannel is an etcd backed overlay network for containers + [Service] + Type=notify + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///flanneld + ExecStartPre=/usr/bin/chmod +x /opt/bin/flanneld + ExecStartPre=-/usr/bin/etcdctl mk /coreos.com/network/config '{"Network":"10.100.0.0/16", "Backend": {"Type": "vxlan"}}' + ExecStart=/opt/bin/flanneld + - name: kube-apiserver.service + command: start + content: | + [Unit] + Description=Kubernetes API Server + Documentation=https://github.com/kubernetes/kubernetes + Requires=etcd.service + After=etcd.service + [Service] + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///kube-apiserver + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-apiserver + ExecStart=/opt/bin/kube-apiserver \ + --address=0.0.0.0 \ + --port=8080 \ + 
--service-cluster-ip-range=10.100.0.0/16 \ + --etcd-servers=http://127.0.0.1:4001 \ + --logtostderr=true + Restart=always + RestartSec=10 + - name: kube-controller-manager.service + command: start + content: | + [Unit] + Description=Kubernetes Controller Manager + Documentation=https://github.com/kubernetes/kubernetes + Requires=kube-apiserver.service + After=kube-apiserver.service + [Service] + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///kube-controller-manager + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-controller-manager + ExecStart=/opt/bin/kube-controller-manager \ + --master=127.0.0.1:8080 \ + --logtostderr=true + Restart=always + RestartSec=10 + - name: kube-scheduler.service + command: start + content: | + [Unit] + Description=Kubernetes Scheduler + Documentation=https://github.com/kubernetes/kubernetes + Requires=kube-apiserver.service + After=kube-apiserver.service + [Service] + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///kube-scheduler + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-scheduler + ExecStart=/opt/bin/kube-scheduler --master=127.0.0.1:8080 + Restart=always + RestartSec=10 + - name: kube-register.service + command: start + content: | + [Unit] + Description=Kubernetes Registration Service + Documentation=https://github.com/kelseyhightower/kube-register + Requires=kube-apiserver.service + After=kube-apiserver.service + Requires=fleet.service + After=fleet.service + [Service] + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///kube-register + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-register + ExecStart=/opt/bin/kube-register \ + --metadata=role=node \ + --fleet-endpoint=unix:///var/run/fleet.sock \ + --healthz-port=10248 \ + --api-endpoint=http://127.0.0.1:8080 + Restart=always + RestartSec=10 + update: + group: stable + reboot-strategy: off +ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAAD... +``` + +### node.yml + +On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cloud-config-slave.yml`. 
+ +```yaml +#cloud-config +--- +write_files: + - path: /etc/default/docker + content: | + DOCKER_EXTRA_OPTS='--insecure-registry="rdocker.example.com:5000"' +coreos: + units: + - name: 10-eno1.network + runtime: true + content: | + [Match] + Name=eno1 + [Network] + DHCP=yes + - name: 20-nodhcp.network + runtime: true + content: | + [Match] + Name=en* + [Network] + DHCP=none + - name: etcd.service + mask: true + - name: docker.service + drop-ins: + - name: 50-insecure-registry.conf + content: | + [Service] + Environment="HTTP_PROXY=http://rproxy.example.com:3128/" "NO_PROXY=localhost,127.0.0.0/8,rdocker.example.com" + - name: fleet.service + command: start + content: | + [Unit] + Description=fleet daemon + Wants=fleet.socket + After=fleet.socket + [Service] + Environment="FLEET_ETCD_SERVERS=http://:4001" + Environment="FLEET_METADATA=role=node" + ExecStart=/usr/bin/fleetd + Restart=always + RestartSec=10s + - name: flannel.service + command: start + content: | + [Unit] + After=network-online.target + Wants=network-online.target + Description=flannel is an etcd backed overlay network for containers + [Service] + Type=notify + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///flanneld + ExecStartPre=/usr/bin/chmod +x /opt/bin/flanneld + ExecStart=/opt/bin/flanneld -etcd-endpoints http://:4001 + - name: docker.service + command: start + content: | + [Unit] + After=flannel.service + Wants=flannel.service + Description=Docker Application Container Engine + Documentation=http://docs.docker.io + [Service] + EnvironmentFile=-/etc/default/docker + EnvironmentFile=/run/flannel/subnet.env + ExecStartPre=/bin/mount --make-rprivate / + ExecStart=/usr/bin/docker -d --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU} -s=overlay -H fd:// ${DOCKER_EXTRA_OPTS} + [Install] + WantedBy=multi-user.target + - name: setup-network-environment.service + command: start + content: | + [Unit] + Description=Setup Network Environment + Documentation=https://github.com/kelseyhightower/setup-network-environment + Requires=network-online.target + After=network-online.target + [Service] + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///setup-network-environment + ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment + ExecStart=/opt/bin/setup-network-environment + RemainAfterExit=yes + Type=oneshot + - name: kube-proxy.service + command: start + content: | + [Unit] + Description=Kubernetes Proxy + Documentation=https://github.com/kubernetes/kubernetes + Requires=setup-network-environment.service + After=setup-network-environment.service + [Service] + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///kube-proxy + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-proxy + ExecStart=/opt/bin/kube-proxy \ + --etcd-servers=http://:4001 \ + --logtostderr=true + Restart=always + RestartSec=10 + - name: kube-kubelet.service + command: start + content: | + [Unit] + Description=Kubernetes Kubelet + Documentation=https://github.com/kubernetes/kubernetes + Requires=setup-network-environment.service + After=setup-network-environment.service + [Service] + EnvironmentFile=/etc/network-environment + ExecStartPre=/usr/bin/wget -N -P /opt/bin http:///kubelet + ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet + ExecStart=/opt/bin/kubelet \ + --address=0.0.0.0 \ + --port=10250 \ + --hostname-override=${DEFAULT_IPV4} \ + --api-servers=:8080 \ + --healthz-bind-address=0.0.0.0 \ + --healthz-port=10248 \ + --logtostderr=true + Restart=always + RestartSec=10 + 
update: + group: stable + reboot-strategy: off +ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAAD... +``` + +## New pxelinux.cfg file + +Create a pxelinux target file for a _slave_ node: `vi /tftpboot/pxelinux.cfg/coreos-node-slave` + +```conf +default coreos +prompt 1 +timeout 15 + +display boot.msg + +label coreos + menu default + kernel images/coreos/coreos_production_pxe.vmlinuz + append initrd=images/coreos/coreos_production_pxe_image.cpio.gz cloud-config-url=http:///coreos/pxe-cloud-config-slave.yml console=tty0 console=ttyS0 coreos.autologin=tty1 coreos.autologin=ttyS0 +``` + +And one for the _master_ node: `vi /tftpboot/pxelinux.cfg/coreos-node-master` + +```conf +default coreos +prompt 1 +timeout 15 + +display boot.msg + +label coreos + menu default + kernel images/coreos/coreos_production_pxe.vmlinuz + append initrd=images/coreos/coreos_production_pxe_image.cpio.gz cloud-config-url=http:///coreos/pxe-cloud-config-master.yml console=tty0 console=ttyS0 coreos.autologin=tty1 coreos.autologin=ttyS0 +``` + +## Specify the pxelinux targets + +Now that we have our new targets setup for master and slave we want to configure the specific hosts to those targets. We will do this by using the pxelinux mechanism of setting a specific MAC addresses to a specific pxelinux.cfg file. + +Refer to the MAC address table in the beginning of this guide. Documentation for more details can be found [here](http://www.syslinux.org/wiki/index.php/PXELINUX). + +```shell +cd /tftpboot/pxelinux.cfg +ln -s coreos-node-master 01-d0-00-67-13-0d-00 +ln -s coreos-node-slave 01-d0-00-67-13-0d-01 +ln -s coreos-node-slave 01-d0-00-67-13-0d-02 +``` + +Reboot these servers to get the images PXEd and ready for running containers! + +## Creating test pod + +Now that the CoreOS with Kubernetes installed is up and running lets spin up some Kubernetes pods to demonstrate the system. + +See [a simple nginx example](/{{page.version}}/docs/user-guide/simple-nginx) to try out your new cluster. + +For more complete applications, please look in the [examples directory](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/). + +## Helping commands for debugging + +List all keys in etcd: + +```shell +etcdctl ls --recursive +``` + +List fleet machines + +```shell +fleetctl list-machines +``` + +Check system status of services on master: + +```shell +systemctl status kube-apiserver +systemctl status kube-controller-manager +systemctl status kube-scheduler +systemctl status kube-register +``` + +Check system status of services on a node: + +```shell +systemctl status kube-kubelet +systemctl status docker.service +``` + +List Kubernetes + +```shell +kubectl get pods +kubectl get nodes +``` + +Kill all pods: + +```shell +for i in `kubectl get pods | awk '{print $1}'`; do kubectl stop pod $i; done +``` \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/coreos/cloud-configs/master.yaml b/_includes/docs/docs/getting-started-guides/coreos/cloud-configs/master.yaml new file mode 100644 index 0000000000..c3b703d1d7 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/cloud-configs/master.yaml @@ -0,0 +1,140 @@ +#cloud-config + +--- +write-files: + - path: /etc/conf.d/nfs + permissions: '0644' + content: | + OPTS_RPC_MOUNTD="" + - path: /opt/bin/wupiao + permissions: '0755' + content: | + #!/bin/bash + # [w]ait [u]ntil [p]ort [i]s [a]ctually [o]pen + [ -n "$1" ] && \ + until curl -o /dev/null -sIf http://${1}; do \ + sleep 1 && echo .; + done; + exit $? 
+ +hostname: master +coreos: + etcd2: + name: master + listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 + advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001 + initial-cluster-token: k8s_etcd + listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001 + initial-advertise-peer-urls: http://$private_ipv4:2380 + initial-cluster: master=http://$private_ipv4:2380 + initial-cluster-state: new + fleet: + metadata: "role=master" + units: + - name: generate-serviceaccount-key.service + command: start + content: | + [Unit] + Description=Generate service-account key file + + [Service] + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStart=/bin/openssl genrsa -out /opt/bin/kube-serviceaccount.key 2048 2>/dev/null + RemainAfterExit=yes + Type=oneshot + - name: setup-network-environment.service + command: start + content: | + [Unit] + Description=Setup Network Environment + Documentation=https://github.com/kelseyhightower/setup-network-environment + Requires=network-online.target + After=network-online.target + + [Service] + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/v1.0.0/setup-network-environment + ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment + ExecStart=/opt/bin/setup-network-environment + RemainAfterExit=yes + Type=oneshot + - name: fleet.service + command: start + - name: flanneld.service + command: start + drop-ins: + - name: 50-network-config.conf + content: | + [Unit] + Requires=etcd2.service + [Service] + ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network":"10.244.0.0/16", "Backend": {"Type": "vxlan"}}' + - name: docker.service + command: start + - name: kube-apiserver.service + command: start + content: | + [Unit] + Description=Kubernetes API Server + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Requires=setup-network-environment.service etcd2.service generate-serviceaccount-key.service + After=setup-network-environment.service etcd2.service generate-serviceaccount-key.service + + [Service] + EnvironmentFile=/etc/network-environment + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-apiserver -z /opt/bin/kube-apiserver https://storage.googleapis.com/kubernetes-release/release/v1.0.3/bin/linux/amd64/kube-apiserver + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-apiserver + ExecStartPre=/opt/bin/wupiao 127.0.0.1:2379/v2/machines + ExecStart=/opt/bin/kube-apiserver \ + --service-account-key-file=/opt/bin/kube-serviceaccount.key \ + --service-account-lookup=false \ + --admission-control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \ + --runtime-config=api/v1 \ + --allow-privileged=true \ + --insecure-bind-address=0.0.0.0 \ + --insecure-port=8080 \ + --kubelet-https=true \ + --secure-port=6443 \ + --service-cluster-ip-range=10.100.0.0/16 \ + --etcd-servers=http://127.0.0.1:2379 \ + --public-address-override=${DEFAULT_IPV4} \ + --logtostderr=true + Restart=always + RestartSec=10 + - name: kube-controller-manager.service + command: start + content: | + [Unit] + Description=Kubernetes Controller Manager + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Requires=kube-apiserver.service + After=kube-apiserver.service + + [Service] + ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-controller-manager -z 
/opt/bin/kube-controller-manager https://storage.googleapis.com/kubernetes-release/release/v1.0.3/bin/linux/amd64/kube-controller-manager + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-controller-manager + ExecStart=/opt/bin/kube-controller-manager \ + --service-account-private-key-file=/opt/bin/kube-serviceaccount.key \ + --master=127.0.0.1:8080 \ + --logtostderr=true + Restart=always + RestartSec=10 + - name: kube-scheduler.service + command: start + content: | + [Unit] + Description=Kubernetes Scheduler + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Requires=kube-apiserver.service + After=kube-apiserver.service + + [Service] + ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-scheduler -z /opt/bin/kube-scheduler https://storage.googleapis.com/kubernetes-release/release/v1.0.3/bin/linux/amd64/kube-scheduler + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-scheduler + ExecStart=/opt/bin/kube-scheduler --master=127.0.0.1:8080 + Restart=always + RestartSec=10 + update: + group: alpha + reboot-strategy: off diff --git a/_includes/docs/docs/getting-started-guides/coreos/cloud-configs/node.yaml b/_includes/docs/docs/getting-started-guides/coreos/cloud-configs/node.yaml new file mode 100644 index 0000000000..b9a4cafcd9 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/cloud-configs/node.yaml @@ -0,0 +1,98 @@ +#cloud-config +write-files: + - path: /opt/bin/wupiao + permissions: '0755' + content: | + #!/bin/bash + # [w]ait [u]ntil [p]ort [i]s [a]ctually [o]pen + [ -n "$1" ] && [ -n "$2" ] && while ! curl --output /dev/null \ + --silent --head --fail \ + http://${1}:${2}; do sleep 1 && echo -n .; done; + exit $? +coreos: + etcd2: + listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 + advertise-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 + initial-cluster: master=http://:2380 + proxy: on + fleet: + metadata: "role=node" + units: + - name: fleet.service + command: start + - name: flanneld.service + command: start + drop-ins: + - name: 50-network-config.conf + content: | + [Unit] + Requires=etcd2.service + [Service] + ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network":"10.244.0.0/16", "Backend": {"Type": "vxlan"}}' + - name: docker.service + command: start + - name: setup-network-environment.service + command: start + content: | + [Unit] + Description=Setup Network Environment + Documentation=https://github.com/kelseyhightower/setup-network-environment + Requires=network-online.target + After=network-online.target + + [Service] + ExecStartPre=-/usr/bin/mkdir -p /opt/bin + ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/v1.0.0/setup-network-environment + ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment + ExecStart=/opt/bin/setup-network-environment + RemainAfterExit=yes + Type=oneshot + - name: kube-proxy.service + command: start + content: | + [Unit] + Description=Kubernetes Proxy + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Requires=setup-network-environment.service + After=setup-network-environment.service + + [Service] + ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-proxy -z /opt/bin/kube-proxy https://storage.googleapis.com/kubernetes-release/release/v1.0.3/bin/linux/amd64/kube-proxy + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-proxy + # wait for kubernetes master to be up and ready + ExecStartPre=/opt/bin/wupiao 8080 + 
ExecStart=/opt/bin/kube-proxy \ + --master=:8080 \ + --logtostderr=true + Restart=always + RestartSec=10 + - name: kube-kubelet.service + command: start + content: | + [Unit] + Description=Kubernetes Kubelet + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Requires=setup-network-environment.service + After=setup-network-environment.service + + [Service] + EnvironmentFile=/etc/network-environment + ExecStartPre=/usr/bin/curl -L -o /opt/bin/kubelet -z /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v1.0.3/bin/linux/amd64/kubelet + ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet + # wait for kubernetes master to be up and ready + ExecStartPre=/opt/bin/wupiao 8080 + ExecStart=/opt/bin/kubelet \ + --address=0.0.0.0 \ + --port=10250 \ + --hostname-override=${DEFAULT_IPV4} \ + --api-servers=:8080 \ + --allow-privileged=true \ + --logtostderr=true \ + --cadvisor-port=4194 \ + --healthz-bind-address=0.0.0.0 \ + --healthz-port=10248 + Restart=always + RestartSec=10 + update: + group: alpha + reboot-strategy: off diff --git a/_includes/docs/docs/getting-started-guides/coreos/coreos_multinode_cluster.md b/_includes/docs/docs/getting-started-guides/coreos/coreos_multinode_cluster.md new file mode 100644 index 0000000000..911980b8c7 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/coreos/coreos_multinode_cluster.md @@ -0,0 +1,197 @@ + +Use the [master.yaml](/{{page.version}}/docs/getting-started-guides/coreos/cloud-configs/master.yaml) and [node.yaml](/{{page.version}}/docs/getting-started-guides/coreos/cloud-configs/node.yaml) cloud-configs to provision a multi-node Kubernetes cluster. + +> **Attention**: This requires at least CoreOS version **[695.0.0][coreos695]**, which includes `etcd2`. + +[coreos695]: https://coreos.com/releases/#695.0.0 + +* TOC +{:toc} + +### AWS + +*Attention:* Replace `` below for a [suitable version of CoreOS image for AWS](https://coreos.com/docs/running-coreos/cloud-providers/ec2/). + +#### Provision the Master + +```shell +aws ec2 create-security-group --group-name kubernetes --description "Kubernetes Security Group" +aws ec2 authorize-security-group-ingress --group-name kubernetes --protocol tcp --port 22 --cidr 0.0.0.0/0 +aws ec2 authorize-security-group-ingress --group-name kubernetes --protocol tcp --port 80 --cidr 0.0.0.0/0 +aws ec2 authorize-security-group-ingress --group-name kubernetes --source-security-group-name kubernetes +``` + +```shell +aws ec2 run-instances \ +--image-id \ +--key-name \ +--region us-west-2 \ +--security-groups kubernetes \ +--instance-type m3.medium \ +--user-data file://master.yaml +``` + +#### Capture the private IP address + +```shell +aws ec2 describe-instances --instance-id +``` + +#### Edit node.yaml + +Edit `node.yaml` and replace all instances of `` with the private IP address of the master node. + +#### Provision worker nodes + +```shell +aws ec2 run-instances \ +--count 1 \ +--image-id \ +--key-name \ +--region us-west-2 \ +--security-groups kubernetes \ +--instance-type m3.medium \ +--user-data file://node.yaml +``` + +### Google Compute Engine (GCE) + +*Attention:* Replace `` below for a [suitable version of CoreOS image for Google Compute Engine](https://coreos.com/docs/running-coreos/cloud-providers/google-compute-engine/). 
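One way to find a current image name is to list the images CoreOS publishes in the `coreos-cloud` project (a sketch; the exact flags vary between gcloud releases):

```shell
gcloud compute images list --project coreos-cloud --no-standard-images
```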
+ +#### Provision the Master + +```shell +gcloud compute instances create master \ +--image-project coreos-cloud \ +--image \ +--boot-disk-size 200GB \ +--machine-type n1-standard-1 \ +--zone us-central1-a \ +--metadata-from-file user-data=master.yaml +``` + +#### Capture the private IP address + +```shell +gcloud compute instances list +``` + +#### Edit node.yaml + +Edit `node.yaml` and replace all instances of `` with the private IP address of the master node. + +#### Provision worker nodes + +```shell +gcloud compute instances create node1 \ +--image-project coreos-cloud \ +--image \ +--boot-disk-size 200GB \ +--machine-type n1-standard-1 \ +--zone us-central1-a \ +--metadata-from-file user-data=node.yaml +``` + +#### Establish network connectivity + +Next, setup an ssh tunnel to the master so you can run kubectl from your local host. +In one terminal, run `gcloud compute ssh master --ssh-flag="-L 8080:127.0.0.1:8080"` and in a second +run `gcloud compute ssh master --ssh-flag="-R 8080:127.0.0.1:8080"`. + +### OpenStack + +These instructions are for running on the command line. Most of this you can also do through the Horizon dashboard. +These instructions were tested on the Ice House release on a Metacloud distribution of OpenStack but should be similar if not the same across other versions/distributions of OpenStack. + +#### Make sure you can connect with OpenStack + +Make sure the environment variables are set for OpenStack such as: + +```shell +OS_TENANT_ID +OS_PASSWORD +OS_AUTH_URL +OS_USERNAME +OS_TENANT_NAME +``` + +Test this works with something like: + +```shell +nova list +``` + +#### Get a Suitable CoreOS Image + +You'll need a [suitable version of CoreOS image for OpenStack](https://coreos.com/os/docs/latest/booting-on-openstack) +Once you download that, upload it to glance. An example is shown below: + +```shell +glance image-create --name CoreOS723 \ +--container-format bare --disk-format qcow2 \ +--file coreos_production_openstack_image.img \ +--is-public True +``` + +#### Create security group + +```shell +nova secgroup-create kubernetes "Kubernetes Security Group" +nova secgroup-add-rule kubernetes tcp 22 22 0.0.0.0/0 +nova secgroup-add-rule kubernetes tcp 80 80 0.0.0.0/0 +``` + +#### Provision the Master + +```shell +nova boot \ +--image \ +--key-name \ +--flavor \ +--security-group kubernetes \ +--user-data files/master.yaml \ +kube-master +``` + +`` is the CoreOS image name. In our example we can use the image we created in the previous step and put in 'CoreOS723' + +`` is the keypair name that you already generated to access the instance. + +`` is the flavor ID you use to size the instance. Run `nova flavor-list` +to get the IDs. 3 on the system this was tested with gives the m1.large size. + +The important part is to ensure you have the files/master.yml as this is what will do all the post boot configuration. This path is relevant so we are assuming in this example that you are running the nova command in a directory where there is a subdirectory called files that has the master.yml file in it. Absolute paths also work. + +Next, assign it a public IP address: + +```shell +nova floating-ip-list +``` + +Get an IP address that's free and run: + +```shell +nova floating-ip-associate kube-master +``` + +...where `` is the IP address that was available from the `nova floating-ip-list` +command. + +#### Provision Worker Nodes + +Edit `node.yaml` +and replace all instances of `````` +with the private IP address of the master node. 
You can get this by running ```nova show kube-master``` +assuming you named your instance kube master. This is not the floating IP address you just assigned it. + +```shell +nova boot \ +--image \ +--key-name \ +--flavor \ +--security-group kubernetes \ +--user-data files/node.yaml \ +minion01 +``` + +This is basically the same as the master nodes but with the node.yaml post-boot script instead of the master. \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/dcos.md b/_includes/docs/docs/getting-started-guides/dcos.md new file mode 100644 index 0000000000..4ad5d9bcfc --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/dcos.md @@ -0,0 +1,128 @@ + +This guide will walk you through installing [Kubernetes-Mesos](https://github.com/mesosphere/kubernetes-mesos) on [Datacenter Operating System (DCOS)](https://mesosphere.com/product/) with the [DCOS CLI](https://github.com/mesosphere/dcos-cli) and operating Kubernetes with the [DCOS Kubectl plugin](https://github.com/mesosphere/dcos-kubectl). + +* TOC +{:toc} + + +## About Kubernetes on DCOS + +DCOS is system software that manages computer cluster hardware and software resources and provides common services for distributed applications. Among other services, it provides [Apache Mesos](http://mesos.apache.org/) as its cluster kernel and [Marathon](https://mesosphere.github.io/marathon/) as its init system. With DCOS CLI, Mesos frameworks like [Kubernetes-Mesos](https://github.com/mesosphere/kubernetes-mesos) can be installed with a single command. + +Another feature of the DCOS CLI is that it allows plugins like the [DCOS Kubectl plugin](https://github.com/mesosphere/dcos-kubectl). This allows for easy access to a version-compatible Kubectl without having to manually download or install. + +Further information about the benefits of installing Kubernetes on DCOS can be found in the [Kubernetes-Mesos documentation](https://releases.k8s.io/{{page.githubbranch}}/contrib/mesos/README.md). + +For more details about the Kubernetes DCOS packaging, see the [Kubernetes-Mesos project](https://github.com/mesosphere/kubernetes-mesos). + +Since Kubernetes-Mesos is still alpha, it is a good idea to familiarize yourself with the [current known issues](https://releases.k8s.io/{{page.githubbranch}}/contrib/mesos/docs/issues.md) which may limit or modify the behavior of Kubernetes on DCOS. + +If you have problems completing the steps below, please [file an issue against the kubernetes-mesos project](https://github.com/mesosphere/kubernetes-mesos/issues). + + +## Resources + +Explore the following resources for more information about Kubernetes, Kubernetes on Mesos/DCOS, and DCOS itself. + +- [DCOS Documentation](https://docs.mesosphere.com/) +- [Managing DCOS Services](https://docs.mesosphere.com/services/kubernetes/) +- [Kubernetes Examples](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/) +- [Kubernetes on Mesos Documentation](https://releases.k8s.io/{{page.githubbranch}}/contrib/mesos/README.md) +- [Kubernetes on Mesos Release Notes](https://github.com/mesosphere/kubernetes-mesos/releases) +- [Kubernetes on DCOS Package Source](https://github.com/mesosphere/kubernetes-mesos) + + +## Prerequisites + +- A running [DCOS cluster](https://mesosphere.com/product/) + - [DCOS Community Edition](https://docs.mesosphere.com/install/) is currently available on [AWS](https://mesosphere.com/amazon/). 
+ - [DCOS Enterprise Edition](https://mesosphere.com/product/) can be deployed on virtual or bare metal machines. Contact sales@mesosphere.com for more info and to set up an engagement. +- [DCOS CLI](https://docs.mesosphere.com/install/cli/) installed locally + + +## Install + +1. Configure and validate the [Mesosphere Multiverse](https://github.com/mesosphere/multiverse) as a package source repository + + ```shell +$ dcos config prepend package.sources https://github.com/mesosphere/multiverse/archive/version-1.x.zip + $ dcos package update --validate + ``` +2. Install etcd + + By default, the Kubernetes DCOS package starts a single-node etcd. In order to avoid state loss in the event of Kubernetes component container failure, install an HA [etcd-mesos](https://github.com/mesosphere/etcd-mesos) cluster on DCOS. + + ```shell +$ dcos package install etcd + ``` +3. Verify that etcd is installed and healthy + + The etcd cluster takes a short while to deploy. Verify that `/etcd` is healthy before going on to the next step. + + ```shell +$ dcos marathon app list + ID MEM CPUS TASKS HEALTH DEPLOYMENT CONTAINER CMD + /etcd 128 0.2 1/1 1/1 --- DOCKER None + ``` +4. Create Kubernetes installation configuration + + Configure Kubernetes to use the HA etcd installed on DCOS. + + ```shell +$ cat >/tmp/options.json < +``` + +Otherwise, we'll use latest `hyperkube` image as default k8s version. + +## Master Node + +The first step in the process is to initialize the master node. + +Clone the Kubernetes repo, and run [master.sh](/{{page.version}}/docs/getting-started-guides/docker-multinode/master.sh) on the master machine with root: + +```shell +cd kubernetes/docs/getting-started-guides/docker-multinode/ +./master.sh +... +`Master done!` +``` + +See [here](/{{page.version}}/docs/getting-started-guides/docker-multinode/master) for detailed instructions explanation. + +## Adding a worker node + +Once your master is up and running you can add one or more workers on different machines. + +Clone the Kubernetes repo, and run [worker.sh](/{{page.version}}/docs/getting-started-guides/docker-multinode/worker.sh) on the worker machine with root: + +```shell +export MASTER_IP= +cd kubernetes/docs/getting-started-guides/docker-multinode/ +./worker.sh +... +`Worker done!` +```` + +See [here](/{{page.version}}/docs/getting-started-guides/docker-multinode/worker) for detailed instructions explanation. + +## Deploy a DNS + +See [here](/{{page.version}}/docs/getting-started-guides/docker-multinode/deployDNS) for instructions. 
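Once the DNS add-on has been created, a quick sanity check is to list the pods in the `kube-system` namespace (a sketch; the `-s` flag assumes the same master address used in the steps above):

```shell
kubectl -s "http://${MASTER_IP}:8080" --namespace=kube-system get pods
```

The SkyDNS pod should eventually show a `Running` status.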
+ +## Testing your cluster + +Once your cluster has been created you can [test it out](/{{page.version}}/docs/getting-started-guides/docker-multinode/testing) + +For more complete applications, please look in the [examples directory](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/) diff --git a/_includes/docs/docs/getting-started-guides/docker-multinode/deployDNS.md b/_includes/docs/docs/getting-started-guides/docker-multinode/deployDNS.md new file mode 100644 index 0000000000..611f9e0198 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/docker-multinode/deployDNS.md @@ -0,0 +1,49 @@ + +### Get the template file + +First of all, download the template dns rc and svc file from + +[skydns-rc template](/{{page.version}}/docs/getting-started-guides/docker-multinode/skydns-rc.yaml.in) + +[skydns-svc template](/{{page.version}}/docs/getting-started-guides/docker-multinode/skydns-svc.yaml.in) + +### Set env + +Then you need to set `DNS_REPLICAS` , `DNS_DOMAIN` , `DNS_SERVER_IP` , `KUBE_SERVER` ENV. + +```shell +$ export DNS_REPLICAS=1 + +$ export DNS_DOMAIN=cluster.local # specify in startup parameter `--cluster-domain` for containerized kubelet + +$ export DNS_SERVER_IP=10.0.0.10 # specify in startup parameter `--cluster-dns` for containerized kubelet + +$ export KUBE_SERVER=10.10.103.250 # your master server ip, you may change it +``` + +### Replace the corresponding value in the template. + +```shell +$ sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g;s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g;s/{kube_server_url}/${KUBE_SERVER}/g;" skydns-rc.yaml.in > ./skydns-rc.yaml + +$ sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" skydns-svc.yaml.in > ./skydns-svc.yaml +``` + +### Use `kubectl` to create skydns rc and service + + +```shell +$ kubectl -s "$KUBE_SERVER:8080" --namespace=kube-system create -f ./skydns-rc.yaml + +$ kubectl -s "$KUBE_SERVER:8080" --namespace=kube-system create -f ./skydns-svc.yaml +``` + +### Test if DNS works + +Follow [this link](https://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns#how-do-i-test-if-it-is-working) to check it out. + + + + + + diff --git a/_includes/docs/docs/getting-started-guides/docker-multinode/master.md b/_includes/docs/docs/getting-started-guides/docker-multinode/master.md new file mode 100644 index 0000000000..9e1d3d2b53 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/docker-multinode/master.md @@ -0,0 +1,177 @@ + +We'll begin by setting up the master node. For the purposes of illustration, we'll assume that the IP of this machine is `${MASTER_IP}` + +There are two main phases to installing the master: + * [Setting up `flanneld` and `etcd`](#setting-up-flanneld-and-etcd) + * [Starting the Kubernetes master components](#starting-the-kubernetes-master) + + +## Setting up flanneld and etcd + +_Note_: +There is a [bug](https://github.com/docker/docker/issues/14106) in Docker 1.7.0 that prevents this from working correctly. +Please install Docker 1.6.2 or Docker 1.7.1. + +### Setup Docker-Bootstrap + +We're going to use `flannel` to set up networking between Docker daemons. Flannel itself (and etcd on which it relies) will run inside of +Docker containers themselves. To achieve this, we need a separate "bootstrap" instance of the Docker daemon. This daemon will be started with +`--iptables=false` so that it can only run containers with `--net=host`. That's sufficient to bootstrap our system. 
+
+Run:
+
+```shell
+sudo sh -c 'docker -d -H unix:///var/run/docker-bootstrap.sock -p /var/run/docker-bootstrap.pid --iptables=false --ip-masq=false --bridge=none --graph=/var/lib/docker-bootstrap 2> /var/log/docker-bootstrap.log 1> /dev/null &'
+```
+
+_Important Note_:
+If you are running this on a long-running system, rather than experimenting, you should run the bootstrap Docker instance under something like SysV init, upstart or systemd so that it is restarted
+across reboots and failures.
+
+
+### Startup etcd for flannel and the API server to use
+
+Run:
+
+```shell
+sudo docker -H unix:///var/run/docker-bootstrap.sock run --net=host -d gcr.io/google_containers/etcd:2.0.12 /usr/local/bin/etcd --addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data
+```
+
+Next, you need to set a CIDR range for flannel. This CIDR should be chosen to be non-overlapping with any existing network you are using:
+
+```shell
+sudo docker -H unix:///var/run/docker-bootstrap.sock run --net=host gcr.io/google_containers/etcd:2.0.12 etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16" }'
+```
+
+### Set up Flannel on the master node
+
+Flannel is a network abstraction layer built by CoreOS. We will use it to provide simplified networking between our Pods of containers.
+
+Flannel re-configures the bridge that Docker uses for networking. As a result we need to stop Docker, reconfigure its networking, and then restart Docker.
+
+#### Bring down Docker
+
+To re-configure Docker to use flannel, we need to take Docker down, run flannel and then restart Docker.
+
+Stopping Docker is system dependent. It may be:
+
+```shell
+sudo /etc/init.d/docker stop
+```
+
+or
+
+```shell
+sudo systemctl stop docker
+```
+
+or it may be something else.
+
+#### Run flannel
+
+Now run flanneld itself:
+
+```shell
+sudo docker -H unix:///var/run/docker-bootstrap.sock run -d --net=host --privileged -v /dev/net:/dev/net quay.io/coreos/flannel:0.5.0
+```
+
+The previous command should have printed a really long hash; copy this hash.
+
+Now get the subnet settings from flannel, using the hash you just copied:
+
+```shell
+sudo docker -H unix:///var/run/docker-bootstrap.sock exec <really-long-hash-from-above-here> cat /run/flannel/subnet.env
+```
+
+#### Edit the docker configuration
+
+You now need to edit the docker configuration to activate the new flags. Again, this is system specific.
+
+This may be in `/etc/default/docker` or `/etc/systemd/service/docker.service`, or it may be elsewhere.
+
+Regardless, you need to add the following to the docker command line:
+
+```shell
+--bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}
+```
+
+#### Remove the existing Docker bridge
+
+Docker creates a bridge named `docker0` by default. You need to remove this:
+
+```shell
+sudo /sbin/ifconfig docker0 down
+sudo brctl delbr docker0
+```
+
+You may need to install the `bridge-utils` package for the `brctl` binary.
+
+#### Restart Docker
+
+Again this is system dependent; it may be:
+
+```shell
+sudo /etc/init.d/docker start
+```
+
+or it may be:
+
+```shell
+systemctl start docker
+```
+
+## Starting the Kubernetes Master
+
+Now that your networking is set up, you can start up Kubernetes. This is the same as the single-node case; we will use the "main" instance of the Docker daemon for the Kubernetes components.
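+
+Before launching the master components, you may want to confirm that Docker really picked up the flannel settings. A quick, optional check (interface names can vary; `flannel0` is what the default udp backend creates):
+
+```shell
+# docker0 should now have an address inside the flannel network (10.1.0.0/16 in the
+# example above) instead of the default 172.17.x.x, and a flannel interface should exist.
+ip addr show docker0
+ip addr show flannel0
+```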
+
+```shell
+sudo docker run \
+    --volume=/:/rootfs:ro \
+    --volume=/sys:/sys:ro \
+    --volume=/dev:/dev \
+    --volume=/var/lib/docker/:/var/lib/docker:rw \
+    --volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
+    --volume=/var/run:/var/run:rw \
+    --net=host \
+    --privileged=true \
+    --pid=host \
+    -d \
+    gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube kubelet --api-servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable-server --hostname-override=127.0.0.1 --config=/etc/kubernetes/manifests-multi --cluster-dns=10.0.0.10 --cluster-domain=cluster.local
+```
+
+> Note that `--cluster-dns` and `--cluster-domain` are used to deploy DNS; feel free to omit them if DNS is not needed.
+
+### Also run the service proxy
+
+```shell
+sudo docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2
+```
+
+### Test it out
+
+At this point, you should have a functioning 1-node cluster. Let's test it out!
+
+Download the kubectl binary and add it to your PATH:
+([OS X](http://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/darwin/amd64/kubectl))
+([linux](http://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/linux/amd64/kubectl))
+
+List the nodes:
+
+```shell
+kubectl get nodes
+```
+
+This should print:
+
+```shell
+NAME        LABELS                             STATUS
+127.0.0.1   kubernetes.io/hostname=127.0.0.1   Ready
+```
+
+If the status of the node is `NotReady` or `Unknown`, please check that all of the containers you created are successfully running.
+If all else fails, ask questions on [Slack](/{{page.version}}/docs/troubleshooting/#slack).
+
+
+### Next steps
+
+Move on to [adding one or more workers](/{{page.version}}/docs/getting-started-guides/docker-multinode/worker) or [deploying DNS](/{{page.version}}/docs/getting-started-guides/docker-multinode/deployDNS)
\ No newline at end of file
diff --git a/_includes/docs/docs/getting-started-guides/docker-multinode/master.sh b/_includes/docs/docs/getting-started-guides/docker-multinode/master.sh
new file mode 100755
index 0000000000..0a247804ec
--- /dev/null
+++ b/_includes/docs/docs/getting-started-guides/docker-multinode/master.sh
@@ -0,0 +1,176 @@
+#!/bin/bash
+
+# Copyright 2015 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A script to install the k8s master node.
+# Author @wizard_cxy @reouser
+
+set -e
+
+# Make sure docker daemon is running
+if ( ! ps -ef | grep "/usr/bin/docker" | grep -v 'grep' &> /dev/null ); then
+    echo "Docker is not running on this machine!"
+ exit 1 +fi + +# Make sure k8s version env is properly set +if [ -z ${K8S_VERSION} ]; then + K8S_VERSION="1.0.3" + echo "K8S_VERSION is not set, using default: ${K8S_VERSION}" +else + echo "k8s version is set to: ${K8S_VERSION}" +fi + + +# Run as root +if [ "$(id -u)" != "0" ]; then + echo >&2 "Please run as root" + exit 1 +fi + +# Check if a command is valid +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +lsb_dist="" + +# Detect the OS distro, we support ubuntu, debian, mint, centos, fedora dist +detect_lsb() { + case "$(uname -m)" in + *64) + ;; + *) + echo "Error: We currently only support 64-bit platforms." + exit 1 + ;; + esac + + if command_exists lsb_release; then + lsb_dist="$(lsb_release -si)" + fi + if [ -z ${lsb_dist} ] && [ -r /etc/lsb-release ]; then + lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z ${lsb_dist} ] && [ -r /etc/debian_version ]; then + lsb_dist='debian' + fi + if [ -z ${lsb_dist} ] && [ -r /etc/fedora-release ]; then + lsb_dist='fedora' + fi + if [ -z ${lsb_dist} ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" + fi + + lsb_dist="$(echo ${lsb_dist} | tr '[:upper:]' '[:lower:]')" +} + + +# Start the bootstrap daemon +bootstrap_daemon() { + sudo -b docker -d -H unix:///var/run/docker-bootstrap.sock -p /var/run/docker-bootstrap.pid --iptables=false --ip-masq=false --bridge=none --graph=/var/lib/docker-bootstrap 2> /var/log/docker-bootstrap.log 1> /dev/null + + sleep 5 +} + +# Start k8s components in containers +DOCKER_CONF="" + +start_k8s(){ + # Start etcd + docker -H unix:///var/run/docker-bootstrap.sock run --restart=always --net=host -d gcr.io/google_containers/etcd:2.0.12 /usr/local/bin/etcd --addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data + + sleep 5 + # Set flannel net config + docker -H unix:///var/run/docker-bootstrap.sock run --net=host gcr.io/google_containers/etcd:2.0.12 etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16", "Backend": {"Type": "vxlan"}}' + + # iface may change to a private network interface, eth0 is for default + flannelCID=$(docker -H unix:///var/run/docker-bootstrap.sock run --restart=always -d --net=host --privileged -v /dev/net:/dev/net quay.io/coreos/flannel:0.5.0 /opt/bin/flanneld -iface="eth0") + + sleep 8 + + # Copy flannel env out and source it on the host + docker -H unix:///var/run/docker-bootstrap.sock cp ${flannelCID}:/run/flannel/subnet.env . 
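+    # subnet.env defines FLANNEL_SUBNET and FLANNEL_MTU; they are appended to DOCKER_OPTS below.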
+ source subnet.env + + # Configure docker net settings, then restart it + case "$lsb_dist" in + fedora|centos|amzn) + DOCKER_CONF="/etc/sysconfig/docker" + ;; + ubuntu|debian|linuxmint) + DOCKER_CONF="/etc/default/docker" + ;; + esac + + # Append the docker opts + echo "DOCKER_OPTS=\"\$DOCKER_OPTS --mtu=${FLANNEL_MTU} --bip=${FLANNEL_SUBNET}\"" | sudo tee -a ${DOCKER_CONF} + + + # sleep a little bit + ifconfig docker0 down + + case "$lsb_dist" in + fedora|centos|amzn) + yum install bridge-utils && brctl delbr docker0 && systemctl restart docker + ;; + ubuntu|debian|linuxmint) + apt-get install bridge-utils && brctl delbr docker0 && service docker restart + ;; + esac + + # sleep a little bit + sleep 5 + + # Start kubelet & proxy, then start master components as pods + docker run \ + --net=host \ + --pid=host \ + --privileged \ + --restart=always \ + -d \ + -v /sys:/sys:ro \ + -v /var/run:/var/run:rw \ + -v /:/rootfs:ro \ + -v /dev:/dev \ + -v /var/lib/docker/:/var/lib/docker:ro \ + -v /var/lib/kubelet/:/var/lib/kubelet:rw \ + gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ + /hyperkube kubelet \ + --api-servers=http://localhost:8080 \ + --v=2 --address=0.0.0.0 --enable-server \ + --hostname-override=127.0.0.1 \ + --config=/etc/kubernetes/manifests-multi \ + --cluster-dns=10.0.0.10 \ + --cluster-domain=cluster.local + + docker run \ + -d \ + --net=host \ + --privileged \ + gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ + /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 +} + +echo "Detecting your OS distro ..." +detect_lsb + +echo "Starting bootstrap docker ..." +bootstrap_daemon + +echo "Starting k8s ..." +start_k8s + +echo "Master done!" diff --git a/_includes/docs/docs/getting-started-guides/docker-multinode/skydns-rc.yaml.in b/_includes/docs/docs/getting-started-guides/docker-multinode/skydns-rc.yaml.in new file mode 100644 index 0000000000..845af9bf94 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/docker-multinode/skydns-rc.yaml.in @@ -0,0 +1,92 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-dns-v8 + namespace: kube-system + labels: + k8s-app: kube-dns + version: v8 + kubernetes.io/cluster-service: "true" +spec: + replicas: {{ pillar['dns_replicas'] }} + selector: + k8s-app: kube-dns + version: v8 + template: + metadata: + labels: + k8s-app: kube-dns + version: v8 + kubernetes.io/cluster-service: "true" + spec: + containers: + - name: etcd + image: gcr.io/google_containers/etcd:2.0.9 + resources: + limits: + cpu: 100m + memory: 50Mi + command: + - /usr/local/bin/etcd + - -data-dir + - /var/etcd/data + - -listen-client-urls + - http://127.0.0.1:2379,http://127.0.0.1:4001 + - -advertise-client-urls + - http://127.0.0.1:2379,http://127.0.0.1:4001 + - -initial-cluster-token + - skydns-etcd + volumeMounts: + - name: etcd-storage + mountPath: /var/etcd/data + - name: kube2sky + image: gcr.io/google_containers/kube2sky:1.11 + resources: + limits: + cpu: 100m + memory: 50Mi + args: + # command = "/kube2sky" + - -domain={{ pillar['dns_domain'] }} + - -kube_master_url=http://{kube_server_url}:8080 + - name: skydns + image: gcr.io/google_containers/skydns:2015-03-11-001 + resources: + limits: + cpu: 100m + memory: 50Mi + args: + # command = "/skydns" + - -machines=http://localhost:4001 + - -addr=0.0.0.0:53 + - -domain={{ pillar['dns_domain'] }}. 
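+        # Note: the dns_domain value substituted here must match the --cluster-domain flag passed to the kubelet.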
+ ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + - name: healthz + image: gcr.io/google_containers/exechealthz:1.0 + resources: + limits: + cpu: 10m + memory: 20Mi + args: + - -cmd=nslookup kubernetes.default.svc.{{ pillar['dns_domain'] }} localhost >/dev/null + - -port=8080 + ports: + - containerPort: 8080 + protocol: TCP + volumes: + - name: etcd-storage + emptyDir: {} + dnsPolicy: Default # Don't use cluster DNS. diff --git a/_includes/docs/docs/getting-started-guides/docker-multinode/skydns-svc.yaml.in b/_includes/docs/docs/getting-started-guides/docker-multinode/skydns-svc.yaml.in new file mode 100644 index 0000000000..242c8871ee --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/docker-multinode/skydns-svc.yaml.in @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: {{ pillar['dns_server'] }} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP diff --git a/_includes/docs/docs/getting-started-guides/docker-multinode/testing.md b/_includes/docs/docs/getting-started-guides/docker-multinode/testing.md new file mode 100644 index 0000000000..435cb0628a --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/docker-multinode/testing.md @@ -0,0 +1,66 @@ + +To validate that your node(s) have been added, run: + +```shell +kubectl get nodes +``` + +That should show something like: + +```shell +NAME LABELS STATUS +10.240.99.26 kubernetes.io/hostname=10.240.99.26 Ready +127.0.0.1 kubernetes.io/hostname=127.0.0.1 Ready +``` + +If the status of any node is `Unknown` or `NotReady` your cluster is broken, double check that all containers are running properly, and if all else fails, contact us on [Slack](/{{page.version}}/docs/troubleshooting/#slack). + +### Run an application + +```shell +kubectl -s http://localhost:8080 run nginx --image=nginx --port=80 +``` + +now run `docker ps` you should see nginx running. You may need to wait a few minutes for the image to get pulled. + +### Expose it as a service + +```shell +kubectl expose rc nginx --port=80 +``` + +Run the following command to obtain the IP of this service we just created. There are two IPs, the first one is internal (`CLUSTER_IP`), and the second one is the external load-balanced IP. + +```shell +kubectl get svc nginx +``` + +Alternatively, you can obtain only the first IP (CLUSTER_IP) by running: + +```shell +{% raw %}kubectl get svc nginx --template={{.spec.clusterIP}}{% endraw %} +``` + +Hit the webserver with the first IP (CLUSTER_IP): + +```shell +curl +``` + +Note that you will need run this curl command on your boot2docker VM if you are running on OS X. + +### Scaling + +Now try to scale up the nginx you created before: + +```shell +kubectl scale rc nginx --replicas=3 +``` + +And list the pods + +```shell +kubectl get pods +``` + +You should see pods landing on the newly added machine. 
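+
+To see which node each replica actually landed on, you can inspect an individual pod. This is a minimal sketch; replace the placeholder with a real pod name from the previous command:
+
+```shell
+# The "Node:" line in the describe output shows where the pod was scheduled.
+kubectl get pods
+kubectl describe pod <one-of-the-nginx-pod-names> | grep "Node:"
+```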
\ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/docker-multinode/worker.md b/_includes/docs/docs/getting-started-guides/docker-multinode/worker.md new file mode 100644 index 0000000000..5daafc2b8c --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/docker-multinode/worker.md @@ -0,0 +1,137 @@ + +These instructions are very similar to the master set-up above, but they are duplicated for clarity. +You need to repeat these instructions for each node you want to join the cluster. +We will assume that the IP address of this node is `${NODE_IP}` and you have the IP address of the master in `${MASTER_IP}` that you created in the [master instructions](/{{page.version}}/docs/getting-started-guides/docker-multinode/master). + +For each worker node, there are three steps: + +* [Set up `flanneld` on the worker node](#set-up-flanneld-on-the-worker-node) +* [Start Kubernetes on the worker node](#start-kubernetes-on-the-worker-node) +* [Add the worker to the cluster](#add-the-node-to-the-cluster) + +### Set up Flanneld on the worker node + +As before, the Flannel daemon is going to provide network connectivity. + +_Note_: +There is a [bug](https://github.com/docker/docker/issues/14106) in Docker 1.7.0 that prevents this from working correctly. +Please install Docker 1.6.2 or wait for Docker 1.7.1. + + +#### Set up a bootstrap docker + +As previously, we need a second instance of the Docker daemon running to bootstrap the flannel networking. + +Run: + +```shell +sudo sh -c 'docker -d -H unix:///var/run/docker-bootstrap.sock -p /var/run/docker-bootstrap.pid --iptables=false --ip-masq=false --bridge=none --graph=/var/lib/docker-bootstrap 2> /var/log/docker-bootstrap.log 1> /dev/null &' +``` + +_Important Note_: +If you are running this on a long running system, rather than experimenting, you should run the bootstrap Docker instance under something like SysV init, upstart or systemd so that it is restarted +across reboots and failures. + +#### Bring down Docker + +To re-configure Docker to use flannel, we need to take docker down, run flannel and then restart Docker. + +Turning down Docker is system dependent, it may be: + +```shell +sudo /etc/init.d/docker stop +``` + +or + +```shell +sudo systemctl stop docker +``` + +or it may be something else. + +#### Run flannel + +Now run flanneld itself, this call is slightly different from the above, since we point it at the etcd instance on the master. + +```shell +sudo docker -H unix:///var/run/docker-bootstrap.sock run -d --net=host --privileged -v /dev/net:/dev/net quay.io/coreos/flannel:0.5.0 /opt/bin/flanneld --etcd-endpoints=http://${MASTER_IP}:4001 +``` + +The previous command should have printed a really long hash, copy this hash. + +Now get the subnet settings from flannel: + +```shell +sudo docker -H unix:///var/run/docker-bootstrap.sock exec cat /run/flannel/subnet.env +``` + +#### Edit the docker configuration + +You now need to edit the docker configuration to activate new flags. Again, this is system specific. + +This may be in `/etc/default/docker` or `/etc/systemd/service/docker.service` or it may be elsewhere. + +Regardless, you need to add the following to the docker command line: + +```shell +--bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU} +``` + +#### Remove the existing Docker bridge + +Docker creates a bridge named `docker0` by default. 
You need to remove this: + +```shell +sudo /sbin/ifconfig docker0 down +sudo brctl delbr docker0 +``` + +You may need to install the `bridge-utils` package for the `brctl` binary. + +#### Restart Docker + +Again this is system dependent, it may be: + +```shell +sudo /etc/init.d/docker start +``` + +it may be: + +```shell +systemctl start docker +``` + +### Start Kubernetes on the worker node + +#### Run the kubelet + +Again this is similar to the above, but the `--api-servers` now points to the master we set up in the beginning. + +```shell +sudo docker run \ + --volume=/:/rootfs:ro \ + --volume=/sys:/sys:ro \ + --volume=/dev:/dev \ + --volume=/var/lib/docker/:/var/lib/docker:rw \ + --volume=/var/lib/kubelet/:/var/lib/kubelet:rw \ + --volume=/var/run:/var/run:rw \ + --net=host \ + --privileged=true \ + --pid=host \ + -d \ + gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube kubelet --api-servers=http://${MASTER_IP}:8080 --v=2 --address=0.0.0.0 --enable-server --hostname-override=$(hostname -i) --cluster-dns=10.0.0.10 --cluster-domain=cluster.local +``` + +#### Run the service proxy + +The service proxy provides load-balancing between groups of containers defined by Kubernetes `Services` + +```shell +sudo docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube proxy --master=http://${MASTER_IP}:8080 --v=2 +``` + +### Next steps + +Move on to [testing your cluster](/{{page.version}}/docs/getting-started-guides/docker-multinode/testing) or add another node](#). \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/docker-multinode/worker.sh b/_includes/docs/docs/getting-started-guides/docker-multinode/worker.sh new file mode 100755 index 0000000000..dda80b553c --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/docker-multinode/worker.sh @@ -0,0 +1,174 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A scripts to install k8s worker node. +# Author @wizard_cxy @reouser + +set -e + +# Make sure docker daemon is running +if ( ! ps -ef | grep "/usr/bin/docker" | grep -v 'grep' &> /dev/null ); then + echo "Docker is not running on this machine!" 
+ exit 1 +fi + +# Make sure k8s version env is properly set +if [ -z ${K8S_VERSION} ]; then + K8S_VERSION="1.0.3" + echo "K8S_VERSION is not set, using default: ${K8S_VERSION}" +else + echo "k8s version is set to: ${K8S_VERSION}" +fi + + + +# Run as root +if [ "$(id -u)" != "0" ]; then + echo >&2 "Please run as root" + exit 1 +fi + +# Make sure master ip is properly set +if [ -z ${MASTER_IP} ]; then + echo "Please export MASTER_IP in your env" + exit 1 +else + echo "k8s master is set to: ${MASTER_IP}" +fi + +# Check if a command is valid +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +lsb_dist="" + +# Detect the OS distro, we support ubuntu, debian, mint, centos, fedora dist +detect_lsb() { + case "$(uname -m)" in + *64) + ;; + *) + echo "Error: We currently only support 64-bit platforms." + exit 1 + ;; + esac + + if command_exists lsb_release; then + lsb_dist="$(lsb_release -si)" + fi + if [ -z ${lsb_dist} ] && [ -r /etc/lsb-release ]; then + lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z ${lsb_dist} ] && [ -r /etc/debian_version ]; then + lsb_dist='debian' + fi + if [ -z ${lsb_dist} ] && [ -r /etc/fedora-release ]; then + lsb_dist='fedora' + fi + if [ -z ${lsb_dist} ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" + fi + + lsb_dist="$(echo ${lsb_dist} | tr '[:upper:]' '[:lower:]')" +} + + +# Start the bootstrap daemon +bootstrap_daemon() { + sudo -b docker -d -H unix:///var/run/docker-bootstrap.sock -p /var/run/docker-bootstrap.pid --iptables=false --ip-masq=false --bridge=none --graph=/var/lib/docker-bootstrap 2> /var/log/docker-bootstrap.log 1> /dev/null + + sleep 5 +} + +DOCKER_CONF="" + +# Start k8s components in containers +start_k8s() { + # Start flannel + flannelCID=$(sudo docker -H unix:///var/run/docker-bootstrap.sock run -d --restart=always --net=host --privileged -v /dev/net:/dev/net quay.io/coreos/flannel:0.5.0 /opt/bin/flanneld --etcd-endpoints=http://${MASTER_IP}:4001 -iface="eth0") + + sleep 8 + + # Copy flannel env out and source it on the host + sudo docker -H unix:///var/run/docker-bootstrap.sock cp ${flannelCID}:/run/flannel/subnet.env . 
+ source subnet.env + + # Configure docker net settings, then restart it + case "$lsb_dist" in + fedora|centos|amzn) + DOCKER_CONF="/etc/sysconfig/docker" + ;; + ubuntu|debian|linuxmint) + DOCKER_CONF="/etc/default/docker" + ;; + esac + + echo "DOCKER_OPTS=\"\$DOCKER_OPTS --mtu=${FLANNEL_MTU} --bip=${FLANNEL_SUBNET}\"" | sudo tee -a ${DOCKER_CONF} + + ifconfig docker0 down + + case "$lsb_dist" in + fedora|centos) + yum install bridge-utils && brctl delbr docker0 && systemctl restart docker + ;; + ubuntu|debian|linuxmint) + apt-get install bridge-utils && brctl delbr docker0 && service docker restart + ;; + esac + + # sleep a little bit + sleep 5 + + # Start kubelet & proxy in container + docker run \ + --net=host \ + --pid=host \ + --privileged \ + --restart=always \ + -d \ + -v /sys:/sys:ro \ + -v /var/run:/var/run:rw \ + -v /dev:/dev \ + -v /var/lib/docker/:/var/lib/docker:ro \ + -v /var/lib/kubelet/:/var/lib/kubelet:rw \ + gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ + /hyperkube kubelet --api-servers=http://${MASTER_IP}:8080 \ + --v=2 --address=0.0.0.0 --enable-server \ + --hostname-override=$(hostname -i) \ + --cluster-dns=10.0.0.10 \ + --cluster-domain=cluster.local + + docker run \ + -d \ + --net=host \ + --privileged \ + --restart=always \ + gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ + /hyperkube proxy --master=http://${MASTER_IP}:8080 \ + --v=2 +} + +echo "Detecting your OS distro ..." +detect_lsb + +echo "Starting bootstrap docker ..." +bootstrap_daemon + +echo "Starting k8s ..." +start_k8s + +echo "Worker done!" diff --git a/_includes/docs/docs/getting-started-guides/docker.md b/_includes/docs/docs/getting-started-guides/docker.md new file mode 100644 index 0000000000..2526bdd437 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/docker.md @@ -0,0 +1,142 @@ + +The following instructions show you how to set up a simple, single node Kubernetes cluster using Docker. + +Here's a diagram of what the final result will look like: + +![Kubernetes Single Node on Docker](/images/docs/k8s-singlenode-docker.png) + +* TOC +{:toc} + +## Prerequisites + +1. You need to have docker installed on one machine. +2. Your kernel should support memory and swap accounting. Ensure that the +following configs are turned on in your linux kernel: + +```shell +CONFIG_RESOURCE_COUNTERS=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_SWAP_ENABLED=y +CONFIG_MEMCG_KMEM=y +``` + +3. Enable the memory and swap accounting in the kernel, at boot, as command line +parameters as follows: + +```shell +GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" +``` + +NOTE: The above is specifically for GRUB2. 
+ You can check the command line parameters passed to your kernel by looking at the + output of /proc/cmdline: + +```shell +$cat /proc/cmdline + BOOT_IMAGE=/boot/vmlinuz-3.18.4-aufs root=/dev/sda5 ro cgroup_enable=memory + swapaccount=1 +``` + +### Step One: Run etcd + +```shell +docker run --net=host -d gcr.io/google_containers/etcd:2.0.12 /usr/local/bin/etcd --addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data +``` + +### Step Two: Run the master + +```shell +docker run \ + --volume=/:/rootfs:ro \ + --volume=/sys:/sys:ro \ + --volume=/dev:/dev \ + --volume=/var/lib/docker/:/var/lib/docker:ro \ + --volume=/var/lib/kubelet/:/var/lib/kubelet:rw \ + --volume=/var/run:/var/run:rw \ + --net=host \ + --pid=host \ + --privileged=true \ + -d \ + gcr.io/google_containers/hyperkube:v1.0.1 \ + /hyperkube kubelet --containerized --hostname-override="127.0.0.1" --address="0.0.0.0" --api-servers=http://localhost:8080 --config=/etc/kubernetes/manifests +``` + +This actually runs the kubelet, which in turn runs a [pod](/{{page.version}}/docs/user-guide/pods) that contains the other master components. + +### Step Three: Run the service proxy + +```shell +docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 +``` + +### Test it out + +At this point you should have a running Kubernetes cluster. You can test this by downloading the kubectl +binary +([OS X](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/darwin/amd64/kubectl)) +([linux](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/linux/amd64/kubectl)) + +*Note:* +On OS/X you will need to set up port forwarding via ssh: + +```shell +boot2docker ssh -L8080:localhost:8080 +``` + +List the nodes in your cluster by running: + +```shell +kubectl get nodes +``` + +This should print: + +```shell +NAME LABELS STATUS +127.0.0.1 Ready +``` + +If you are running different Kubernetes clusters, you may need to specify `-s http://localhost:8080` to select the local cluster. + +### Run an application + +```shell +kubectl -s http://localhost:8080 run nginx --image=nginx --port=80 +``` + +Now run `docker ps` you should see nginx running. You may need to wait a few minutes for the image to get pulled. + +### Expose it as a service + +```shell +kubectl expose rc nginx --port=80 +``` + +Run the following command to obtain the IP of this service we just created. There are two IPs, the first one is internal (CLUSTER_IP), and the second one is the external load-balanced IP. + +```shell +kubectl get svc nginx +``` + +Alternatively, you can obtain only the first IP (CLUSTER_IP) by running: + +```shell +kubectl get svc nginx --template={{.spec.clusterIP}} +``` + +Hit the webserver with the first IP (CLUSTER_IP): + +```shell +curl +``` + +Note that you will need run this curl command on your boot2docker VM if you are running on OS X. + +### A note on turning down your cluster + +Many of these containers run under the management of the `kubelet` binary, which attempts to keep containers running, even if they fail. So, in order to turn down +the cluster, you need to first kill the kubelet container, and then any other containers. + +You may use `docker kill $(docker ps -aq)`, note this removes _all_ containers running under Docker, so use with caution. 
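+
+If you want to be more surgical than removing every container on the host, the rough sketch below stops the Kubernetes containers first so nothing gets restarted, and then removes only the pod containers the kubelet created (it names them with a `k8s_` prefix). The container IDs are placeholders; fill them in from `docker ps`.
+
+```shell
+# Stop the kubelet and proxy containers first so the pod containers are not restarted.
+docker ps | grep hyperkube                                 # note the kubelet and proxy container IDs
+docker kill <kubelet-container-id> <proxy-container-id>    # placeholders: use the real IDs
+
+# Then remove the pod containers that the kubelet created.
+docker ps -a | awk '/k8s_/ {print $1}' | xargs docker rm -f
+```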
diff --git a/_includes/docs/docs/getting-started-guides/fedora/fedora-calico.md b/_includes/docs/docs/getting-started-guides/fedora/fedora-calico.md new file mode 100644 index 0000000000..5a6f3b3243 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/fedora/fedora-calico.md @@ -0,0 +1,308 @@ + +This guide will walk you through the process of getting a Kubernetes Fedora cluster running on Digital Ocean with networking powered by Calico networking. +It will cover the installation and configuration of the following systemd processes on the following hosts: + +Kubernetes Master: + +- `kube-apiserver` +- `kube-controller-manager` +- `kube-scheduler` +- `etcd` +- `docker` +- `calico-node` + +Kubernetes Node: + +- `kubelet` +- `kube-proxy` +- `docker` +- `calico-node` + +For this demo, we will be setting up one Master and one Node with the following information: + +| Hostname | IP | +|-------------|-------------| +| kube-master |10.134.251.56| +| kube-node-1 |10.134.251.55| + +This guide is scalable to multiple nodes provided you [configure interface-cbr0 with its own subnet on each Node](#configure-the-virtual-interface---cbr0) +and [add an entry to /etc/hosts for each host](#setup-communication-between-hosts). + +Ensure you substitute the IP Addresses and Hostnames used in this guide with ones in your own setup. + +* TOC +{:toc} + +## Prerequisites + +You need two or more Fedora 22 droplets on Digital Ocean with [Private Networking](https://www.digitalocean.com/community/tutorials/how-to-set-up-and-use-digitalocean-private-networking) enabled. + +## Setup Communication Between Hosts + +Digital Ocean private networking configures a private network on eth1 for each host. To simplify communication between the hosts, we will add an entry to /etc/hosts +so that all hosts in the cluster can hostname-resolve one another to this interface. **It is important that the hostname resolves to this interface instead of eth0, as +all Kubernetes and Calico services will be running on it.** + +```shell +echo "10.134.251.56 kube-master" >> /etc/hosts +echo "10.134.251.55 kube-node-1" >> /etc/hosts +``` + +> Make sure that communication works between kube-master and each kube-node by using a utility such as ping. + +## Setup Master + +### Install etcd + +* Both Calico and Kubernetes use etcd as their datastore. We will run etcd on Master and point all Kubernetes and Calico services at it. + +```shell +yum -y install etcd +``` + +* Edit `/etc/etcd/etcd.conf` + +```conf +ETCD_LISTEN_CLIENT_URLS="http://kube-master:4001" + +ETCD_ADVERTISE_CLIENT_URLS="http://kube-master:4001" +``` + +### Install Kubernetes + +* Run the following command on Master to install the latest Kubernetes (as well as docker): + +```shell +yum -y install kubernetes +``` + +* Edit `/etc/kubernetes/config ` + +```conf +# How the controller-manager, scheduler, and proxy find the apiserver +KUBE_MASTER="--master=http://kube-master:8080" +``` + +* Edit `/etc/kubernetes/apiserver` + +```conf +# The address on the local server to listen to. 
+KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0" + +KUBE_ETCD_SERVERS="--etcd-servers=http://kube-master:4001" + +# Remove ServiceAccount from this line to run without API Tokens +KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota" +``` + +* Create /var/run/kubernetes on master: + +```shell +mkdir /var/run/kubernetes +chown kube:kube /var/run/kubernetes +chmod 750 /var/run/kubernetes +``` + +* Start the appropriate services on master: + +```shell +for SERVICE in etcd kube-apiserver kube-controller-manager kube-scheduler; do + systemctl restart $SERVICE + systemctl enable $SERVICE + systemctl status $SERVICE +done +``` + +### Install Calico + +Next, we'll launch Calico on Master to allow communication between Pods and any services running on the Master. +* Install calicoctl, the calico configuration tool. + +```shell +wget https://github.com/Metaswitch/calico-docker/releases/download/v0.5.5/calicoctl +chmod +x ./calicoctl +sudo mv ./calicoctl /usr/bin +``` + +* Create `/etc/systemd/system/calico-node.service` + +```conf +[Unit] +Description=calicoctl node +Requires=docker.service +After=docker.service + +[Service] +User=root +Environment="ETCD_AUTHORITY=kube-master:4001" +PermissionsStartOnly=true +ExecStartPre=/usr/bin/calicoctl checksystem --fix +ExecStart=/usr/bin/calicoctl node --ip=10.134.251.56 --detach=false + +[Install] +WantedBy=multi-user.target +``` + +>Be sure to substitute `--ip=10.134.251.56` with your Master's eth1 IP Address. + +* Start Calico + +```shell +systemctl enable calico-node.service +systemctl start calico-node.service +``` + +>Starting calico for the first time may take a few minutes as the calico-node docker image is downloaded. + +## Setup Node + +### Configure the Virtual Interface - cbr0 + +By default, docker will create and run on a virtual interface called `docker0`. This interface is automatically assigned the address range 172.17.42.1/16. +In order to set our own address range, we will create a new virtual interface called `cbr0` and then start docker on it. + +* Add a virtual interface by creating `/etc/sysconfig/network-scripts/ifcfg-cbr0`: + +```conf +DEVICE=cbr0 +TYPE=Bridge +IPADDR=192.168.1.1 +NETMASK=255.255.255.0 +ONBOOT=yes +BOOTPROTO=static +``` + +>**Note for Multi-Node Clusters:** Each node should be assigned an IP address on a unique subnet. In this example, node-1 is using 192.168.1.1/24, +so node-2 should be assigned another pool on the 192.168.x.0/24 subnet, e.g. 192.168.2.1/24. + +* Ensure that your system has bridge-utils installed. Then, restart the networking daemon to activate the new interface + +```shell +systemctl restart network.service +``` + +### Install Docker + +* Install Docker + +```shell +yum -y install docker +``` + +* Configure docker to run on `cbr0` by editing `/etc/sysconfig/docker-network`: + +```conf +DOCKER_NETWORK_OPTIONS="--bridge=cbr0 --iptables=false --ip-masq=false" +``` + +* Start docker + +```shell +systemctl start docker +``` + +### Install Calico + +* Install calicoctl, the calico configuration tool. 
+ +```shell +wget https://github.com/Metaswitch/calico-docker/releases/download/v0.5.5/calicoctl +chmod +x ./calicoctl +sudo mv ./calicoctl /usr/bin +``` + +* Create `/etc/systemd/system/calico-node.service` + +```conf +[Unit] +Description=calicoctl node +Requires=docker.service +After=docker.service + +[Service] +User=root +Environment="ETCD_AUTHORITY=kube-master:4001" +PermissionsStartOnly=true +ExecStartPre=/usr/bin/calicoctl checksystem --fix +ExecStart=/usr/bin/calicoctl node --ip=10.134.251.55 --detach=false --kubernetes + +[Install] +WantedBy=multi-user.target +``` + +> Note: You must replace the IP address with your node's eth1 IP Address! + +* Start Calico + +```shell +systemctl enable calico-node.service +systemctl start calico-node.service +``` + +* Configure the IP Address Pool + + Most Kubernetes application deployments will require communication between Pods and the kube-apiserver on Master. On a standard Digital +Ocean Private Network, requests sent from Pods to the kube-apiserver will not be returned as the networking fabric will drop response packets +destined for any 192.168.0.0/16 address. To resolve this, you can have calicoctl add a masquerade rule to all outgoing traffic on the node: + +```shell +ETCD_AUTHORITY=kube-master:4001 calicoctl pool add 192.168.0.0/16 --nat-outgoing +``` + +### Install Kubernetes + +* First, install Kubernetes. + +```shell +yum -y install kubernetes +``` + +* Edit `/etc/kubernetes/config` + +```conf +# How the controller-manager, scheduler, and proxy find the apiserver +KUBE_MASTER="--master=http://kube-master:8080" +``` + +* Edit `/etc/kubernetes/kubelet` + + We'll pass in an extra parameter - `--network-plugin=calico` to tell the Kubelet to use the Calico networking plugin. Additionally, we'll add two +environment variables that will be used by the Calico networking plugin. + +```shell +# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) +KUBELET_ADDRESS="--address=0.0.0.0" + +# You may leave this blank to use the actual hostname +# KUBELET_HOSTNAME="--hostname-override=127.0.0.1" + +# location of the api-server +KUBELET_API_SERVER="--api-servers=http://kube-master:8080" + +# Add your own! +KUBELET_ARGS="--network-plugin=calico" + +# The following are variables which the kubelet will pass to the calico-networking plugin +ETCD_AUTHORITY="kube-master:4001" +KUBE_API_ROOT="http://kube-master:8080/api/v1" +``` + +* Start Kubernetes on the node. + +```shell +for SERVICE in kube-proxy kubelet; do + systemctl restart $SERVICE + systemctl enable $SERVICE + systemctl status $SERVICE +done +``` + +## Check Running Cluster + +The cluster should be running! Check that your nodes are reporting as such: + +```shell +kubectl get nodes +NAME LABELS STATUS +kube-node-1 kubernetes.io/hostname=kube-node-1 Ready +``` \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/fedora/fedora_ansible_config.md b/_includes/docs/docs/getting-started-guides/fedora/fedora_ansible_config.md new file mode 100644 index 0000000000..5a783dd9c7 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/fedora/fedora_ansible_config.md @@ -0,0 +1,224 @@ + +Configuring Kubernetes on Fedora via Ansible offers a simple way to quickly create a clustered environment with little effort. + +* TOC +{:toc} + +## Prerequisites + +1. Host able to run ansible and able to clone the following repo: [kubernetes](https://github.com/kubernetes/kubernetes.git) +2. A Fedora 21+ host to act as cluster master +3. 
As many Fedora 21+ hosts as you would like, that act as cluster nodes + +The hosts can be virtual or bare metal. Ansible will take care of the rest of the configuration for you - configuring networking, installing packages, handling the firewall, etc. This example will use one master and two nodes. + +## Architecture of the cluster + +A Kubernetes cluster requires etcd, a master, and n nodes, so we will create a cluster with three hosts, for example: + +```shell +master,etcd = kube-master.example.com + node1 = kube-node-01.example.com + node2 = kube-node-02.example.com +``` + +**Make sure your local machine has** + + - ansible (must be 1.9.0+) + - git + - python-netaddr + +If not + +```shell +yum install -y ansible git python-netaddr +``` + +**Now clone down the Kubernetes repository** + +```shell +git clone https://github.com/kubernetes/contrib.git +cd contrib/ansible +``` + +**Tell ansible about each machine and its role in your cluster** + +Get the IP addresses from the master and nodes. Add those to the `~/contrib/ansible/inventory` file on the host running Ansible. + +```shell +[masters] +kube-master.example.com + +[etcd] +kube-master.example.com + +[nodes] +kube-node-01.example.com +kube-node-02.example.com +``` + +## Setting up ansible access to your nodes + +If you already are running on a machine which has passwordless ssh access to the kube-master and kube-node-{01,02} nodes, and 'sudo' privileges, simply set the value of `ansible_ssh_user` in `~/contrib/ansible/group_vars/all.yaml` to the username which you use to ssh to the nodes (i.e. `fedora`), and proceed to the next step... + +*Otherwise* setup ssh on the machines like so (you will need to know the root password to all machines in the cluster). + +edit: ~/contrib/ansible/group_vars/all.yml + +```yaml +ansible_ssh_user: root +``` + +**Configuring ssh access to the cluster** + +If you already have ssh access to every machine using ssh public keys you may skip to [setting up the cluster](#setting-up-the-cluster) + +Make sure your local machine (root) has an ssh key pair if not + +```shell +ssh-keygen +``` + +Copy the ssh public key to **all** nodes in the cluster + +```shell +for node in kube-master.example.com kube-node-01.example.com kube-node-02.example.com; do + ssh-copy-id ${node} +done +``` + +## Setting up the cluster + +Although the default value of variables in `~/contrib/ansible/group_vars/all.yml` should be good enough, if not, change them as needed. + +```conf +edit: ~/contrib/ansible/group_vars/all.yml +``` + +**Configure access to kubernetes packages** + +Modify `source_type` as below to access kubernetes packages through the package manager. + +```yaml +source_type: packageManager +``` + +**Configure the IP addresses used for services** + +Each Kubernetes service gets its own IP address. These are not real IPs. You need only select a range of IPs which are not in use elsewhere in your environment. + +```yaml +kube_service_addresses: 10.254.0.0/16 +``` + +**Managing flannel** + +Modify `flannel_subnet`, `flannel_prefix` and `flannel_host_prefix` only if defaults are not appropriate for your cluster. + + +**Managing add on services in your cluster** + +Set `cluster_logging` to false or true (default) to disable or enable logging with elasticsearch. + +```yaml +cluster_logging: true +``` + +Turn `cluster_monitoring` to true (default) or false to enable or disable cluster monitoring with heapster and influxdb. 
+ +```yaml +cluster_monitoring: true +``` + +Turn `dns_setup` to true (recommended) or false to enable or disable whole DNS configuration. + +```yaml +dns_setup: true +``` + +**Tell ansible to get to work!** + +This will finally setup your whole Kubernetes cluster for you. + +```shell +cd ~/contrib/ansible/ + +./setup.sh +``` + +## Testing and using your new cluster + +That's all there is to it. It's really that easy. At this point you should have a functioning Kubernetes cluster. + +**Show kubernetes nodes** + +Run the following on the kube-master: + +```shell +kubectl get nodes +``` + +**Show services running on masters and nodes** + +```shell +systemctl | grep -i kube +``` + +**Show firewall rules on the masters and nodes** + +```shell +iptables -nvL +``` +**Create /tmp/apache.json on the master with the following contents and deploy pod** + +```json +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "fedoraapache", + "labels": { + "name": "fedoraapache" + } + }, + "spec": { + "containers": [ + { + "name": "fedoraapache", + "image": "fedora/apache", + "ports": [ + { + "hostPort": 80, + "containerPort": 80 + } + ] + } + ] + } +} +``` + +```shell +kubectl create -f /tmp/apache.json +``` + +**Check where the pod was created** + +```shell +kubectl get pods +``` + +**Check Docker status on nodes** + +```shell +docker ps +docker images +``` + +**After the pod is 'Running' Check web server access on the node** + +```shell +curl http://localhost +``` + +That's it ! \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/fedora/fedora_manual_config.md b/_includes/docs/docs/getting-started-guides/fedora/fedora_manual_config.md new file mode 100644 index 0000000000..b86b38ff36 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/fedora/fedora_manual_config.md @@ -0,0 +1,213 @@ + +* TOC +{:toc} + +## Prerequisites + +1. You need 2 or more machines with Fedora installed. + +## Instructions + +This is a getting started guide for [Fedora](http://fedoraproject.org). It is a manual configuration so you understand all the underlying packages / services / ports, etc... + +This guide will only get ONE node (previously minion) working. Multiple nodes require a functional [networking configuration](/{{page.version}}/docs/admin/networking) +done outside of Kubernetes. Although the additional Kubernetes configuration requirements should be obvious. + +The Kubernetes package provides a few services: kube-apiserver, kube-scheduler, kube-controller-manager, kubelet, kube-proxy. These +services are managed by systemd and the configuration resides in a central location: /etc/kubernetes. We will break the services up +between the hosts. The first host, fed-master, will be the Kubernetes master. This host will run the kube-apiserver, kube-controller-manager, +and kube-scheduler. In addition, the master will also run _etcd_ (not needed if _etcd_ runs on a different host but this guide assumes +that _etcd_ and Kubernetes master run on the same host). The remaining host, fed-node will be the node and run kubelet, proxy and docker. + +**System Information:** + +Hosts: + +```conf +fed-master = 192.168.121.9 +fed-node = 192.168.121.65 +``` + +**Prepare the hosts:** + +* Install Kubernetes on all hosts - fed-{master,node}. This will also pull in docker. Also install etcd on fed-master. + This guide has been tested with kubernetes-0.18 and beyond. 
+* The [--enablerepo=updates-testing](https://fedoraproject.org/wiki/QA:Updates_Testing) directive in the yum
+  command below will ensure that the most recent Kubernetes version that is scheduled for pre-release will
+  be installed. This should be a more recent version than the Fedora "stable" release for Kubernetes that you
+  would get without adding the directive.
+* If you want the very latest Kubernetes release [you can download and yum install the RPM directly from
+  Fedora Koji](http://koji.fedoraproject.org/koji/packageinfo?packageID=19202) instead of using the yum
+  install command below.
+
+```shell
+yum -y install --enablerepo=updates-testing kubernetes
+```
+
+* Install etcd and iptables
+
+```shell
+yum -y install etcd iptables
+```
+
+* Add master and node to /etc/hosts on all machines (not needed if hostnames already in DNS). Make sure that communication works between fed-master and fed-node by using a utility such as ping.
+
+```shell
+echo "192.168.121.9 fed-master
+192.168.121.65 fed-node" >> /etc/hosts
+```
+
+* Edit /etc/kubernetes/config, which will be the same on all hosts (master and node), to contain:
+
+```shell
+# Comma separated list of nodes in the etcd cluster
+KUBE_MASTER="--master=http://fed-master:8080"
+
+# logging to stderr means we get it in the systemd journal
+KUBE_LOGTOSTDERR="--logtostderr=true"
+
+# journal message level, 0 is debug
+KUBE_LOG_LEVEL="--v=0"
+
+# Should this cluster be allowed to run privileged docker containers
+KUBE_ALLOW_PRIV="--allow-privileged=false"
+```
+
+* Disable the firewall on both the master and node, as docker does not play well with other firewall rule managers. Please note that iptables-services does not exist on a default Fedora server install.
+
+```shell
+systemctl disable iptables-services firewalld
+systemctl stop iptables-services firewalld
+```
+
+**Configure the Kubernetes services on the master.**
+
+* Edit /etc/kubernetes/apiserver to appear as such. The service-cluster-ip-range IP addresses must be an unused block of addresses, not used anywhere else.
+They do not need to be routed or assigned to anything.
+
+```shell
+# The address on the local server to listen to.
+KUBE_API_ADDRESS="--address=0.0.0.0"
+
+# Comma separated list of nodes in the etcd cluster
+KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:4001"
+
+# Address range to use for services
+KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
+
+# Add your own!
+KUBE_API_ARGS=""
+```
+
+* Edit /etc/etcd/etcd.conf so that etcd listens on all IP addresses instead of only 127.0.0.1; otherwise you will get "connection refused" errors. Note that Fedora 22 uses etcd 2.0, which by default uses ports 2379 and 2380 (as opposed to etcd 0.46, which used 4001 and 7001).
+ +```shell +ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001" +``` + +* Create /var/run/kubernetes on master: + +```shell +mkdir /var/run/kubernetes +chown kube:kube /var/run/kubernetes +chmod 750 /var/run/kubernetes +``` + +* Start the appropriate services on master: + +```shell +for SERVICES in etcd kube-apiserver kube-controller-manager kube-scheduler; do + systemctl restart $SERVICES + systemctl enable $SERVICES + systemctl status $SERVICES +done +``` + +* Addition of nodes: + +* Create following node.json file on Kubernetes master node: + +```json +{ + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "name": "fed-node", + "labels":{ "name": "fed-node-label"} + }, + "spec": { + "externalID": "fed-node" + } +} +``` + +Now create a node object internally in your Kubernetes cluster by running: + +```shell +$ kubectl create -f ./node.json + +$ kubectl get nodes +NAME LABELS STATUS +fed-node name=fed-node-label Unknown +``` + +Please note that in the above, it only creates a representation for the node +_fed-node_ internally. It does not provision the actual _fed-node_. Also, it +is assumed that _fed-node_ (as specified in `name`) can be resolved and is +reachable from Kubernetes master node. This guide will discuss how to provision +a Kubernetes node (fed-node) below. + +**Configure the Kubernetes services on the node.** + +***We need to configure the kubelet on the node.*** + +* Edit /etc/kubernetes/kubelet to appear as such: + +```shell +### +# Kubernetes kubelet (node) config + +# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) +KUBELET_ADDRESS="--address=0.0.0.0" + +# You may leave this blank to use the actual hostname +KUBELET_HOSTNAME="--hostname-override=fed-node" + +# location of the api-server +KUBELET_API_SERVER="--api-servers=http://fed-master:8080" + +# Add your own! +#KUBELET_ARGS="" +``` + +* Start the appropriate services on the node (fed-node). + +```shell +for SERVICES in kube-proxy kubelet docker; do + systemctl restart $SERVICES + systemctl enable $SERVICES + systemctl status $SERVICES +done +``` + +* Check to make sure now the cluster can see the fed-node on fed-master, and its status changes to _Ready_. + +```shell +kubectl get nodes +NAME LABELS STATUS +fed-node name=fed-node-label Ready +``` + +* Deletion of nodes: + +To delete _fed-node_ from your Kubernetes cluster, one should run the following on fed-master (Please do not do it, it is just for information): + +```shell +kubectl delete -f ./node.json +``` + +*You should be finished!* + +**The cluster should be running! Launch a test pod.** + +You should have a functional cluster, check out [101](/{{page.version}}/docs/user-guide/walkthrough/)! diff --git a/_includes/docs/docs/getting-started-guides/fedora/flannel_multi_node_cluster.md b/_includes/docs/docs/getting-started-guides/fedora/flannel_multi_node_cluster.md new file mode 100644 index 0000000000..257e5ac719 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/fedora/flannel_multi_node_cluster.md @@ -0,0 +1,171 @@ + +This document describes how to deploy Kubernetes on multiple hosts to set up a multi-node cluster and networking with flannel. Follow fedora [getting started guide](/{{page.version}}/docs/getting-started-guides/fedora/fedora_manual_config) to setup 1 master (fed-master) and 2 or more nodes. Make sure that all nodes have different names (fed-node1, fed-node2 and so on) and labels (fed-node1-label, fed-node2-label, and so on) to avoid any conflict. 
Also make sure that the Kubernetes master host is running etcd, kube-controller-manager, kube-scheduler, and kube-apiserver services, and the nodes are running docker, kube-proxy and kubelet services. Now install flannel on Kubernetes nodes. flannel on each node configures an overlay network that docker uses. flannel runs on each node to setup a unique class-C container network. + +* TOC +{:toc} + +## Prerequisites + +You need 2 or more machines with Fedora installed. + +## Master Setup + +**Perform following commands on the Kubernetes master** + +Configure flannel by creating a `flannel-config.json` in your current directory on fed-master. flannel provides udp and vxlan among other overlay networking backend options. In this guide, we choose kernel based vxlan backend. The contents of the json are: + +```json +{ + "Network": "18.16.0.0/16", + "SubnetLen": 24, + "Backend": { + "Type": "vxlan", + "VNI": 1 + } +} +``` + +**NOTE:** Choose an IP range that is *NOT* part of the public IP address range. + +Add the configuration to the etcd server on fed-master. + +```shell +etcdctl set /coreos.com/network/config < flannel-config.json +``` + +* Verify the key exists in the etcd server on fed-master. + +```shell +etcdctl get /coreos.com/network/config +``` + +## Node Setup + +**Perform following commands on all Kubernetes nodes** + +Edit the flannel configuration file /etc/sysconfig/flanneld as follows: + +```shell +# Flanneld configuration options + +# etcd url location. Point this to the server where etcd runs +FLANNEL_ETCD="http://fed-master:4001" + +# etcd config key. This is the configuration key that flannel queries +# For address range assignment +FLANNEL_ETCD_KEY="/coreos.com/network" + +# Any additional options that you want to pass +FLANNEL_OPTIONS="" +``` + +**Note:** By default, flannel uses the interface for the default route. If you have multiple interfaces and would like to use an interface other than the default route one, you could add "-iface=" to FLANNEL_OPTIONS. For additional options, run `flanneld --help` on command line. + +Enable the flannel service. + +```shell +systemctl enable flanneld +``` + +If docker is not running, then starting flannel service is enough and skip the next step. + +```shell +systemctl start flanneld +``` + +If docker is already running, then stop docker, delete docker bridge (docker0), start flanneld and restart docker as follows. Another alternative is to just reboot the system (`systemctl reboot`). + +```shell +systemctl stop docker +ip link delete docker0 +systemctl start flanneld +systemctl start docker +``` + + +## **Test the cluster and flannel configuration** + +Now check the interfaces on the nodes. Notice there is now a flannel.1 interface, and the ip addresses of docker0 and flannel.1 interfaces are in the same network. You will notice that docker0 is assigned a subnet (18.16.29.0/24 as shown below) on each Kubernetes node out of the IP range configured above. A working output should look like this: + +```shell +# ip -4 a|grep inet + inet 127.0.0.1/8 scope host lo + inet 192.168.122.77/24 brd 192.168.122.255 scope global dynamic eth0 + inet 18.16.29.0/16 scope global flannel.1 + inet 18.16.29.1/24 scope global docker0 +``` + +From any node in the cluster, check the cluster members by issuing a query to etcd server via curl (only partial output is shown using `grep -E "\{|\}|key|value"`). If you set up a 1 master and 3 nodes cluster, you should see one block for each node showing the subnets they have been assigned. 
You can associate those subnets to each node by the MAC address (VtepMAC) and IP address (Public IP) that is listed in the output. + +```shell +curl -s http://fed-master:4001/v2/keys/coreos.com/network/subnets | python -mjson.tool +``` + +```json +{ + "node": { + "key": "/coreos.com/network/subnets", + { + "key": "/coreos.com/network/subnets/18.16.29.0-24", + "value": "{\"PublicIP\":\"192.168.122.77\",\"BackendType\":\"vxlan\",\"BackendData\":{\"VtepMAC\":\"46:f1:d0:18:d0:65\"}}" + }, + { + "key": "/coreos.com/network/subnets/18.16.83.0-24", + "value": "{\"PublicIP\":\"192.168.122.36\",\"BackendType\":\"vxlan\",\"BackendData\":{\"VtepMAC\":\"ca:38:78:fc:72:29\"}}" + }, + { + "key": "/coreos.com/network/subnets/18.16.90.0-24", + "value": "{\"PublicIP\":\"192.168.122.127\",\"BackendType\":\"vxlan\",\"BackendData\":{\"VtepMAC\":\"92:e2:80:ba:2d:4d\"}}" + } + } +} +``` + +From all nodes, review the `/run/flannel/subnet.env` file. This file was generated automatically by flannel. + +```shell +# cat /run/flannel/subnet.env +FLANNEL_SUBNET=18.16.29.1/24 +FLANNEL_MTU=1450 +FLANNEL_IPMASQ=false +``` + +At this point, we have etcd running on the Kubernetes master, and flannel / docker running on Kubernetes nodes. Next steps are for testing cross-host container communication which will confirm that docker and flannel are configured properly. + +Issue the following commands on any 2 nodes: + +```shell +# docker run -it fedora:latest bash +bash-4.3# +``` + +This will place you inside the container. Install iproute and iputils packages to install ip and ping utilities. Due to a [bug](https://bugzilla.redhat.com/show_bug.cgi?id=1142311), it is required to modify capabilities of ping binary to work around "Operation not permitted" error. + +```shell +bash-4.3# yum -y install iproute iputils +bash-4.3# setcap cap_net_raw-ep /usr/bin/ping +``` + +Now note the IP address on the first node: + +```shell +bash-4.3# ip -4 a l eth0 | grep inet + inet 18.16.29.4/24 scope global eth0 +``` + +And also note the IP address on the other node: + +```shell +bash-4.3# ip a l eth0 | grep inet + inet 18.16.90.4/24 scope global eth0 +``` +Now ping from the first node to the other node: + +```shell +bash-4.3# ping 18.16.90.4 +PING 18.16.90.4 (18.16.90.4) 56(84) bytes of data. +64 bytes from 18.16.90.4: icmp_seq=1 ttl=62 time=0.275 ms +64 bytes from 18.16.90.4: icmp_seq=2 ttl=62 time=0.372 ms +``` + +Now Kubernetes multi-node cluster is set up with overlay networking set up by flannel. \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/gce.md b/_includes/docs/docs/getting-started-guides/gce.md new file mode 100644 index 0000000000..85367df0c8 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/gce.md @@ -0,0 +1,213 @@ + + +The example below creates a Kubernetes cluster with 4 worker node Virtual Machines and a master Virtual Machine (i.e. 5 VMs in your cluster). This cluster is set up and controlled from your workstation (or wherever you find convenient). + +* TOC +{:toc} + +### Before you start + +If you want a simplified getting started experience and GUI for managing clusters, please consider trying [Google Container Engine](https://cloud.google.com/container-engine/) (GKE) for hosted cluster installation and management. + +If you want to use custom binaries or pure open source Kubernetes, please continue with the instructions below. + +### Prerequisites + +1. You need a Google Cloud Platform account with billing enabled. 
Visit the [Google Developers Console](http://cloud.google.com/console) for more details. +1. Install `gcloud` as necessary. `gcloud` can be installed as a part of the [Google Cloud SDK](https://cloud.google.com/sdk/). +1. Then, make sure you have the `gcloud preview` command line component installed. Run `gcloud preview` at the command line - if it asks to install any components, go ahead and install them. If it simply shows help text, you're good to go. This is required as the cluster setup script uses GCE [Instance Groups](https://cloud.google.com/compute/docs/instance-groups/), which are in the gcloud preview namespace. You will also need to **enable [`Compute Engine Instance Group Manager API`](https://developers.google.com/console/help/new/#activatingapis)** in the developers console. +1. Make sure that gcloud is set to use the Google Cloud Platform project you want. You can check the current project using `gcloud config list project` and change it via `gcloud config set project `. +1. Make sure you have credentials for GCloud by running ` gcloud auth login`. +1. Make sure you can start up a GCE VM from the command line. At least make sure you can do the [Create an instance](https://cloud.google.com/compute/docs/instances/#startinstancegcloud) part of the GCE Quickstart. +1. Make sure you can ssh into the VM without interactive prompts. See the [Log in to the instance](https://cloud.google.com/compute/docs/instances/#sshing) part of the GCE Quickstart. + +### Starting a cluster + +You can install a client and start a cluster with either one of these commands (we list both in case only one is installed on your machine): + + +```shell +curl -sS https://get.k8s.io | bash +``` + +or + +```shell +wget -q -O - https://get.k8s.io | bash +``` + +Once this command completes, you will have a master VM and four worker VMs, running as a Kubernetes cluster. + +By default, some containers will already be running on your cluster. Containers like `kibana` and `elasticsearch` provide [logging](/{{page.version}}/docs/getting-started-guides/logging), while `heapster` provides [monitoring](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/README.md) services. + +The script run by the commands above creates a cluster with the name/prefix "kubernetes". It defines one specific cluster config, so you can't run it more than once. + +Alternately, you can download and install the latest Kubernetes release from [this page](https://github.com/kubernetes/kubernetes/releases), then run the `/cluster/kube-up.sh` script to start the cluster: + +```shell +cd kubernetes +cluster/kube-up.sh +``` + +If you want more than one cluster running in your project, want to use a different name, or want a different number of worker nodes, see the `/cluster/gce/config-default.sh` file for more fine-grained configuration before you start up your cluster. + +If you run into trouble, please see the section on [troubleshooting](/{{page.version}}/docs/getting-started-guides/gce/#troubleshooting), post to the +[google-containers group](https://groups.google.com/forum/#!forum/google-containers), or come ask questions on [Slack](/{{page.version}}/docs/troubleshooting/#slack). + +The next few steps will show you: + +1. how to set up the command line client on your workstation to manage the cluster +1. examples of how to use the cluster +1. how to delete the cluster +1. 
how to start clusters with non-default options (like larger clusters) + +### Installing the Kubernetes command line tools on your workstation + +The cluster startup script will leave you with a running cluster and a `kubernetes` directory on your workstation. +The next step is to make sure the `kubectl` tool is in your path. + +The [kubectl](/{{page.version}}/docs/user-guide/kubectl/kubectl) tool controls the Kubernetes cluster manager. It lets you inspect your cluster resources, create, delete, and update components, and much more. +You will use it to look at your new cluster and bring up example apps. + +Add the appropriate binary folder to your `PATH` to access kubectl: + +```shell +# OS X +export PATH=/platforms/darwin/amd64:$PATH +# Linux +export PATH=/platforms/linux/amd64:$PATH +``` + +**Note**: gcloud also ships with `kubectl`, which by default is added to your path. +However the gcloud bundled kubectl version may be older than the one downloaded by the +get.k8s.io install script. We recommend you use the downloaded binary to avoid +potential issues with client/server version skew. + +#### Enabling bash completion of the Kubernetes command line tools + +You may find it useful to enable `kubectl` bash completion: + +``` +$ source ./contrib/completions/bash/kubectl +``` + +**Note**: This will last for the duration of your bash session. If you want to make this permanent you need to add this line in your bash profile. + +Alternatively, on most linux distributions you can also move the completions file to your bash_completions.d like this: + +``` +$ cp ./contrib/completions/bash/kubectl /etc/bash_completion.d/ +``` + +but then you have to update it when you update kubectl. + +### Getting started with your cluster + +#### Inspect your cluster + +Once `kubectl` is in your path, you can use it to look at your cluster. E.g., running: + +```shell +$ kubectl get --all-namespaces services +``` + +should show a set of [services](/{{page.version}}/docs/user-guide/services) that look something like this: + +```shell +NAMESPACE NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE +default kubernetes 10.0.0.1 443/TCP 1d +kube-system kube-dns 10.0.0.2 53/TCP,53/UDP k8s-app=kube-dns 1d +kube-system kube-ui 10.0.0.3 80/TCP k8s-app=kube-ui 1d +... +``` + +Similarly, you can take a look at the set of [pods](/{{page.version}}/docs/user-guide/pods) that were created during cluster startup. +You can do this via the + +```shell +$ kubectl get --all-namespaces pods +``` + +command. + +You'll see a list of pods that looks something like this (the name specifics will be different): + +```shell +NAMESPACE NAME READY STATUS RESTARTS AGE +kube-system fluentd-cloud-logging-kubernetes-minion-63uo 1/1 Running 0 14m +kube-system fluentd-cloud-logging-kubernetes-minion-c1n9 1/1 Running 0 14m +kube-system fluentd-cloud-logging-kubernetes-minion-c4og 1/1 Running 0 14m +kube-system fluentd-cloud-logging-kubernetes-minion-ngua 1/1 Running 0 14m +kube-system kube-dns-v5-7ztia 3/3 Running 0 15m +kube-system kube-ui-v1-curt1 1/1 Running 0 15m +kube-system monitoring-heapster-v5-ex4u3 1/1 Running 1 15m +kube-system monitoring-influx-grafana-v1-piled 2/2 Running 0 15m +``` + +Some of the pods may take a few seconds to start up (during this time they'll show `Pending`), but check that they all show as `Running` after a short period. + +#### Run some examples + +Then, see [a simple nginx example](/{{page.version}}/docs/user-guide/simple-nginx) to try out your new cluster. 
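+ +If you just want a quick smoke test before working through that guide, the following sketch shows the general shape of the commands involved (the `my-nginx` name is illustrative, and the exact resource type created by `kubectl run` depends on your Kubernetes version): + +```shell +# Start two nginx replicas, then check that the pods reach Running +kubectl run my-nginx --image=nginx --replicas=2 --port=80 +kubectl get pods -l run=my-nginx +# Clean up: depending on the version, `kubectl run` creates a replication controller or a deployment +kubectl delete rc my-nginx 2>/dev/null || kubectl delete deployment my-nginx +```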
+ +For more complete applications, please look in the [examples directory](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/). The [guestbook example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/) is a good "getting started" walkthrough. + +### Tearing down the cluster + +To remove/delete/teardown the cluster, use the `kube-down.sh` script. + +```shell +cd kubernetes +cluster/kube-down.sh +``` + +Likewise, the `kube-up.sh` in the same directory will bring it back up. You do not need to rerun the `curl` or `wget` command: everything needed to setup the Kubernetes cluster is now on your workstation. + +### Customizing + +The script above relies on Google Storage to stage the Kubernetes release. It +then will start (by default) a single master VM along with 4 worker VMs. You +can tweak some of these parameters by editing `kubernetes/cluster/gce/config-default.sh` +You can view a transcript of a successful cluster creation +[here](https://gist.github.com/satnam6502/fc689d1b46db9772adea). + +### Troubleshooting + +#### Project settings + +You need to have the Google Cloud Storage API, and the Google Cloud Storage +JSON API enabled. It is activated by default for new projects. Otherwise, it +can be done in the Google Cloud Console. See the [Google Cloud Storage JSON +API Overview](https://cloud.google.com/storage/docs/json_api/) for more +details. + +Also ensure that-- as listed in the [Prerequsites section](#prerequisites)-- you've enabled the `Compute Engine Instance Group Manager API`, and can start up a GCE VM from the command line as in the [GCE Quickstart](https://cloud.google.com/compute/docs/quickstart) instructions. + +#### Cluster initialization hang + +If the Kubernetes startup script hangs waiting for the API to be reachable, you can troubleshoot by SSHing into the master and node VMs and looking at logs such as `/var/log/startupscript.log`. + +**Once you fix the issue, you should run `kube-down.sh` to cleanup** after the partial cluster creation, before running `kube-up.sh` to try again. + +#### SSH + +If you're having trouble SSHing into your instances, ensure the GCE firewall +isn't blocking port 22 to your VMs. By default, this should work but if you +have edited firewall rules or created a new non-default network, you'll need to +expose it: `gcloud compute firewall-rules create default-ssh --network= +--description "SSH allowed from anywhere" --allow tcp:22` + +Additionally, your GCE SSH key must either have no passcode or you need to be +using `ssh-agent`. + +#### Networking + +The instances must be able to connect to each other using their private IP. The +script uses the "default" network which should have a firewall rule called +"default-allow-internal" which allows traffic on any port on the private IPs. +If this rule is missing from the default network or if you change the network +being used in `cluster/config-default.sh` create a new rule with the following +field values: + +* Source Ranges: `10.0.0.0/8` +* Allowed Protocols and Port: `tcp:1-65535;udp:1-65535;icmp` \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/index.md b/_includes/docs/docs/getting-started-guides/index.md new file mode 100644 index 0000000000..188b76a094 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/index.md @@ -0,0 +1,175 @@ + +Kubernetes can run on a range of platforms, from your laptop, to VMs on a cloud provider, to rack of +bare metal servers. 
The effort required to set up a cluster varies from running a single command to +crafting your own customized cluster. We'll guide you in picking a solution that fits your needs. + +If you just want to "kick the tires" on Kubernetes, we recommend the [local Docker-based](/{{page.version}}/docs/getting-started-guides/docker) solution. + +The local Docker-based solution is one of several [Local cluster](#local-machine-solutions) solutions +that are quick to set up, but are limited to running on one machine. + +When you are ready to scale up to more machines and higher availability, a [Hosted](#hosted-solutions) +solution is the easiest to create and maintain. + +[Turn-key cloud solutions](#turn-key-cloud-solutions) require only a few commands to create +and cover a wider range of cloud providers. + +[Custom solutions](#custom-solutions) require more effort to set up, but they cover a wider range of +environments and vary from step-by-step instructions to general advice for setting up +a Kubernetes cluster from scratch. + +* TOC +{:toc} + +### Local-machine Solutions + +Local-machine solutions create a single cluster with one or more Kubernetes nodes on a single +physical machine. Setup is completely automated and doesn't require a cloud provider account. +But their size and availability are limited to that of a single machine. + +The local-machine solutions are: + +- [Local Docker-based](/{{page.version}}/docs/getting-started-guides/docker) (recommended starting point) +- [Vagrant](/{{page.version}}/docs/getting-started-guides/vagrant) (works on any platform with Vagrant: Linux, MacOS, or Windows.) +- [No-VM local cluster](/{{page.version}}/docs/getting-started-guides/locally) (Linux only) + + +### Hosted Solutions + +[Google Container Engine](https://cloud.google.com/container-engine) offers managed Kubernetes +clusters. + +### Turn-key Cloud Solutions + +These solutions allow you to create Kubernetes clusters on a range of Cloud IaaS providers with only a +few commands, and have active community support. + +- [GCE](/{{page.version}}/docs/getting-started-guides/gce) +- [AWS](/{{page.version}}/docs/getting-started-guides/aws) +- [Azure](/{{page.version}}/docs/getting-started-guides/coreos/azure/) + +### Custom Solutions + +Kubernetes can run on a wide range of Cloud providers and bare-metal environments, and with many +base operating systems. + +If you can find a guide below that matches your needs, use it. It may be a little out of date, but +it will be easier than starting from scratch. If you do want to start from scratch because you +have special requirements or just because you want to understand what is underneath a Kubernetes +cluster, try the [Getting Started from Scratch](/{{page.version}}/docs/getting-started-guides/scratch) guide. + +If you are interested in supporting Kubernetes on a new platform, check out our [advice for +writing a new solution](/{{page.version}}/docs/devel/writing-a-getting-started-guide). + +#### Cloud + +These solutions are combinations of cloud provider and OS not covered by the above solutions.
+ +- [AWS + coreos](/{{page.version}}/docs/getting-started-guides/coreos) +- [GCE + CoreOS](/{{page.version}}/docs/getting-started-guides/coreos) +- [AWS + Ubuntu](/{{page.version}}/docs/getting-started-guides/juju) +- [Joyent + Ubuntu](/{{page.version}}/docs/getting-started-guides/juju) +- [Rackspace + CoreOS](/{{page.version}}/docs/getting-started-guides/rackspace) + +#### On-Premises VMs + +- [Vagrant](/{{page.version}}/docs/getting-started-guides/coreos) (uses CoreOS and flannel) +- [CloudStack](/{{page.version}}/docs/getting-started-guides/cloudstack) (uses Ansible, CoreOS and flannel) +- [Vmware](/{{page.version}}/docs/getting-started-guides/vsphere) (uses Debian) +- [juju.md](/{{page.version}}/docs/getting-started-guides/juju) (uses Juju, Ubuntu and flannel) +- [Vmware](/{{page.version}}/docs/getting-started-guides/coreos) (uses CoreOS and flannel) +- [libvirt-coreos.md](/{{page.version}}/docs/getting-started-guides/libvirt-coreos) (uses CoreOS) +- [oVirt](/{{page.version}}/docs/getting-started-guides/ovirt) +- [libvirt](/{{page.version}}/docs/getting-started-guides/fedora/flannel_multi_node_cluster) (uses Fedora and flannel) +- [KVM](/{{page.version}}/docs/getting-started-guides/fedora/flannel_multi_node_cluster) (uses Fedora and flannel) + +#### Bare Metal + +- [Offline](/{{page.version}}/docs/getting-started-guides/coreos/bare_metal_offline) (no internet required. Uses CoreOS and Flannel) +- [fedora/fedora_ansible_config.md](/{{page.version}}/docs/getting-started-guides/fedora/fedora_ansible_config) +- [Fedora single node](/{{page.version}}/docs/getting-started-guides/fedora/fedora_manual_config) +- [Fedora multi node](/{{page.version}}/docs/getting-started-guides/fedora/flannel_multi_node_cluster) +- [Centos](/{{page.version}}/docs/getting-started-guides/centos/centos_manual_config) +- [Ubuntu](/{{page.version}}/docs/getting-started-guides/ubuntu) +- [Docker Multi Node](/{{page.version}}/docs/getting-started-guides/docker-multinode) + +#### Integrations + +These solutions provide integration with 3rd party schedulers, resource managers, and/or lower level platforms. + +- [Kubernetes on Mesos](/{{page.version}}/docs/getting-started-guides/mesos) + - Instructions specify GCE, but are generic enough to be adapted to most existing Mesos clusters +- [Kubernetes on DCOS](/{{page.version}}/docs/getting-started-guides/dcos) + - Community Edition DCOS uses AWS + - Enterprise Edition DCOS supports cloud hosting, on-premise VMs, and bare metal + +## Table of Solutions + +Here are all the solutions mentioned above in table form. + +IaaS Provider | Config. 
Mgmt | OS | Networking | Docs | Conforms | Support Level +-------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ---------------------------- +GKE | | | GCE | [docs](https://cloud.google.com/container-engine) | [✓][3] | Commercial +Vagrant | Saltstack | Fedora | flannel | [docs](/{{page.version}}/docs/getting-started-guides/vagrant) | [✓][2] | Project +GCE | Saltstack | Debian | GCE | [docs](/{{page.version}}/docs/getting-started-guides/gce) | [✓][1] | Project +Azure | CoreOS | CoreOS | Weave | [docs](/{{page.version}}/docs/getting-started-guides/coreos/azure/) | | Community ([@errordeveloper](https://github.com/errordeveloper), [@squillace](https://github.com/squillace), [@chanezon](https://github.com/chanezon), [@crossorigin](https://github.com/crossorigin)) +Docker Single Node | custom | N/A | local | [docs](/{{page.version}}/docs/getting-started-guides/docker) | | Project ([@brendandburns](https://github.com/brendandburns)) +Docker Multi Node | Flannel | N/A | local | [docs](/{{page.version}}/docs/getting-started-guides/docker-multinode) | | Project ([@brendandburns](https://github.com/brendandburns)) +Bare-metal | Ansible | Fedora | flannel | [docs](/{{page.version}}/docs/getting-started-guides/fedora/fedora_ansible_config) | | Project +Digital Ocean | custom | Fedora | Calico | [docs](/{{page.version}}/docs/getting-started-guides/fedora/fedora-calico) | | Community (@djosborne) +Bare-metal | custom | Fedora | _none_ | [docs](/{{page.version}}/docs/getting-started-guides/fedora/fedora_manual_config) | | Project +Bare-metal | custom | Fedora | flannel | [docs](/{{page.version}}/docs/getting-started-guides/fedora/flannel_multi_node_cluster) | | Community ([@aveshagarwal](https://github.com/aveshagarwal)) +libvirt | custom | Fedora | flannel | [docs](/{{page.version}}/docs/getting-started-guides/fedora/flannel_multi_node_cluster) | | Community ([@aveshagarwal](https://github.com/aveshagarwal)) +KVM | custom | Fedora | flannel | [docs](/{{page.version}}/docs/getting-started-guides/fedora/flannel_multi_node_cluster) | | Community ([@aveshagarwal](https://github.com/aveshagarwal)) +Mesos/Docker | custom | Ubuntu | Docker | [docs](/{{page.version}}/docs/getting-started-guides/mesos-docker) | | Community ([Kubernetes-Mesos Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) +Mesos/GCE | | | | [docs](/{{page.version}}/docs/getting-started-guides/mesos) | | Community ([Kubernetes-Mesos Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) +DCOS | Marathon | CoreOS/Alpine | custom | [docs](/{{page.version}}/docs/getting-started-guides/dcos) | | Community ([Kubernetes-Mesos Authors](https://github.com/mesosphere/kubernetes-mesos/blob/master/AUTHORS.md)) +AWS | CoreOS | CoreOS | flannel | [docs](/{{page.version}}/docs/getting-started-guides/coreos) | | Community +GCE | CoreOS | CoreOS | flannel | [docs](/{{page.version}}/docs/getting-started-guides/coreos) | | Community ([@pires](https://github.com/pires)) +Vagrant | CoreOS | CoreOS | flannel | [docs](/{{page.version}}/docs/getting-started-guides/coreos) | | Community ([@pires](https://github.com/pires), [@AntonioMeireles](https://github.com/AntonioMeireles)) +Bare-metal (Offline) | CoreOS | CoreOS | flannel | [docs](/{{page.version}}/docs/getting-started-guides/coreos/bare_metal_offline) | | Community ([@jeffbean](https://github.com/jeffbean)) +Bare-metal | CoreOS | CoreOS | Calico |
[docs](/{{page.version}}/docs/getting-started-guides/coreos/bare_metal_calico) | | Community ([@caseydavenport](https://github.com/caseydavenport)) +CloudStack | Ansible | CoreOS | flannel | [docs](/{{page.version}}/docs/getting-started-guides/cloudstack) | | Community ([@runseb](https://github.com/runseb)) +Vmware | | Debian | OVS | [docs](/{{page.version}}/docs/getting-started-guides/vsphere) | | Community ([@pietern](https://github.com/pietern)) +Bare-metal | custom | CentOS | _none_ | [docs](/{{page.version}}/docs/getting-started-guides/centos/centos_manual_config) | | Community ([@coolsvap](https://github.com/coolsvap)) +AWS | Juju | Ubuntu | flannel | [docs](/{{page.version}}/docs/getting-started-guides/juju) | | [Community](https://github.com/whitmo/bundle-kubernetes) ( [@whit](https://github.com/whitmo), [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) +OpenStack/HPCloud | Juju | Ubuntu | flannel | [docs](/{{page.version}}/docs/getting-started-guides/juju) | | [Community](https://github.com/whitmo/bundle-kubernetes) ( [@whit](https://github.com/whitmo), [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) +Joyent | Juju | Ubuntu | flannel | [docs](/{{page.version}}/docs/getting-started-guides/juju) | | [Community](https://github.com/whitmo/bundle-kubernetes) ( [@whit](https://github.com/whitmo), [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) +AWS | Saltstack | Ubuntu | OVS | [docs](/{{page.version}}/docs/getting-started-guides/aws) | | Community ([@justinsb](https://github.com/justinsb)) +Bare-metal | custom | Ubuntu | Calico | [docs](/{{page.version}}/docs/getting-started-guides/ubuntu-calico) | | Community ([@djosborne](https://github.com/djosborne)) +Bare-metal | custom | Ubuntu | flannel | [docs](/{{page.version}}/docs/getting-started-guides/ubuntu) | | Community ([@resouer](https://github.com/resouer), [@WIZARD-CXY](https://github.com/WIZARD-CXY)) +Local | | | _none_ | [docs](/{{page.version}}/docs/getting-started-guides/locally) | | Community ([@preillyme](https://github.com/preillyme)) +libvirt/KVM | CoreOS | CoreOS | libvirt/KVM | [docs](/{{page.version}}/docs/getting-started-guides/libvirt-coreos) | | Community ([@lhuard1A](https://github.com/lhuard1A)) +oVirt | | | | [docs](/{{page.version}}/docs/getting-started-guides/ovirt) | | Community ([@simon3z](https://github.com/simon3z)) +Rackspace | CoreOS | CoreOS | flannel | [docs](/{{page.version}}/docs/getting-started-guides/rackspace) | | Community ([@doublerr](https://github.com/doublerr)) +any | any | any | any | [docs](/{{page.version}}/docs/getting-started-guides/scratch) | | Community ([@erictune](https://github.com/erictune)) + + +*Note*: The above table is ordered by version test/used in notes followed by support level. + +Definition of columns: + +- **IaaS Provider** is who/what provides the virtual or physical machines (nodes) that Kubernetes runs on. +- **OS** is the base operating system of the nodes. +- **Config. Mgmt** is the configuration management system that helps install and maintain Kubernetes software on the + nodes. +- **Networking** is what implements the [networking model](/{{page.version}}/docs/admin/networking). Those with networking type + _none_ may not support more than one node, or may support multiple VM nodes only in the same physical node. 
+- **Conformance** indicates whether a cluster created with this configuration has passed the project's conformance + tests for supporting the API and base features of Kubernetes v1.0.0. +- Support Levels + - **Project**: Kubernetes Committers regularly use this configuration, so it usually works with the latest release + of Kubernetes. + - **Commercial**: A commercial offering with its own support arrangements. + - **Community**: Actively supported by community contributions. May not work with more recent releases of Kubernetes. + - **Inactive**: No active maintainer. Not recommended for first-time Kubernetes users, and may be deleted soon. +- **Notes** is relevant information such as the version of Kubernetes used. + + + + +[1]: https://gist.github.com/erictune/4cabc010906afbcc5061 + +[2]: https://gist.github.com/derekwaynecarr/505e56036cdf010bf6b6 + +[3]: https://gist.github.com/erictune/2f39b22f72565365e59b diff --git a/_includes/docs/docs/getting-started-guides/juju.md b/_includes/docs/docs/getting-started-guides/juju.md new file mode 100644 index 0000000000..b32bd308db --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/juju.md @@ -0,0 +1,248 @@ + +[Juju](https://jujucharms.com/docs/stable/about-juju) makes it easy to deploy +Kubernetes by provisioning, installing and configuring all the systems in +the cluster. Once deployed the cluster can easily scale up with one command +to increase the cluster size. + +* TOC +{:toc} + +## Prerequisites + +> Note: If you're running kube-up, on Ubuntu - all of the dependencies +> will be handled for you. You may safely skip to the section: +> [Launch Kubernetes Cluster](#launch-kubernetes-cluster) + +### On Ubuntu + +[Install the Juju client](https://jujucharms.com/get-started) on your +local Ubuntu system: + +```shell +sudo add-apt-repository ppa:juju/stable +sudo apt-get update +sudo apt-get install juju-core juju-quickstart +``` + +### With Docker + +If you are not using Ubuntu or prefer the isolation of Docker, you may +run the following: + +```shell +mkdir ~/.juju +sudo docker run -v ~/.juju:/home/ubuntu/.juju -ti jujusolutions/jujubox:latest +``` + +At this point from either path you will have access to the `juju +quickstart` command. + +To set up the credentials for your chosen cloud run: + +```shell +juju quickstart --constraints="mem=3.75G" -i +``` + +> The `constraints` flag is optional, it changes the size of virtual machines +> that Juju will generate when it requests a new machine. Larger machines +> will run faster but cost more money than smaller machines. + +Follow the dialogue and choose `save` and `use`. Quickstart will now +bootstrap the juju root node and setup the juju web based user +interface. + + +## Launch Kubernetes cluster + +You will need to export the `KUBERNETES_PROVIDER` environment variable before +bringing up the cluster. + +```shell +export KUBERNETES_PROVIDER=juju +cluster/kube-up.sh +``` + +If this is your first time running the `kube-up.sh` script, it will install +the required dependencies to get started with Juju, additionally it will +launch a curses based configuration utility allowing you to select your cloud +provider and enter the proper access credentials. + +Next it will deploy the kubernetes master, etcd, 2 nodes with flannel based +Software Defined Networking (SDN) so containers on different hosts can +communicate with each other. 
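+ +Provisioning usually takes several minutes. If you want to follow along while the machines come up, one convenient sketch (the same information is available from the `juju status` command described in the next section) is: + +```shell +# Poll the unit states every couple of seconds until everything reports "started" +watch juju status --format=oneline +```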
+ + +## Exploring the cluster + +The `juju status` command provides information about each unit in the cluster: + +```shell +$ juju status --format=oneline +- docker/0: 52.4.92.78 (started) + - flannel-docker/0: 52.4.92.78 (started) + - kubernetes/0: 52.4.92.78 (started) +- docker/1: 52.6.104.142 (started) + - flannel-docker/1: 52.6.104.142 (started) + - kubernetes/1: 52.6.104.142 (started) +- etcd/0: 52.5.216.210 (started) 4001/tcp +- juju-gui/0: 52.5.205.174 (started) 80/tcp, 443/tcp +- kubernetes-master/0: 52.6.19.238 (started) 8080/tcp +``` + +You can use `juju ssh` to access any of the units: + +```shell +juju ssh kubernetes-master/0 +``` + +## Run some containers! + +`kubectl` is available on the Kubernetes master node. We'll ssh in to +launch some containers, but one could use `kubectl` locally by setting +`KUBERNETES_MASTER` to point at the IP address of "kubernetes-master/0". + +No pods will be available before starting a container: + +```shell +kubectl get pods +NAME READY STATUS RESTARTS AGE + +kubectl get replicationcontrollers +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +``` + +We'll follow the aws-coreos example. Create a pod manifest: `pod.json` + +```json +{ +  "apiVersion": "v1", +  "kind": "Pod", +  "metadata": { +    "name": "hello", +    "labels": { +      "name": "hello", +      "environment": "testing" +    } +  }, +  "spec": { +    "containers": [{ +      "name": "hello", +      "image": "quay.io/kelseyhightower/hello", +      "ports": [{ +        "containerPort": 80, +        "hostPort": 80 +      }] +    }] +  } +} +``` + +Create the pod with kubectl: + +```shell +kubectl create -f pod.json +``` + +Get info on the pod: + +```shell +kubectl get pods +``` + +To test the hello app, we need to locate which node is hosting +the container. Better tooling for using Juju to introspect containers +is in the works, but for now we can use `juju run` and `juju status` to find +our hello app. + +Exit out of our ssh session and run: + +```shell +juju run --unit kubernetes/0 "docker ps -n=1" +... +juju run --unit kubernetes/1 "docker ps -n=1" +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +02beb61339d8 quay.io/kelseyhightower/hello:latest /hello About an hour ago Up About an hour k8s_hello.... +``` + +We see that "kubernetes/1" has our container, so we can open port 80: + +```shell +juju run --unit kubernetes/1 "open-port 80" +juju expose kubernetes +sudo apt-get install curl +curl $(juju status --format=oneline kubernetes/1 | cut -d' ' -f3) +``` + +Finally delete the pod: + +```shell +juju ssh kubernetes-master/0 +kubectl delete pods hello +``` + +## Scale out cluster + +We can add node units like so: + +```shell +juju add-unit docker # creates unit docker/2, kubernetes/2, docker-flannel/2 +``` + +## Launch the "k8petstore" example app + +The [k8petstore example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/k8petstore/) is available as a +[juju action](https://jujucharms.com/docs/devel/actions). + +```shell +juju action do kubernetes-master/0 +``` + +> Note: this example includes curl statements to exercise the app, which +> automatically generates "petstore" transactions written to redis, and allows +> you to visualize the throughput in your browser.
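+ +To watch the example's pods come up, you can reuse the commands shown earlier in this guide (a small sketch; pod names will differ in your cluster, and older kubectl versions without `--watch` can simply re-run `kubectl get pods`): + +```shell +# From your workstation, ssh to the master and list pods as they start +juju ssh kubernetes-master/0 +kubectl get pods --watch +```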
+ +## Tear down cluster + +```shell +./kube-down.sh +``` + +or destroy your current Juju environment (using the `juju env` command): + +```shell +juju destroy-environment --force `juju env` +``` + + +## More Info + +The Kubernetes charms and bundles can be found in the `kubernetes` project on +github.com: + + - [Bundle Repository](http://releases.k8s.io/{{page.githubbranch}}/cluster/juju/bundles) + * [Kubernetes master charm](https://releases.k8s.io/{{page.githubbranch}}/cluster/juju/charms/trusty/kubernetes-master) + * [Kubernetes node charm](https://releases.k8s.io/{{page.githubbranch}}/cluster/juju/charms/trusty/kubernetes) + - [More about Juju](https://jujucharms.com) + + +### Cloud compatibility + +Juju runs natively against a variety of public cloud providers. Juju currently +works with: + +- [Amazon Web Service](https://jujucharms.com/docs/stable/config-aws) +- [Windows Azure](https://jujucharms.com/docs/stable/config-azure) +- [DigitalOcean](https://jujucharms.com/docs/stable/config-digitalocean) +- [Google Compute Engine](https://jujucharms.com/docs/stable/config-gce) +- [HP Public Cloud](https://jujucharms.com/docs/stable/config-hpcloud) +- [Joyent](https://jujucharms.com/docs/stable/config-joyent) +- [LXC](https://jujucharms.com/docs/stable/config-LXC) +- Any [OpenStack](https://jujucharms.com/docs/stable/config-openstack) deployment +- [Vagrant](https://jujucharms.com/docs/stable/config-vagrant) +- [Vmware vSphere](https://jujucharms.com/docs/stable/config-vmware) + +If you do not see your favorite cloud provider listed many clouds can be +configured for [manual provisioning](https://jujucharms.com/docs/stable/config-manual). + +The Kubernetes bundle has been tested on GCE and AWS and found to work with +version 1.0.0. \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/libvirt-coreos.md b/_includes/docs/docs/getting-started-guides/libvirt-coreos.md new file mode 100644 index 0000000000..d5a9e08b91 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/libvirt-coreos.md @@ -0,0 +1,291 @@ + +* TOC +{:toc} + +### Highlights + +* Super-fast cluster boot-up (few seconds instead of several minutes for vagrant) +* Reduced disk usage thanks to [COW](https://en.wikibooks.org/wiki/QEMU/Images#Copy_on_write) +* Reduced memory footprint thanks to [KSM](https://www.kernel.org/doc/Documentation/vm/ksm.txt) + +### Warnings about `libvirt-coreos` use case + +The primary goal of the `libvirt-coreos` cluster provider is to deploy a multi-node Kubernetes cluster on local VMs as fast as possible and to be as light as possible in term of resources used. + +In order to achieve that goal, its deployment is very different from the 'standard production deployment'? method used on other providers. This was done on purpose in order to implement some optimizations made possible by the fact that we know that all VMs will be running on the same physical machine. + +The `libvirt-coreos` cluster provider doesn't aim at being production look-alike. + +Another difference is that no security is enforced on `libvirt-coreos` at all. For example, + +* Kube API server is reachable via a clear-text connection (no SSL); +* Kube API server requires no credentials; +* etcd access is not protected; +* Kubernetes secrets are not protected as securely as they are on production environments; +* etc. 
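+ +Concretely, anyone who can reach the master can talk to the API server over plain, unauthenticated HTTP (a sketch, assuming the default master address used later in this guide and the conventional insecure API port 8080): + +```shell +# No TLS and no credentials: the API server answers anonymous clear-text requests +curl http://192.168.10.1:8080/api +```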
+ +So, an k8s application developer should not validate its interaction with Kubernetes on `libvirt-coreos` because he might technically succeed in doing things that are prohibited on a production environment like: + +* un-authenticated access to Kube API server; +* Access to Kubernetes private data structures inside etcd; +* etc. + +On the other hand, `libvirt-coreos` might be useful for people investigating low level implementation of Kubernetes because debugging techniques like sniffing the network traffic or introspecting the etcd content are easier on `libvirt-coreos` than on a production deployment. + +### Prerequisites + +1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc) +2. Install [ebtables](http://ebtables.netfilter.org/) +3. Install [qemu](http://wiki.qemu.org/Main_Page) +4. Install [libvirt](http://libvirt.org/) +5. Enable and start the libvirt daemon, e.g: + * ``systemctl enable libvirtd`` + * ``systemctl start libvirtd`` +6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit) +7. Check that your $HOME is accessible to the qemu user² + +#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access. + +You can test it with the following command: + +```shell +virsh -c qemu:///system pool-list +``` + +If you have access error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html . + +In short, if your libvirt has been compiled with Polkit support (ex: Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` as follows to grant full access to libvirt to `$USER` + +```shell +sudo /bin/sh -c "cat - > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules" << EOF +``` + +```conf +polkit.addRule(function(action, subject) { + if (action.id == "org.libvirt.unix.manage" && + subject.user == "$USER") { + return polkit.Result.YES; + polkit.log("action=" + action); + polkit.log("subject=" + subject); + } +}); +EOF +``` + +If your libvirt has not been compiled with Polkit (ex: Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket: + +```shell +$ ls -l /var/run/libvirt/libvirt-sock +srwxrwx--- 1 root libvirtd 0 févr. 12 16:03 /var/run/libvirt/libvirt-sock + +$ usermod -a -G libvirtd $USER +# $USER needs to logout/login to have the new group be taken into account +``` + +(Replace `$USER` with your login name) + +#### ² Qemu will run with a specific user. It must have access to the VMs drives + +All the disk drive resources needed by the VM (CoreOS disk image, Kubernetes binaries, cloud-init files, etc.) are put inside `./cluster/libvirt-coreos/libvirt_storage_pool`. + +As we're using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool. + +If your `$HOME` is world readable, everything is fine. If your $HOME is private, `cluster/kube-up.sh` will fail with an error message like: + +```shell +error: Cannot access storage file '$HOME/.../kubernetes/cluster/libvirt-coreos/libvirt_storage_pool/kubernetes_master.img' (as uid:99, gid:78): Permission denied +``` + +In order to fix that issue, you have several possibilities: + +* set `POOL_PATH` inside `cluster/libvirt-coreos/config-default.sh` to a directory: + * backed by a filesystem with a lot of free disk space + * writable by your user; + * accessible by the qemu user. 
+* Grant the qemu user access to the storage pool. + +On Arch: + +```shell +setfacl -m g:kvm:--x ~ +``` + +### Setup + +By default, the libvirt-coreos setup will create a single Kubernetes master and 3 Kubernetes nodes. Because the VM drives use Copy-on-Write and because of memory ballooning and KSM, there is a lot of resource over-allocation. + +To start your local cluster, open a shell and run: + +```shell +cd kubernetes + +export KUBERNETES_PROVIDER=libvirt-coreos +cluster/kube-up.sh +``` + +The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine. + +The `NUM_MINIONS` environment variable may be set to specify the number of nodes to start. If it is not set, the number of nodes defaults to 3. + +The `KUBE_PUSH` environment variable may be set to specify which Kubernetes binaries must be deployed on the cluster. Its possible values are: + +* `release` (default if `KUBE_PUSH` is not set) will deploy the binaries of `_output/release-tars/kubernetes-server-'|.tar.gz`. This is built with `make release` or `make release-skip-tests`. +* `local` will deploy the binaries of `_output/local/go/bin`. These are built with `make`. + +You can check that your machines are there and running with: + +```shell +$ virsh -c qemu:///system list + Id Name State +---------------------------------------------------- + 15 kubernetes_master running + 16 kubernetes_minion-01 running + 17 kubernetes_minion-02 running + 18 kubernetes_minion-03 running +``` + +You can check that the Kubernetes cluster is working with: + +```shell +$ kubectl get nodes +NAME LABELS STATUS +192.168.10.2 Ready +192.168.10.3 Ready +192.168.10.4 Ready +``` + +The VMs are running [CoreOS](https://coreos.com/). +Your ssh keys have already been pushed to the VM. (It looks for ~/.ssh/id_*.pub) +The user to use to connect to the VM is `core`. +The IP to connect to the master is 192.168.10.1. +The IPs to connect to the nodes are 192.168.10.2 and onwards. + +Connect to `kubernetes_master`: + +```shell +ssh core@192.168.10.1 +``` + +Connect to `kubernetes_minion-01`: + +```shell +ssh core@192.168.10.2 +``` + +### Interacting with your Kubernetes cluster with the `kube-*` scripts. + +All of the following commands assume you have set `KUBERNETES_PROVIDER` appropriately: + +```shell +export KUBERNETES_PROVIDER=libvirt-coreos +``` + +Bring up a libvirt-CoreOS cluster of 5 nodes + +```shell +NUM_MINIONS=5 cluster/kube-up.sh +``` + +Destroy the libvirt-CoreOS cluster + +```shell +cluster/kube-down.sh +``` + +Update the libvirt-CoreOS cluster with a new Kubernetes release produced by `make release` or `make release-skip-tests`: + +```shell +cluster/kube-push.sh +``` + +Update the libvirt-CoreOS cluster with the locally built Kubernetes binaries produced by `make`: + +```shell +KUBE_PUSH=local cluster/kube-push.sh +``` + +Interact with the cluster + +```shell +kubectl ... +``` + +### Troubleshooting + +#### !!! Cannot find kubernetes-server-linux-amd64.tar.gz + +Build the release tarballs: + +```shell +make release +``` + +#### Can't find virsh in PATH, please fix and retry. 
+ +Install libvirt + +On Arch: + +```shell +pacman -S qemu libvirt +``` + +On Ubuntu 14.04.1: + +```shell +aptitude install qemu-system-x86 libvirt-bin +``` + +On Fedora 21: + +```shell +yum install qemu libvirt +``` + +#### error: Failed to connect socket to '/var/run/libvirt/libvirt-sock': No such file or directory + +Start the libvirt daemon + +On Arch: + +```shell +systemctl start libvirtd +``` + +On Ubuntu 14.04.1: + +```shell +service libvirt-bin start +``` + +#### error: Failed to connect socket to '/var/run/libvirt/libvirt-sock': Permission denied + +Fix libvirt access permission (Remember to adapt `$USER`) + +On Arch and Fedora 21: + +```shell +cat > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules < + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: ubuntu:14.04 + args: [bash, -c, + 'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done'] +``` + +[Download example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/blog-logging/counter-pod.yaml) + + +This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let's create the pod in the default +namespace. + +```shell +$ kubectl create -f examples/blog-logging/counter-pod.yaml + pods/counter +``` + +We can observe the running pod: + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +counter 1/1 Running 0 5m +``` + +This step may take a few minutes to download the ubuntu:14.04 image during which the pod status will be shown as `Pending`. + +One of the nodes is now running the counter pod: + +![Counter Pod](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/blog-logging/diagrams/27gf-counter.png) + +When the pod status changes to `Running` we can use the kubectl logs command to view the output of this counter pod. + +```shell +$ kubectl logs counter +0: Tue Jun 2 21:37:31 UTC 2015 +1: Tue Jun 2 21:37:32 UTC 2015 +2: Tue Jun 2 21:37:33 UTC 2015 +3: Tue Jun 2 21:37:34 UTC 2015 +4: Tue Jun 2 21:37:35 UTC 2015 +5: Tue Jun 2 21:37:36 UTC 2015 +... +``` + +This command fetches the log text from the Docker log file for the image that is running in this container. We can connect to the running container and observe the running counter bash script. + +```shell +$ kubectl exec -i counter bash +ps aux +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.0 0.0 17976 2888 ? Ss 00:02 0:00 bash -c for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done +root 468 0.0 0.0 17968 2904 ? Ss 00:05 0:00 bash +root 479 0.0 0.0 4348 812 ? S 00:05 0:00 sleep 1 +root 480 0.0 0.0 15572 2212 ? R 00:05 0:00 ps aux +``` + +What happens if for any reason the image in this pod is killed off and then restarted by Kubernetes? Will we still see the log lines from the previous invocation of the container followed by the log lines for the started container? Or will we lose the log lines from the original container's execution and only see the log lines for the new container? Let's find out. First let's stop the currently running counter. + +```shell +$ kubectl stop pod counter +pods/counter +``` + +Now let's restart the counter. + +```shell +$ kubectl create -f examples/blog-logging/counter-pod.yaml +pods/counter +``` + +Let's wait for the container to restart and get the log lines again. 
+ +```shell +$ kubectl logs counter +0: Tue Jun 2 21:51:40 UTC 2015 +1: Tue Jun 2 21:51:41 UTC 2015 +2: Tue Jun 2 21:51:42 UTC 2015 +3: Tue Jun 2 21:51:43 UTC 2015 +4: Tue Jun 2 21:51:44 UTC 2015 +5: Tue Jun 2 21:51:45 UTC 2015 +6: Tue Jun 2 21:51:46 UTC 2015 +7: Tue Jun 2 21:51:47 UTC 2015 +8: Tue Jun 2 21:51:48 UTC 2015 +``` + +We've lost the log lines from the first invocation of the container in this pod! Ideally, we want to preserve all the log lines from each invocation of each container in the pod. Furthermore, even if the pod is restarted we would still like to preserve all the log lines that were ever emitted by the containers in the pod. But don't fear, this is the functionality provided by cluster level logging in Kubernetes. When a cluster is created, the standard output and standard error output of each container can be ingested using a [Fluentd](http://www.fluentd.org/) agent running on each node into either [Google Cloud Logging](https://cloud.google.com/logging/docs/) or into Elasticsearch and viewed with Kibana. + +When a Kubernetes cluster is created with logging to Google Cloud Logging enabled, the system creates a pod called `fluentd-cloud-logging` on each node of the cluster to collect Docker container logs. These pods were shown at the start of this blog article in the response to the first get pods command. + +This log collection pod has a specification which looks something like this: + + + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: fluentd-cloud-logging + namespace: kube-system +spec: + containers: + - name: fluentd-cloud-logging + image: gcr.io/google_containers/fluentd-gcp:1.14 + resources: + limits: + cpu: 100m + memory: 200Mi + env: + - name: FLUENTD_ARGS + value: -q + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers +``` + +[Download example](https://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml) + + +This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it. + +We can click on the Logs item under the Monitoring section of the Google Developer Console and select the logs for the counter container, which will be called kubernetes.counter_default_count. This identifies the name of the pod (counter), the namespace (default) and the name of the container (count) for which the log collection occurred. Using this name we can select just the logs for our counter container from the drop down menu: + +![Cloud Logging Console](/images/docs/cloud-logging-console.png) + +When we view the logs in the Developer Console we observe the logs for both invocations of the container. + +![Both Logs](/images/docs/all-lines.png) + +Note the first container counted to 108 and then it was terminated. When the next container image restarted the counting process resumed from 0. 
Similarly if we deleted the pod and restarted it we would capture the logs for all instances of the containers in the pod whenever the pod was running. + + Logs ingested into Google Cloud Logging may be exported to various other destinations including [Google Cloud Storage](https://cloud.google.com/storage/) buckets and [BigQuery](https://cloud.google.com/bigquery/). Use the Exports tab in the Cloud Logging console to specify where logs should be streamed to. You can also follow this link to the + [settings tab](https://pantheon.corp.google.com/project/_/logs/settings). + + We could query the ingested logs from BigQuery using the SQL query which reports the counter log lines showing the newest lines first: + +```shell +SELECT metadata.timestamp, structPayload.log + FROM [mylogs.kubernetes_counter_default_count_20150611] + ORDER BY metadata.timestamp DESC +``` + +Here is some sample output: + +![BigQuery](/images/docs/bigquery-logging.png) + +We could also fetch the logs from Google Cloud Storage buckets to our desktop or laptop and then search them locally. The following command fetches logs for the counter pod running in a cluster which is itself in a Compute Engine project called `myproject`. Only logs for the date 2015-06-11 are fetched. + + +```shell +$ gsutil -m cp -r gs://myproject/kubernetes.counter_default_count/2015/06/11 . +``` + +Now we can run queries over the ingested logs. The example below uses the [jq](http://stedolan.github.io/jq/) program to extract just the log lines. + +```shell +$ cat 21\:00\:00_21\:59\:59_S0.json | jq '.structPayload.log' +"0: Thu Jun 11 21:39:38 UTC 2015\n" +"1: Thu Jun 11 21:39:39 UTC 2015\n" +"2: Thu Jun 11 21:39:40 UTC 2015\n" +"3: Thu Jun 11 21:39:41 UTC 2015\n" +"4: Thu Jun 11 21:39:42 UTC 2015\n" +"5: Thu Jun 11 21:39:43 UTC 2015\n" +"6: Thu Jun 11 21:39:44 UTC 2015\n" +"7: Thu Jun 11 21:39:45 UTC 2015\n" +... +``` + +This page has touched briefly on the underlying mechanisms that support gathering cluster level logs on a Kubernetes deployment. The approach here only works for gathering the standard output and standard error output of the processes running in the pod's containers. To gather other logs that are stored in files one can use a sidecar container to gather the required files as described at the page [Collecting log files within containers with Fluentd](http://releases.k8s.io/{{page.githubbranch}}/contrib/logging/fluentd-sidecar-gcp/README.md) and sending them to the Google Cloud Logging service. + +Some of the material in this section also appears in the blog article [Cluster Level Logging with Kubernetes](http://blog.kubernetes.io/2015/06/cluster-level-logging-with-kubernetes) \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/meanstack.md b/_includes/docs/docs/getting-started-guides/meanstack.md new file mode 100644 index 0000000000..ec690a41b8 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/meanstack.md @@ -0,0 +1,442 @@ + +**By: Sandeep Dinesh** - _July 29, 2015_ + +![image](/images/docs/meanstack/image_0.png) + +In [a recent post](http://blog.sandeepdinesh.com/2015/07/running-mean-web-application-in-docker.html), I talked about running a MEAN stack with [Docker Containers.](http://docker.com/) + +Manually deploying Containers is all fine and dandy, but is rather fragile and clumsy. What happens if the app crashes? How can the app be updated? Rolled back? + +Thankfully, there is a system we can use to manage our containers in a cluster environment called Kubernetes. 
Even better, Google has a managed version of Kubernetes called [Google Container Engine](https://cloud.google.com/container-engine/) so you can get up and running in minutes. + +* TOC +{:toc} + +## The Basics of Using Kubernetes + +Before we jump in and start kube’ing it up, it’s important to understand some of the fundamentals of Kubernetes. + +* Containers: These are the Docker, rtk, AppC, or whatever Container you are running. You can think of these like subatomic particles; everything is made up of them, but you rarely (if ever) interact with them directly. +* Pods: Pods are the basic component of Kubernetes. They are a group of Containers that are scheduled, live, and die together. Why would you want to have a group of containers instead of just a single container? Let’s say you had a log processor, a web server, and a database. If you couldn't use Pods, you would have to bundle the log processor in the web server and database containers, and each time you updated one you would have to update the other. With Pods, you can just reuse the same log processor for both the web server and database. +* Replication Controllers: This is the management component of Kubernetes, and it’s pretty cool. You give it a set of Pods, tell it "I want three copies of this," and it creates those copies on your cluster. It will do its best to keep those copies always running, so if one crashes it will start another. +* Services: This is the other side to Replication Controllers. A service is the single point of contact for a group of Pods. For example, let’s say you have a Replication Controller that creates four copies of a web server pod. A Service will split the traffic to each of the four copies. Services are "permanent" while the pods behind them can come and go, so it’s a good idea to use Services. + + +## Step 1: Creating the Container + +In my previous post, I used off-the-shelf containers to keep things simple. + +I had a stock MongoDB container and a stock Node.js container. The Mongo container ran fine without any modification. However, I had to manually enter the Node container to pull and run the code. Obviously this isn't ideal in Kubernetes land, as you aren't supposed to log into your servers! + +Instead, you have to build a custom container that has the code already inside it and runs automatically. + +To do this, you need to use more Docker. Make sure you have the latest version installed for the rest of this tutorial. + +Getting the code: + +Before starting, let’s get some code to run. You can follow along on your personal machine or a Linux VM in the cloud. I recommend using Linux or a Linux VM; running Docker on Mac and Windows is outside the scope of this tutorial. + +```shell +$ git clone https://github.com/ijason/NodeJS-Sample-App.git app +$ mv app/EmployeeDB/* app/ +$ sed -i -- 's/localhost/mongo/g' ./app/app.js +``` + +This is the same sample app we ran before. The second line just moves everything from the `EmployeeDB` subfolder up into the app folder so it’s easier to access. The third line, once again, replaces the hardcoded `localhost` with the `mongo` proxy. + +Building the Docker image: + +First, you need a `Dockerfile`. This is basically the list of instructions Docker uses to build a container image. + +Here is the `Dockerfile` for the web server: + +```shell +FROM node:0.10.40 + +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app +COPY ./app/ ./ +RUN npm install + +CMD ["node", "app.js"] +``` + +A `Dockerfile` is pretty self explanatory, and this one is dead simple. 
+ +First, it uses the official Node.js image as the base image. + +Then, it creates a folder to store the code, `cd`s into that directory, copies the code in, and installs the dependencies with npm. + +Finally, it specifies the command Docker should run when the container starts, which is to start the app. + + +## Step 2: Building our Container + +Right now, the directory should look like this: + +```shell +$ ls + +Dockerfile app +``` + +Let’s build. + +```shell +$ docker build -t myapp . +``` + +This will build a new Docker image for your app. This might take a few minutes as it is downloading and building everything. + +After that is done, test it out: + +```shell +$ docker run myapp +``` + +At this point, you should have a server running on `http://localhost:3000` (or wherever Docker tells you). The website will error out as there is no database running, but we know it works! + +![image](/images/docs/meanstack/image_1.png) + + +## Step 3: Pushing our Container + +Now you have a custom Docker image, you have to actually access it from the cloud. + +As we are going to be using the image with Google Container Engine, the best place to push the image is the [Google Container Registry](https://cloud.google.com/tools/container-registry/). The Container Registry is built on top of [Google Cloud Storage](https://cloud.google.com/storage/), so you get the advantage of scalable storage and very fast access from Container Engine. + +First, make sure you have the latest version of the [Google Cloud SDK installed](https://cloud.google.com/sdk/). + +[Windows users click here.](https://dl.google.com/dl/cloudsdk/release/GoogleCloudSDKInstaller.exe) + +For Linux/Mac: + +```shell +$ curl https://sdk.cloud.google.com | bash +``` + +Then, make sure you log in and update. + +```shell +$ gcloud auth login +$ gcloud components update +``` + +You're ready to push your container live, but you'll need a destination. Create a Project in [the Google Cloud Platform Console](https://console.developers.google.com/), and leave it blank. Use the Project ID below, and push your project live. + +```shell +$ docker tag myapp gcr.io//myapp +$ gcloud docker push gcr.io//myapp +``` + +After some time, it will finish. You can check the console to see the container has been pushed up. + +![image](/images/docs/meanstack/image_2.png) + + +## **Step 4: Creating the Cluster** + +So now you have the custom container, let’s create a cluster to run it. + +Currently, a cluster can be as small as one machine to as big as 100 machines. You can pick any machine type you want, so you can have a cluster of a single `f1-micro` instance, 100 `n1-standard-32` instances (3,200 cores!), and anything in between. + +For this tutorial I'm going to use the following: + +* Create a cluster named `mean-cluster` +* Give it a size of 2 nodes +* Machine type will be `n1-standard-1` +* Zone will be `us-central-1f` (Use a zone close to you) + +There are two ways to create this cluster. Take your pick. + +**Command Line:** + +```shell +$ gcloud beta container \ + --project "" \ + clusters create "mean-cluster" \ + --zone "us-central1-f" \ + --machine-type "n1-standard-1" \ + --num-nodes "2" \ + --network "default" +``` + +**GUI:** + +![image](/images/docs/meanstack/image_3.png) + +After a few minutes, you should see this in the console. + +![image](/images/docs/meanstack/image_4.png) + + +## **Step 5: Creating the Database Service** + +Three things need to be created: + +1. 
Persistent Disk to store the data (pods are ephemeral, so we shouldn't save data locally) +2. Replication Controller running MongoDB +3. Service mapping to that Replication Controller + +To create the disk, run this: + +```shell +$ gcloud compute disks create \ + --project "" \ + --zone "us-central1-f" \ + --size 200GB \ + mongo-disk +``` + +Pick the same zone as your cluster and an appropriate disk size for your application. + +Now, we need to create a Replication Controller that will run the database. I’m using a Replication Controller and not a Pod, because if a standalone Pod dies, it won't restart automatically. + +### `db-controller.yml` + +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + labels: + name: mongo + name: mongo-controller +spec: + replicas: 1 + template: + metadata: + labels: + name: mongo + spec: + containers: + - image: mongo + name: mongo + ports: + - name: mongo + containerPort: 27017 + hostPort: 27017 + volumeMounts: + - name: mongo-persistent-storage + mountPath: /data/db + volumes: + - name: mongo-persistent-storage + gcePersistentDisk: + pdName: mongo-disk + fsType: ext4 +``` + +We call the controller `mongo-controller`, specify one replica, and open the appropriate ports. The image is `mongo`, which is the off the shelf MongoDB image. + +The `volumes` section creates the volume for Kubernetes to use. There is a Google Container Engine-specific `gcePersistentDisk` section that maps the disk we made into a Kubernetes volume, and we mount the volume into the `/data/db` directory (as described in the MongoDB Docker documentation) + +Now we have the Controller, let’s create the Service: + +### `db-service.yml` + +```yaml +apiVersion: v1 +kind: Service +metadata: + labels: + name: mongo + name: mongo +spec: + ports: + - port: 27017 + targetPort: 27017 + selector: + name: mongo +``` + +Again, pretty simple stuff. We "select" the mongo Controller to be served, open up the ports, and call the service `mongo`. + +This is just like the "link" command line option we used with Docker in my previous post. Instead of connecting to `localhost`, we connect to `mongo`, and Kubernetes redirects traffic to the mongo service! + +At this point, the local directory looks like this: + +```shell +$ ls + +Dockerfile +app +db-controller.yml +db-service.yml +``` + +## Step 6: Running the Database + +First, let’s "log in" to the cluster + +```shell +$ gcloud container clusters get-credentials mean-cluster +``` + +Now create the controller. + +```shell +$ kubectl create -f db-controller.yml +``` + +And the Service. + +```shell +$ kubectl create -f db-service.yml +``` + +`kubectl` is the Kubernetes command line tool (automatically installed with the Google Cloud SDK). We are just creating the resources specified in the files. + +At this point, the database is spinning up! You can check progress with the following command: + +```shell +$ kubectl get pods +``` + +Once you see the mongo pod in running status, we are good to go! + +```shell +$ kubectl get pods + +NAME READY REASON RESTARTS AGE +mongo-controller-xxxx 1/1 Running 0 3m +``` + + +## Step 7: Creating the Web Server + +Now the database is running, let’s start the web server. + +We need two things: + +1. Replication Controller to spin up and down web server pods +2. 
Service to expose our website to the interwebs + +Let’s look at the Replication Controller configuration: + +### `web-controller.yml` + +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + labels: + name: web + name: web-controller +spec: + replicas: 2 + template: + metadata: + labels: + name: web + spec: + containers: + - image: gcr.io//myapp + name: web + ports: + - containerPort: 3000 + name: http-server +``` + +Here, we create a controller called `web-controller`, and we tell it to create two replicas. Replicas of what you ask? You may notice the `template` section looks just like a Pod configuration, and that's because it is. We are creating a Pod with our custom Node.js container and exposing port 3000. + + +Now for the Service + +### `web-service.yml` + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: web + labels: + name: web +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 3000 + protocol: TCP + selector: + name: web +``` + +Notice two things here: + +1. The type is *LoadBalancer*. This is a cool feature that will make Google Cloud Platform create an external network load balancer automatically for this service! +2. We map external port 80 to the internal port 3000, so we can serve HTTP traffic without messing with Firewalls. + +At this point, the local directory looks like this + +```shell +$ ls + +Dockerfile app db-pod.yml db-service.yml web-service.yml web-controller.yml +``` + + +## Step 8: Running the Web Server + +Create the Controller. + +```shell +$ kubectl create -f web-controller.yml +``` + +And the Service. + +```shell +$ kubectl create -f web-service.yml +``` + +And check the status. + +```shell +$ kubectl get pods +``` + +Once you see the web pods in running status, we are good to go! + +```shell +$ kubectl get pods + +NAME READY REASON RESTARTS AGE +mongo-controller-xxxx 1/1 Running 0 4m +web-controller-xxxx 1/1 Running 0 1m +web-controller-xxxx 1/1 Running 0 1m +``` + + +## Step 9: Accessing the App + +At this point, everything is up and running. The architecture looks something like this: + +![image](/images/docs/meanstack/image_5.png) + +By default, port 80 should be open on the load balancer. In order to find the IP address of our app, run this command: + +```shell +$ gcloud compute forwarding-rules list + +NAME REGION IP_ADDRESS IP_PROTOCOL TARGET +abcdef us-central1 104.197.XXX.XXX TCP us-xxxx +``` + +If you go to the IP address listed, you should see the app up and running! + +![image](/images/docs/meanstack/image_6.png) + +And the Database works! + +![image](/images/docs/meanstack/image_7.png) + + +#### **Final Thoughts** + +By using Container Engine and Kubernetes, we have a very robust, container based MEAN stack running in production. + +[In anoter post](https://medium.com/google-cloud/mongodb-replica-sets-with-kubernetes-d96606bd9474#.e93x7kuq5), I cover how to setup a MongoDB replica set. This is very important for running in production. + +Hopefully I can do some more posts about advanced Kubernetes topics such as changing the cluster size and number of Node.js web server replicas, using different environments (dev, staging, prod) on the same cluster, and doing rolling updates. + +Thanks to [Mark Mandel](https://medium.com/@markmandel), [Aja Hammerly](https://medium.com/@thagomizer), and [Jack Wilber](https://medium.com/@jack.g.wilber). [Some rights reserved](http://creativecommons.org/licenses/by/4.0/) by the author. 
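As a small taste of those follow-up topics, scaling the web tier is a one-liner with `kubectl` (a sketch; the controller name comes from the `web-controller.yml` above, and the Service keeps routing traffic to whatever pods carry the `name: web` label):

```shell
# Scale the web Replication Controller from 2 replicas to 4.
$ kubectl scale --replicas=4 rc web-controller

# Watch the extra pods come up.
$ kubectl get pods
```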
+ diff --git a/_includes/docs/docs/getting-started-guides/mesos-docker.md b/_includes/docs/docs/getting-started-guides/mesos-docker.md new file mode 100644 index 0000000000..2fc6f7e68d --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/mesos-docker.md @@ -0,0 +1,260 @@ + + +The mesos/docker provider uses docker-compose to launch Kubernetes as a Mesos framework, running in docker with its +dependencies (etcd & mesos). + +* TOC +{:toc} + +## Cluster Goals + +- kubernetes development +- pod/service development +- demoing +- fast deployment +- minimal hardware requirements +- minimal configuration +- entry point for exploration +- simplified networking +- fast end-to-end tests +- local deployment + +Non-Goals: + +- high availability +- fault tolerance +- remote deployment +- production usage +- monitoring +- long running +- state persistence across restarts + +## Cluster Topology + +The cluster consists of several docker containers linked together by docker-managed hostnames: + +| Component | Hostname | Description | +|-------------------------------|-----------------------------|-----------------------------------------------------------------------------------------| +| docker-grand-ambassador | | Proxy to allow circular hostname linking in docker | +| etcd | etcd | Key/Value store used by Mesos | +| Mesos Master | mesosmaster1 | REST endpoint for interacting with Mesos | +| Mesos Slave (x2) | mesosslave1, mesosslave2 | Mesos agents that offer resources and run framework executors (e.g. Kubernetes Kublets) | +| Kubernetes API Server | apiserver | REST endpoint for interacting with Kubernetes | +| Kubernetes Controller Manager | controller | | +| Kubernetes Scheduler | scheduler | Schedules container deployment by accepting Mesos offers | + +## Prerequisites + +Required: + +- [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) - version control system +- [Docker CLI](https://docs.docker.com/) - container management command line client +- [Docker Engine](https://docs.docker.com/) - container management daemon + - On Mac, use [Boot2Docker](http://boot2docker.io/) or [Docker Machine](https://docs.docker.com/machine/install-machine/) +- [Docker Compose](https://docs.docker.com/compose/install/) - multi-container application orchestration + +Optional: + +- [Virtual Box](https://www.virtualbox.org/wiki/Downloads) - x86 hardware virtualizer + - Required by Boot2Docker and Docker Machine +- [Golang](https://golang.org/doc/install) - Go programming language + - Required to build Kubernetes locally +- [Make](https://en.wikipedia.org/wiki/Make_(software)) - Utility for building executables from source + - Required to build Kubernetes locally with make + +### Install on Mac (Homebrew) + +It's possible to install all of the above via [Homebrew](http://brew.sh/) on a Mac. + +Some steps print instructions for configuring or launching. Make sure each is properly set up before continuing to the next step. + + brew install git + brew install caskroom/cask/brew-cask + brew cask install virtualbox + brew install docker + brew install boot2docker + boot2docker init + boot2docker up + brew install docker-compose + +### Install on Linux + +Most of the above are available via apt and yum, but depending on your distribution, you may have to install via other +means to get the latest versions. + +It is recommended to use Ubuntu, simply because it best supports AUFS, used by docker to mount volumes. Alternate file +systems may not fully support docker-in-docker. 
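On Ubuntu, for example, a minimal install might look roughly like this (the package names and the Docker Compose version below are assumptions; prefer the official instructions linked above for current versions):

    # Version control, build tools, and Go from the distribution repositories.
    sudo apt-get update
    sudo apt-get install -y git make golang

    # Docker Engine as packaged by Ubuntu (docs.docker.com offers newer packages).
    sudo apt-get install -y docker.io

    # Docker Compose as a standalone binary; pick a release compatible with your Docker Engine.
    sudo curl -L "https://github.com/docker/compose/releases/download/1.5.2/docker-compose-$(uname -s)-$(uname -m)" \
      -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose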
+ +In order to build Kubernetes, the current user must be in a docker group with sudo privileges. +See the docker docs for [instructions](https://docs.docker.com/installation/ubuntulinux/#create-a-docker-group). + + +### Boot2Docker Config (Mac) + +If on a mac using boot2docker, the following steps will make the docker IPs (in the virtualbox VM) reachable from the +host machine (mac). + +1. Set the VM's host-only network to "promiscuous mode": + + boot2docker stop + VBoxManage modifyvm boot2docker-vm --nicpromisc2 allow-all + boot2docker start + + This allows the VM to accept packets that were sent to a different IP. + + Since the host-only network routes traffic between VMs and the host, other VMs will also be able to access the docker + IPs, if they have the following route. + +1. Route traffic to docker through the boot2docker IP: + + sudo route -n add -net 172.17.0.0 $(boot2docker ip) + + Since the boot2docker IP can change when the VM is restarted, this route may need to be updated over time. + To delete the route later: `sudo route delete 172.17.0.0` + + +## Walkthrough + +1. Checkout source + + git clone https://github.com/kubernetes/kubernetes + cd kubernetes + + By default, that will get you the bleeding edge of master branch. + You may want a [release branch](https://github.com/kubernetes/kubernetes/releases) instead, + if you have trouble with master. + +1. Build binaries + + You'll need to build kubectl (CLI) for your local architecture and operating system and the rest of the server binaries for linux/amd64. + + Building a new release covers both cases: + + KUBERNETES_CONTRIB=mesos build/release.sh + + For developers, it may be faster to [build locally](#build-locally). + +1. [Optional] Build docker images + + The following docker images are built as part of `./cluster/kube-up.sh`, but it may make sense to build them manually the first time because it may take a while. + + 1. Test image includes all the dependencies required for running e2e tests. + + ./cluster/mesos/docker/test/build.sh + + In the future, this image may be available to download. It doesn't contain anything specific to the current release, except its build dependencies. + + 1. Kubernetes-Mesos image includes the compiled linux binaries. + + ./cluster/mesos/docker/km/build.sh + + This image needs to be built every time you recompile the server binaries. + +1. [Optional] Configure Mesos resources + + By default, the mesos-slaves are configured to offer a fixed amount of resources (cpus, memory, disk, ports). + If you want to customize these values, update the `MESOS_RESOURCES` environment variables in `./cluster/mesos/docker/docker-compose.yml`. + If you delete the `MESOS_RESOURCES` environment variables, the resource amounts will be auto-detected based on the host resources, which will over-provision by > 2x. + + If the configured resources are not available on the host, you may want to increase the resources available to Docker Engine. + You may have to increase you VM disk, memory, or cpu allocation in VirtualBox, + [Docker Machine](https://docs.docker.com/machine/#oracle-virtualbox), or + [Boot2Docker](https://ryanfb.github.io/etc/2015/01/28/increasing_boot2docker_allocations_on_os_x). + +1. Configure provider + + export KUBERNETES_PROVIDER=mesos/docker + + This tells cluster scripts to use the code within `cluster/mesos/docker`. + +1. 
Create cluster + + ./cluster/kube-up.sh + + If you manually built all the above docker images, you can skip that step during kube-up: + + MESOS_DOCKER_SKIP_BUILD=true ./cluster/kube-up.sh + + After deploying the cluster, `~/.kube/config` will be created or updated to configure kubectl to target the new cluster. + +1. Explore examples + + To learn more about Pods, Volumes, Labels, Services, and Replication Controllers, start with the + [Kubernetes Walkthrough](/{{page.version}}/docs/user-guide/walkthrough/). + + To skip to a more advanced example, see the [Guestbook Example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/) + +1. Destroy cluster + + ./cluster/kube-down.sh + +## Addons + +The `kube-up` for the mesos/docker provider will automatically deploy KubeDNS and KubeUI addons as pods/services. + +Check their status with: + + ./cluster/kubectl.sh get pods --namespace=kube-system + +### KubeUI + +The web-based Kubernetes UI is accessible in a browser through the API Server proxy: `https://:6443/ui/`. + +By default, basic-auth is configured with user `admin` and password `admin`. + +The IP of the API Server can be found using `./cluster/kubectl.sh cluster-info`. + + +## End To End Testing + +Warning: e2e tests can take a long time to run. You may not want to run them immediately if you're just getting started. + +While your cluster is up, you can run the end-to-end tests: + + ./cluster/test-e2e.sh + +Notable parameters: +- Increase the logging verbosity: `-v=2` +- Run only a subset of the tests (regex matching): `-ginkgo.focus=` + +To build, deploy, test, and destroy, all in one command (plus unit & integration tests): + + make test_e2e + +## Kubernetes CLI + +When compiling from source, it's simplest to use the `./cluster/kubectl.sh` script, which detects your platform & +architecture and proxies commands to the appropriate `kubectl` binary. + + `./cluster/kubectl.sh get pods` + + +## Helpful scripts + +Kill all docker containers + + docker ps -q -a | xargs docker rm -f + +Clean up unused docker volumes + + docker run -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/docker:/var/lib/docker --rm martin/docker-cleanup-volumes + +## Build Locally + +The steps above tell you how to build in a container, for minimal local dependencies. But if you have Go and Make installed you can build locally much faster: + + KUBERNETES_CONTRIB=mesos make + +However, if you're not on linux, you'll still need to compile the linux/amd64 server binaries: + + KUBERNETES_CONTRIB=mesos build/run.sh hack/build-go.sh + +The above two steps should be significantly faster than cross-compiling a whole new release for every supported platform (which is what `./build/release.sh` does). 
+ +Breakdown: + +- `KUBERNETES_CONTRIB=mesos` - enables building of the contrib/mesos binaries +- `hack/build-go.sh` - builds the Go binaries for the current architecture (linux/amd64 when in a docker container) +- `make` - delegates to `hack/build-go.sh` +- `build/run.sh` - executes a command in the build container +- `build/release.sh` - cross compiles Kubernetes for all supported architectures and operating systems (slow) \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/mesos.md b/_includes/docs/docs/getting-started-guides/mesos.md new file mode 100644 index 0000000000..84e765b4b0 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/mesos.md @@ -0,0 +1,333 @@ + +* TOC +{:toc} + +## About Kubernetes on Mesos + + + +Mesos allows dynamic sharing of cluster resources between Kubernetes and other first-class Mesos frameworks such as [Hadoop][1], [Spark][2], and [Chronos][3]. +Mesos also ensures applications from different frameworks running on your cluster are isolated and that resources are allocated fairly among them. + +Mesos clusters can be deployed on nearly every IaaS cloud provider infrastructure or in your own physical datacenter. Kubernetes on Mesos runs on-top of that and therefore allows you to easily move Kubernetes workloads from one of these environments to the other. + +This tutorial will walk you through setting up Kubernetes on a Mesos cluster. +It provides a step by step walk through of adding Kubernetes to a Mesos cluster and starting your first pod with an nginx webserver. + +**NOTE:** There are [known issues with the current implementation][7] and support for centralized logging and monitoring is not yet available. +Please [file an issue against the kubernetes-mesos project][8] if you have problems completing the steps below. + +Further information is available in the Kubernetes on Mesos [contrib directory][13]. + +### Prerequisites + +- Understanding of [Apache Mesos][6] +- A running [Mesos cluster on Google Compute Engine][5] +- A [VPN connection][10] to the cluster +- A machine in the cluster which should become the Kubernetes *master node* with: + - GoLang > 1.2 + - make (i.e. build-essential) + - Docker + +**Note**: You *can*, but you *don't have to* deploy Kubernetes-Mesos on the same machine the Mesos master is running on. + +### Deploy Kubernetes-Mesos + +Log into the future Kubernetes *master node* over SSH, replacing the placeholder below with the correct IP address. + +```shell +ssh jclouds@${ip_address_of_master_node} +``` + +Build Kubernetes-Mesos. + +```shell +git clone https://github.com/kubernetes/kubernetes +cd kubernetes +export KUBERNETES_CONTRIB=mesos +make +``` + +Set some environment variables. +The internal IP address of the master may be obtained via `hostname -i`. + +```shell +export KUBERNETES_MASTER_IP=$(hostname -i) +export KUBERNETES_MASTER=http://${KUBERNETES_MASTER_IP}:8888 +``` + +Note that KUBERNETES_MASTER is used as the api endpoint. If you have existing `~/.kube/config` and point to another endpoint, you need to add option `--server=${KUBERNETES_MASTER}` to kubectl in later steps. 
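For example, with an existing `~/.kube/config` in place, later `kubectl` calls would look something like this (a sketch):

```shell
# Target the Kubernetes-Mesos apiserver explicitly instead of whatever
# cluster ~/.kube/config currently points at.
kubectl --server=${KUBERNETES_MASTER} get pods
kubectl --server=${KUBERNETES_MASTER} get services
```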
+ +### Deploy etcd + +Start etcd and verify that it is running: + +```shell +sudo docker run -d --hostname $(uname -n) --name etcd \ + -p 4001:4001 -p 7001:7001 quay.io/coreos/etcd:v2.0.12 \ + --listen-client-urls http://0.0.0.0:4001 \ + --advertise-client-urls http://${KUBERNETES_MASTER_IP}:4001 +``` + +```shell +$ sudo docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +fd7bac9e2301 quay.io/coreos/etcd:v2.0.12 "/etcd" 5s ago Up 3s 2379/tcp, 2380/... etcd +``` + +It's also a good idea to ensure your etcd instance is reachable by testing it + +```shell +curl -L http://${KUBERNETES_MASTER_IP}:4001/v2/keys/ +``` + +If connectivity is OK, you will see an output of the available keys in etcd (if any). + +### Start Kubernetes-Mesos Services + +Update your PATH to more easily run the Kubernetes-Mesos binaries: + +```shell +export PATH="$(pwd)/_output/local/go/bin:$PATH" +``` + +Identify your Mesos master: depending on your Mesos installation this is either a `host:port` like `mesos-master:5050` or a ZooKeeper URL like `zk://zookeeper:2181/mesos`. +In order to let Kubernetes survive Mesos master changes, the ZooKeeper URL is recommended for production environments. + +```shell +export MESOS_MASTER= +``` + +Create a cloud config file `mesos-cloud.conf` in the current directory with the following contents: + +```shell +$ cat <mesos-cloud.conf +[mesos-cloud] + mesos-master = ${MESOS_MASTER} +EOF +``` + +Now start the kubernetes-mesos API server, controller manager, and scheduler on the master node: + +```shell +$ km apiserver \ + --address=${KUBERNETES_MASTER_IP} \ + --etcd-servers=http://${KUBERNETES_MASTER_IP}:4001 \ + --service-cluster-ip-range=10.10.10.0/24 \ + --port=8888 \ + --cloud-provider=mesos \ + --cloud-config=mesos-cloud.conf \ + --secure-port=0 \ + --v=1 >apiserver.log 2>&1 & + +$ km controller-manager \ + --master=${KUBERNETES_MASTER_IP}:8888 \ + --cloud-provider=mesos \ + --cloud-config=./mesos-cloud.conf \ + --v=1 >controller.log 2>&1 & + +$ km scheduler \ + --address=${KUBERNETES_MASTER_IP} \ + --mesos-master=${MESOS_MASTER} \ + --etcd-servers=http://${KUBERNETES_MASTER_IP}:4001 \ + --mesos-user=root \ + --api-servers=${KUBERNETES_MASTER_IP}:8888 \ + --cluster-dns=10.10.10.10 \ + --cluster-domain=cluster.local \ + --v=2 >scheduler.log 2>&1 & +``` + +Disown your background jobs so that they'll stay running if you log out. + +```shell +disown -a +``` + +#### Validate KM Services + +Add the appropriate binary folder to your `PATH` to access kubectl: + +```shell +export PATH=/platforms/linux/amd64:$PATH +``` + +Interact with the kubernetes-mesos framework via `kubectl`: + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +``` + +```shell +# NOTE: your service IPs will likely differ +$ kubectl get services +NAME LABELS SELECTOR IP(S) PORT(S) +k8sm-scheduler component=scheduler,provider=k8sm 10.10.10.113 10251/TCP +kubernetes component=apiserver,provider=kubernetes 10.10.10.1 443/TCP +``` + +Lastly, look for Kubernetes in the Mesos web GUI by pointing your browser to +`http://`. Make sure you have an active VPN connection. +Go to the Frameworks tab, and look for an active framework named "Kubernetes". 
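If the framework does not show up, first confirm that the three `km` services are still running and peek at the log files they were started with (plain shell, nothing Kubernetes-specific):

```shell
# The apiserver, controller manager, and scheduler were started in the background above.
$ pgrep -fl km

# Their output was redirected to these log files in the current directory.
$ tail -n 20 apiserver.log controller.log scheduler.log
```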
+ +## Spin up a pod + +Write a JSON pod description to a local file: + +```shell +$ cat <nginx.yaml +``` + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 +EOPOD +``` + +Send the pod description to Kubernetes using the `kubectl` CLI: + +```shell +$ kubectl create -f ./nginx.yaml +pods/nginx +``` + +Wait a minute or two while `dockerd` downloads the image layers from the internet. +We can use the `kubectl` interface to monitor the status of our pod: + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +nginx 1/1 Running 0 14s +``` + +Verify that the pod task is running in the Mesos web GUI. Click on the +Kubernetes framework. The next screen should show the running Mesos task that +started the Kubernetes pod. + +## Launching kube-dns + +Kube-dns is an addon for Kubernetes which adds DNS-based service discovery to the cluster. For a detailed explanation see [DNS in Kubernetes][4]. + +The kube-dns addon runs as a pod inside the cluster. The pod consists of three co-located containers: + +- a local etcd instance +- the [skydns][11] DNS server +- the kube2sky process to glue skydns to the state of the Kubernetes cluster. + +The skydns container offers DNS service via port 53 to the cluster. The etcd communication works via local 127.0.0.1 communication + +We assume that kube-dns will use + +- the service IP `10.10.10.10` +- and the `cluster.local` domain. + +Note that we have passed these two values already as parameter to the apiserver above. + +A template for an replication controller spinning up the pod with the 3 containers can be found at [cluster/addons/dns/skydns-rc.yaml.in][11] in the repository. The following steps are necessary in order to get a valid replication controller yaml file: + +- replace `{{ pillar['dns_replicas'] }}` with `1` +- replace `{{ pillar['dns_domain'] }}` with `cluster.local.` +- add `--kube_master_url=${KUBERNETES_MASTER}` parameter to the kube2sky container command. + +In addition the service template at [cluster/addons/dns/skydns-svc.yaml.in][12] needs the following replacement: + +- `{{ pillar['dns_server'] }}` with `10.10.10.10`. + +To do this automatically: + +```shell +sed -e "s/{{ pillar\['dns_replicas'\] }}/1/g;"\ +"s,\(command = \"/kube2sky\"\),\\1\\"$'\n'" - --kube_master_url=${KUBERNETES_MASTER},;"\ +"s/{{ pillar\['dns_domain'\] }}/cluster.local/g" \ + cluster/addons/dns/skydns-rc.yaml.in > skydns-rc.yaml +sed -e "s/{{ pillar\['dns_server'\] }}/10.10.10.10/g" \ + cluster/addons/dns/skydns-svc.yaml.in > skydns-svc.yaml +``` + +Now the kube-dns pod and service are ready to be launched: + +```shell +kubectl create -f ./skydns-rc.yaml +kubectl create -f ./skydns-svc.yaml +``` + +Check with `kubectl get pods --namespace=kube-system` that 3/3 containers of the pods are eventually up and running. Note that the kube-dns pods run in the `kube-system` namespace, not in `default`. + +To check that the new DNS service in the cluster works, we start a busybox pod and use that to do a DNS lookup. 
First create the `busybox.yaml` pod spec: + +```shell +cat <busybox.yaml +``` + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: busybox + namespace: default +spec: + containers: + - image: busybox + command: + - sleep + - "3600" + imagePullPolicy: IfNotPresent + name: busybox + restartPolicy: Always +EOF +``` + +Then start the pod: + +```shell +kubectl create -f ./busybox.yaml +``` + +When the pod is up and running, start a lookup for the Kubernetes master service, made available on 10.10.10.1 by default: + +```shell +kubectl exec busybox -- nslookup kubernetes +``` + +If everything works fine, you will get this output: + +```shell +Server: 10.10.10.10 +Address 1: 10.10.10.10 + +Name: kubernetes +Address 1: 10.10.10.1 +``` + +## What next? + +Try out some of the standard [Kubernetes examples][9]. + +Read about Kubernetes on Mesos' architecture in the [contrib directory][13]. + +**NOTE:** Some examples require Kubernetes DNS to be installed on the cluster. +Future work will add instructions to this guide to enable support for Kubernetes DNS. + +**NOTE:** Please be aware that there are [known issues with the current Kubernetes-Mesos implementation][7]. + +[1]: http://mesosphere.com/docs/tutorials/run-hadoop-on-mesos-using-installer +[2]: http://mesosphere.com/docs/tutorials/run-spark-on-mesos +[3]: http://mesosphere.com/docs/tutorials/run-chronos-on-mesos +[4]: https://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/README.md +[5]: http://open.mesosphere.com/getting-started/cloud/google/mesosphere/ +[6]: http://mesos.apache.org/ +[7]: https://releases.k8s.io/{{page.githubbranch}}/contrib/mesos/docs/issues.md +[8]: https://github.com/mesosphere/kubernetes-mesos/issues +[9]: https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples +[10]: http://open.mesosphere.com/getting-started/cloud/google/mesosphere/#vpn-setup +[11]: https://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/skydns-rc.yaml.in +[12]: https://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/skydns-svc.yaml.in +[13]: https://releases.k8s.io/{{page.githubbranch}}/contrib/mesos/README.md \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/ovirt.md b/_includes/docs/docs/getting-started-guides/ovirt.md new file mode 100644 index 0000000000..1aedfbd8f8 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/ovirt.md @@ -0,0 +1,48 @@ + +* TOC +{:toc} + +## What is oVirt + +oVirt is a virtual datacenter manager that delivers powerful management of multiple virtual machines on multiple hosts. Using KVM and libvirt, oVirt can be installed on Fedora, CentOS, or Red Hat Enterprise Linux hosts to set up and manage your virtual data center. + +## oVirt Cloud Provider Deployment + +The oVirt cloud provider allows to easily discover and automatically add new VM instances as nodes to your Kubernetes cluster. +At the moment there are no community-supported or pre-loaded VM images including Kubernetes but it is possible to [import] or [install] Project Atomic (or Fedora) in a VM to [generate a template]. Any other distribution that includes Kubernetes may work as well. + +It is mandatory to [install the ovirt-guest-agent] in the guests for the VM ip address and hostname to be reported to ovirt-engine and ultimately to Kubernetes. + +Once the Kubernetes template is available it is possible to start instantiating VMs that can be discovered by the cloud provider. 
+ +[import]: http://ovedou.blogspot.it/2014/03/importing-glance-images-as-ovirt.html +[install]: http://www.ovirt.org/Quick_Start_Guide#Create_Virtual_Machines +[generate a template]: http://www.ovirt.org/Quick_Start_Guide#Using_Templates +[install the ovirt-guest-agent]: http://www.ovirt.org/How_to_install_the_guest_agent_in_Fedora + +## Using the oVirt Cloud Provider + +The oVirt Cloud Provider requires access to the oVirt REST-API to gather the proper information, the required credential should be specified in the `ovirt-cloud.conf` file: + + [connection] + uri = https://localhost:8443/ovirt-engine/api + username = admin@internal + password = admin + +In the same file it is possible to specify (using the `filters` section) what search query to use to identify the VMs to be reported to Kubernetes: + + [filters] + # Search query used to find nodes + vms = tag=kubernetes + +In the above example all the VMs tagged with the `kubernetes` label will be reported as nodes to Kubernetes. + +The `ovirt-cloud.conf` file then must be specified in kube-controller-manager: + + kube-controller-manager ... --cloud-provider=ovirt --cloud-config=/path/to/ovirt-cloud.conf ... + +## oVirt Cloud Provider Screencast + +This short screencast demonstrates how the oVirt Cloud Provider can be used to dynamically add VMs to your Kubernetes cluster. + +[![Screencast](http://img.youtube.com/vi/JyyST4ZKne8/0.jpg)](http://www.youtube.com/watch?v=JyyST4ZKne8) \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/rackspace.md b/_includes/docs/docs/getting-started-guides/rackspace.md new file mode 100644 index 0000000000..7a862f960e --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/rackspace.md @@ -0,0 +1,61 @@ + +* Supported Version: v0.18.1 + +In general, the dev-build-and-up.sh workflow for Rackspace is the similar to Google Compute Engine. The specific implementation is different due to the use of CoreOS, Rackspace Cloud Files and the overall network design. + +These scripts should be used to deploy development environments for Kubernetes. If your account leverages RackConnect or non-standard networking, these scripts will most likely not work without modification. + +NOTE: The rackspace scripts do NOT rely on `saltstack` and instead rely on cloud-init for configuration. + +The current cluster design is inspired by: + +- [corekube](https://github.com/metral/corekube) +- [Angus Lees](https://github.com/anguslees/kube-openstack) + +* TOC +{:toc} + +## Prerequisites + +1. Python2.7 +2. You need to have both `nova` and `swiftly` installed. It's recommended to use a python virtualenv to install these packages into. +3. Make sure you have the appropriate environment variables set to interact with the OpenStack APIs. See [Rackspace Documentation](http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/section_gs_install_nova) for more details. + +## Provider: Rackspace + +- To build your own released version from source use `export KUBERNETES_PROVIDER=rackspace` and run the `bash hack/dev-build-and-up.sh` +- Note: The get.k8s.io install method is not working yet for our scripts. + * To install the latest released version of Kubernetes use `export KUBERNETES_PROVIDER=rackspace; wget -q -O - https://get.k8s.io | bash` + +## Build + +1. The Kubernetes binaries will be built via the common build scripts in `build/`. +2. If you've set the ENV `KUBERNETES_PROVIDER=rackspace`, the scripts will upload `kubernetes-server-linux-amd64.tar.gz` to Cloud Files. +2. 
A cloud files container will be created via the `swiftly` CLI and a temp URL will be enabled on the object. +3. The built `kubernetes-server-linux-amd64.tar.gz` will be uploaded to this container and the URL will be passed to master/nodes when booted. + +## Cluster + +There is a specific `cluster/rackspace` directory with the scripts for the following steps: + +1. A cloud network will be created and all instances will be attached to this network. + - flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network. +2. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines (we do not capture the password). +3. The master server and additional nodes will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems. +4. We then boot as many nodes as defined via `$NUM_MINIONS`. + +## Some notes + +- The scripts expect `eth2` to be the cloud network that the containers will communicate across. +- A number of the items in `config-default.sh` are overridable via environment variables. +- For older versions please either: + * Sync back to `v0.9` with `git checkout v0.9` + * Download a [snapshot of `v0.9`](https://github.com/kubernetes/kubernetes/archive/v0.9.tar.gz) + * Sync back to `v0.3` with `git checkout v0.3` + * Download a [snapshot of `v0.3`](https://github.com/kubernetes/kubernetes/archive/v0.3.tar.gz) + +## Network Design + +- eth0 - Public Interface used for servers/containers to reach the internet +- eth1 - ServiceNet - Intra-cluster communication (k8s, etcd, etc) communicate via this interface. The `cloud-config` files use the special CoreOS identifier `$private_ipv4` to configure the services. +- eth2 - Cloud Network - Used for k8s pods to communicate with one another. The proxy service will pass traffic via this interface. \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/rkt/index.md b/_includes/docs/docs/getting-started-guides/rkt/index.md new file mode 100644 index 0000000000..635b2f56ee --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/rkt/index.md @@ -0,0 +1,136 @@ + +This document describes how to run Kubernetes using [rkt](https://github.com/coreos/rkt) as a container runtime. +We still have [a bunch of work](http://issue.k8s.io/8262) to do to make the experience with rkt wonderful, please stay tuned! + +### **Prerequisite** + +- [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on the machine and should be enabled. The minimum version required at this moment (2015/09/01) is 219 + *(Note that systemd is not required by rkt itself, we are using it here to monitor and manage the pods launched by kubelet.)* + +- Install the latest rkt release according to the instructions [here](https://github.com/coreos/rkt). + The minimum version required for now is [v0.8.0](https://github.com/coreos/rkt/releases/tag/v0.8.0). + +- Note that for rkt version later than v0.7.0, `metadata service` is not required for running pods in private networks. So now rkt pods will not register the metadata service be default. + +### Local cluster + +To use rkt as the container runtime, we need to supply `--container-runtime=rkt` and `--rkt-path=$PATH_TO_RKT_BINARY` to kubelet. 
Additionally we can provide `--rkt-stage1-image` flag +as well to select which [stage1 image](https://github.com/coreos/rkt/blob/master/Documentation/running-lkvm-stage1.md) we want to use. + +If you are using the [hack/local-up-cluster.sh](https://releases.k8s.io/{{page.githubbranch}}/hack/local-up-cluster.sh) script to launch the local cluster, then you can edit the environment variable `CONTAINER_RUNTIME`, `RKT_PATH` and `RKT_STAGE1_IMAGE` to +set these flags: + +```shell +$ export CONTAINER_RUNTIME=rkt +$ export RKT_PATH=$PATH_TO_RKT_BINARY +$ export RKT_STAGE1_IMAGE=PATH=$PATH_TO_STAGE1_IMAGE +``` + +Then we can launch the local cluster using the script: + +```shell +$ hack/local-up-cluster.sh +``` + +### CoreOS cluster on Google Compute Engine (GCE) + +To use rkt as the container runtime for your CoreOS cluster on GCE, you need to specify the OS distribution, project, image: + +```shell +$ export KUBE_OS_DISTRIBUTION=coreos +$ export KUBE_GCE_MINION_IMAGE= +$ export KUBE_GCE_MINION_PROJECT=coreos-cloud +$ export KUBE_CONTAINER_RUNTIME=rkt +``` + +You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`: + +```shell +$ export KUBE_RKT_VERSION=0.8.0 +``` + +Then you can launch the cluster by: + +```shell +$ kube-up.sh +``` + +Note that we are still working on making all containerized the master components run smoothly in rkt. Before that we are not able to run the master node with rkt yet. + +### CoreOS cluster on AWS + +To use rkt as the container runtime for your CoreOS cluster on AWS, you need to specify the provider and OS distribution: + +```shell +$ export KUBERNETES_PROVIDER=aws +$ export KUBE_OS_DISTRIBUTION=coreos +$ export KUBE_CONTAINER_RUNTIME=rkt +``` + +You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`: + +```shell +$ export KUBE_RKT_VERSION=0.8.0 +``` + +You can optionally choose the CoreOS channel by setting `COREOS_CHANNEL`: + +```shell +$ export COREOS_CHANNEL=stable +``` + +Then you can launch the cluster by: + +```shell +$ kube-up.sh +``` + +Note: CoreOS is not supported as the master using the automated launch +scripts. The master node is always Ubuntu. + +### Getting started with your cluster + +See [a simple nginx example](/{{page.version}}/docs/user-guide/simple-nginx) to try out your new cluster. + +For more complete applications, please look in the [examples directory](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/). + + +### Debugging + +Here are severals tips for you when you run into any issues. + +##### Check logs + +By default, the log verbose level is 2. In order to see more logs related to rkt, we can set the verbose level to 4. +For local cluster, we can set the environment variable: `LOG_LEVEL=4`. +If the cluster is using salt, we can edit the [logging.sls](https://releases.k8s.io/{{page.githubbranch}}/cluster/saltbase/pillar/logging.sls) in the saltbase. + +##### Check rkt pod status + +To check the pods' status, we can use rkt command, such as `rkt list`, `rkt status`, `rkt image list`, etc. 
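For example (rkt typically needs root, and `$UUID` is a placeholder for a pod UUID taken from the listing):

```shell
# List all rkt pods on this machine, including exited ones.
$ sudo rkt list

# Show the detailed state of a single pod.
$ sudo rkt status $UUID

# List the container images rkt has fetched locally.
$ sudo rkt image list
```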
+More information about rkt command line can be found [here](https://github.com/coreos/rkt/blob/master/Documentation/commands.md) + +##### Check journal logs + +As we use systemd to launch rkt pods(by creating service files which will run `rkt run-prepared`, we can check the pods' log +using `journalctl`: + +- Check the running state of the systemd service: + +```shell +$ sudo journalctl -u $SERVICE_FILE +``` + +where `$SERVICE_FILE` is the name of the service file created for the pod, you can find it in the kubelet logs. + +##### Check the log of the container in the pod: + +```shell +$ sudo journalctl -M rkt-$UUID -u $CONTAINER_NAME +``` + +where `$UUID` is the rkt pod's UUID, which you can find via `rkt list --full`, and `$CONTAINER_NAME` is the container's name. + +##### Check Kubernetes events, logs. + +Besides above tricks, Kubernetes also provides us handy tools for debugging the pods. More information can be found [here](/{{page.version}}/docs/user-guide/application-troubleshooting) \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/scratch.md b/_includes/docs/docs/getting-started-guides/scratch.md new file mode 100644 index 0000000000..f1fc11a147 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/scratch.md @@ -0,0 +1,826 @@ + +This guide is for people who want to craft a custom Kubernetes cluster. If you +can find an existing Getting Started Guide that meets your needs on [this +list](/{{page.version}}/docs/getting-started-guides/README/), then we recommend using it, as you will be able to benefit +from the experience of others. However, if you have specific IaaS, networking, +configuration management, or operating system requirements not met by any of +those guides, then this guide will provide an outline of the steps you need to +take. Note that it requires considerably more effort than using one of the +pre-defined guides. + +This guide is also useful for those wanting to understand at a high level some of the +steps that existing cluster setup scripts are making. + +* TOC +{:toc} + +## Designing and Preparing + +### Learning + + 1. You should be familiar with using Kubernetes already. We suggest you set + up a temporary cluster by following one of the other Getting Started Guides. + This will help you become familiar with the CLI ([kubectl](/{{page.version}}/docs/user-guide/kubectl/kubectl)) and concepts ([pods](/{{page.version}}/docs/user-guide/pods), [services](/{{page.version}}/docs/user-guide/services), etc.) first. + 1. You should have `kubectl` installed on your desktop. This will happen as a side + effect of completing one of the other Getting Started Guides. If not, follow the instructions + [here](/{{page.version}}/docs/user-guide/prereqs). + +### Cloud Provider + +Kubernetes has the concept of a Cloud Provider, which is a module which provides +an interface for managing TCP Load Balancers, Nodes (Instances) and Networking Routes. +The interface is defined in `pkg/cloudprovider/cloud.go`. It is possible to +create a custom cluster without implementing a cloud provider (for example if using +bare-metal), and not all parts of the interface need to be implemented, depending +on how flags are set on various components. + +### Nodes + +- You can use virtual or physical machines. +- While you can build a cluster with 1 machine, in order to run all the examples and tests you + need at least 4 nodes. +- Many Getting-started-guides make a distinction between the master node and regular nodes. This + is not strictly necessary. 
+- Nodes will need to run some version of Linux with the x86_64 architecture. It may be possible + to run on other OSes and Architectures, but this guide does not try to assist with that. +- Apiserver and etcd together are fine on a machine with 1 core and 1GB RAM for clusters with 10s of nodes. + Larger or more active clusters may benefit from more cores. +- Other nodes can have any reasonable amount of memory and any number of cores. They need not + have identical configurations. + +### Network + +Kubernetes has a distinctive [networking model](/{{page.version}}/docs/admin/networking). + +Kubernetes allocates an IP address to each pod. When creating a cluster, you +need to allocate a block of IPs for Kubernetes to use as Pod IPs. The simplest +approach is to allocate a different block of IPs to each node in the cluster as +the node is added. A process in one pod should be able to communicate with +another pod using the IP of the second pod. This connectivity can be +accomplished in two ways: + +- Configure network to route Pod IPs + - Harder to setup from scratch. + - Google Compute Engine ([GCE](/{{page.version}}/docs/getting-started-guides/gce)) and [AWS](/{{page.version}}/docs/getting-started-guides/aws) guides use this approach. + - Need to make the Pod IPs routable by programming routers, switches, etc. + - Can be configured external to Kubernetes, or can implement in the "Routes" interface of a Cloud Provider module. + - Generally highest performance. +- Create an Overlay network + - Easier to setup + - Traffic is encapsulated, so per-pod IPs are routable. + - Examples: + - [Flannel](https://github.com/coreos/flannel) + - [Weave](http://weave.works/) + - [Open vSwitch (OVS)](http://openvswitch.org/) + - Does not require "Routes" portion of Cloud Provider module. + - Reduced performance (exactly how much depends on your solution). + +You need to select an address range for the Pod IPs. + +- Various approaches: + - GCE: each project has its own `10.0.0.0/8`. Carve off a `/16` for each + Kubernetes cluster from that space, which leaves room for several clusters. + Each node gets a further subdivision of this space. + - AWS: use one VPC for whole organization, carve off a chunk for each + cluster, or use different VPC for different clusters. + - IPv6 is not supported yet. +- Allocate one CIDR subnet for each node's PodIPs, or a single large CIDR + from which smaller CIDRs are automatically allocated to each node (if nodes + are dynamically added). + - You need max-pods-per-node * max-number-of-nodes IPs in total. A `/24` per + node supports 254 pods per machine and is a common choice. If IPs are + scarce, a `/26` (62 pods per machine) or even a `/27` (30 pods) may be sufficient. + - e.g. use `10.10.0.0/16` as the range for the cluster, with up to 256 nodes + using `10.10.0.0/24` through `10.10.255.0/24`, respectively. + - Need to make these routable or connect with overlay. + +Kubernetes also allocates an IP to each [service](/{{page.version}}/docs/user-guide/services). However, +service IPs do not necessarily need to be routable. The kube-proxy takes care +of translating Service IPs to Pod IPs before traffic leaves the node. You do +need to Allocate a block of IPs for services. Call this +`SERVICE_CLUSTER_IP_RANGE`. For example, you could set +`SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"`, allowing 65534 distinct services to +be active at once. Note that you can grow the end of this range, but you +cannot move it without disrupting the services and pods that already use it. 
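Pulling the examples together, the address plan for a small cluster might be captured in a few variables like these (the values are illustrative, not requirements):

```shell
# Pod IPs: one /24 per node, carved out of a /16 for the whole cluster.
CLUSTER_SUBNET="10.10.0.0/16"
NODE_1_POD_CIDR="10.10.1.0/24"
NODE_2_POD_CIDR="10.10.2.0/24"

# Service IPs: a separate block; these never need to be routable outside a node.
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"
```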
+ +Also, you need to pick a static IP for master node. +- Call this `MASTER_IP`. +- Open any firewalls to allow access to the apiserver ports 80 and/or 443. +- Enable ipv4 forwarding sysctl, `net.ipv4.ip_forward = 1` + +### Cluster Naming + +You should pick a name for your cluster. Pick a short name for each cluster +which is unique from future cluster names. This will be used in several ways: + + - by kubectl to distinguish between various clusters you have access to. You will probably want a + second one sometime later, such as for testing new Kubernetes releases, running in a different +region of the world, etc. + - Kubernetes clusters can create cloud provider resources (e.g. AWS ELBs) and different clusters + need to distinguish which resources each created. Call this `CLUSTERNAME`. + +### Software Binaries + +You will need binaries for: + + - etcd + - A container runner, one of: + - docker + - rkt + - Kubernetes + - kubelet + - kube-proxy + - kube-apiserver + - kube-controller-manager + - kube-scheduler + +#### Downloading and Extracting Kubernetes Binaries + +A Kubernetes binary release includes all the Kubernetes binaries as well as the supported release of etcd. +You can use a Kubernetes binary release (recommended) or build your Kubernetes binaries following the instructions in the +[Developer Documentation](/{{page.version}}/docs/devel/). Only using a binary release is covered in this guide. + +Download the [latest binary release](https://github.com/kubernetes/kubernetes/releases/latest) and unzip it. +Then locate `./kubernetes/server/kubernetes-server-linux-amd64.tar.gz` and unzip *that*. +Then, within the second set of unzipped files, locate `./kubernetes/server/bin`, which contains +all the necessary binaries. + +#### Selecting Images + +You will run docker, kubelet, and kube-proxy outside of a container, the same way you would run any system daemon, so +you just need the bare binaries. For etcd, kube-apiserver, kube-controller-manager, and kube-scheduler, +we recommend that you run these as containers, so you need an image to be built. + +You have several choices for Kubernetes images: + +- Use images hosted on Google Container Registry (GCR): + - e.g `gcr.io/google_containers/hyperkube:$TAG`, where `TAG` is the latest + release tag, which can be found on the [latest releases page](https://github.com/kubernetes/kubernetes/releases/latest). + - Ensure $TAG is the same tag as the release tag you are using for kubelet and kube-proxy. + - The [hyperkube](https://releases.k8s.io/{{page.githubbranch}}/cmd/hyperkube) binary is an all in one binary + - `hyperkube kubelet ...` runs the kublet, `hyperkube apiserver ...` runs an apiserver, etc. +- Build your own images. + - Useful if you are using a private registry. + - The release contains files such as `./kubernetes/server/bin/kube-apiserver.tar` which + can be converted into docker images using a command like + `docker load -i kube-apiserver.tar` + - You can verify if the image is loaded successfully with the right repository and tag using + command like `docker images` + +For etcd, you can: + +- Use images hosted on Google Container Registry (GCR), such as `gcr.io/google_containers/etcd:2.0.12` +- Use images hosted on [Docker Hub](https://hub.docker.com/search/?q=etcd) or [Quay.io](https://quay.io/repository/coreos/etcd), such as `quay.io/coreos/etcd:v2.2.0` +- Use etcd binary included in your OS distro. 
+- Build your own image + - You can do: `cd kubernetes/cluster/images/etcd; make` + +We recommend that you use the etcd version which is provided in the Kubernetes binary distribution. The Kubernetes binaries in the release +were tested extensively with this version of etcd and not with any other version. +The recommended version number can also be found as the value of `ETCD_VERSION` in `kubernetes/cluster/images/etcd/Makefile`. + +The remainder of the document assumes that the image identifiers have been chosen and stored in corresponding env vars. Examples (replace with latest tags and appropriate registry): + + - `HYPERKUBE_IMAGE==gcr.io/google_containers/hyperkube:$TAG` + - `ETCD_IMAGE=gcr.io/google_containers/etcd:$ETCD_VERSION` + +### Security Models + +There are two main options for security: + +- Access the apiserver using HTTP. + - Use a firewall for security. + - This is easier to setup. +- Access the apiserver using HTTPS + - Use https with certs, and credentials for user. + - This is the recommended approach. + - Configuring certs can be tricky. + +If following the HTTPS approach, you will need to prepare certs and credentials. + +#### Preparing Certs + +You need to prepare several certs: + +- The master needs a cert to act as an HTTPS server. +- The kubelets optionally need certs to identify themselves as clients of the master, and when + serving its own API over HTTPS. + +Unless you plan to have a real CA generate your certs, you will need to generate a root cert and use that to sign the master, kubelet, and kubectl certs: + +- See function `create-certs` in `cluster/gce/util.sh` +- See also `cluster/saltbase/salt/generate-cert/make-ca-cert.sh` and + `cluster/saltbase/salt/generate-cert/make-cert.sh` + +You will end up with the following files (we will use these variables later on): + +- `CA_CERT` + - put in on node where apiserver runs, in e.g. `/srv/kubernetes/ca.crt`. +- `MASTER_CERT` + - signed by CA_CERT + - put in on node where apiserver runs, in e.g. `/srv/kubernetes/server.crt` +- `MASTER_KEY ` + - put in on node where apiserver runs, in e.g. `/srv/kubernetes/server.key` +- `KUBELET_CERT` + - optional +- `KUBELET_KEY` + - optional + +#### Preparing Credentials + +The admin user (and any users) need: + + - a token or a password to identify them. + - tokens are just long alphanumeric strings, e.g. 32 chars. See + - `TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)` + +Your tokens and passwords need to be stored in a file for the apiserver +to read. This guide uses `/var/lib/kube-apiserver/known_tokens.csv`. +The format for this file is described in the [authentication documentation](/{{page.version}}/docs/admin/authentication). + +For distributing credentials to clients, the convention in Kubernetes is to put the credentials +into a [kubeconfig file](/{{page.version}}/docs/user-guide/kubeconfig-file). + +The kubeconfig file for the administrator can be created as follows: + + - If you have already used Kubernetes with a non-custom cluster (for example, used a Getting Started + Guide), you will already have a `$HOME/.kube/config` file. + - You need to add certs, keys, and the master IP to the kubeconfig file: + - If using the firewall-only security option, set the apiserver this way: + - `kubectl config set-cluster $CLUSTER_NAME --server=http://$MASTER_IP --insecure-skip-tls-verify=true` + - Otherwise, do this to set the apiserver ip, client certs, and user credentials. 
+ - `kubectl config set-cluster $CLUSTER_NAME --certificate-authority=$CA_CERT --embed-certs=true --server=https://$MASTER_IP` + - `kubectl config set-credentials $USER --client-certificate=$CLI_CERT --client-key=$CLI_KEY --embed-certs=true --token=$TOKEN` + - Set your cluster as the default cluster to use: + - `kubectl config set-context $CONTEXT_NAME --cluster=$CLUSTER_NAME --user=$USER` + - `kubectl config use-context $CONTEXT_NAME` + +Next, make a kubeconfig file for the kubelets and kube-proxy. There are a couple of options for how +many distinct files to make: + + 1. Use the same credential as the admin + - This is simplest to setup. + 1. One token and kubeconfig file for all kubelets, one for all kube-proxy, one for admin. + - This mirrors what is done on GCE today + 1. Different credentials for every kubelet, etc. + - We are working on this but all the pieces are not ready yet. + +You can make the files by copying the `$HOME/.kube/config`, by following the code +in `cluster/gce/configure-vm.sh` or by using the following template: + +```yaml +apiVersion: v1 +kind: Config +users: +- name: kubelet + user: + token: ${KUBELET_TOKEN} +clusters: +- name: local + cluster: + certificate-authority-data: ${CA_CERT_BASE64_ENCODED} +contexts: +- context: + cluster: local + user: kubelet + name: service-account-context +current-context: service-account-context +``` + +Put the kubeconfig(s) on every node. The examples later in this +guide assume that there are kubeconfigs in `/var/lib/kube-proxy/kubeconfig` and +`/var/lib/kubelet/kubeconfig`. + +## Configuring and Installing Base Software on Nodes + +This section discusses how to configure machines to be Kubernetes nodes. + +You should run three daemons on every node: + + - docker or rkt + - kubelet + - kube-proxy + +You will also need to do assorted other configuration on top of a +base OS install. + +Tip: One possible starting point is to setup a cluster using an existing Getting +Started Guide. After getting a cluster running, you can then copy the init.d scripts or systemd unit files from that +cluster, and then modify them for use on your custom cluster. + +### Docker + +The minimum required Docker version will vary as the kubelet version changes. The newest stable release is a good choice. Kubelet will log a warning and refuse to start pods if the version is too old, so pick a version and try it. + +If you previously had Docker installed on a node without setting Kubernetes-specific +options, you may have a Docker-created bridge and iptables rules. You may want to remove these +as follows before proceeding to configure Docker for Kubernetes. + +```shell +iptables -t nat -F +ifconfig docker0 down +brctl delbr docker0 +``` + +The way you configure docker will depend in whether you have chosen the routable-vip or overlay-network approaches for your network. +Some suggested docker options: + + - create your own bridge for the per-node CIDR ranges, call it cbr0, and set `--bridge=cbr0` option on docker. + - set `--iptables=false` so docker will not manipulate iptables for host-ports (too coarse on older docker versions, may be fixed in newer versions) +so that kube-proxy can manage iptables instead of docker. + - `--ip-masq=false` + - if you have setup PodIPs to be routable, then you want this false, otherwise, docker will + rewrite the PodIP source-address to a NodeIP. + - some environments (e.g. GCE) still need you to masquerade out-bound traffic when it leaves the cloud environment. This is very environment specific. 
+ - if you are using an overlay network, consult those instructions. + - `--mtu=` + - may be required when using Flannel, because of the extra packet size due to udp encapsulation + - `--insecure-registry $CLUSTER_SUBNET` + - to connect to a private registry, if you set one up, without using SSL. + +You may want to increase the number of open files for docker: + + - `DOCKER_NOFILE=1000000` + +Where this config goes depends on your node OS. For example, GCE's Debian-based distro uses `/etc/default/docker`. + +Ensure docker is working correctly on your system before proceeding with the rest of the +installation, by following examples given in the Docker documentation. + +### rkt + +[rkt](https://github.com/coreos/rkt) is an alternative to Docker. You only need to install one of Docker or rkt. +The minimum version required is [v0.5.6](https://github.com/coreos/rkt/releases/tag/v0.5.6). + +[systemd](http://www.freedesktop.org/wiki/Software/systemd/) is required on your node to run rkt. The +minimum version required to match rkt v0.5.6 is +[systemd 215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903). + +[rkt metadata service](https://github.com/coreos/rkt/blob/master/Documentation/networking.md) is also required +for rkt networking support. You can start rkt metadata service by using command like +`sudo systemd-run rkt metadata-service` + +Then you need to configure your kubelet with flag: + + - `--container-runtime=rkt` + +### kubelet + +All nodes should run kubelet. See [Selecting Binaries](#selecting-binaries). + +Arguments to consider: + + - If following the HTTPS security approach: + - `--api-servers=https://$MASTER_IP` + - `--kubeconfig=/var/lib/kubelet/kubeconfig` + - Otherwise, if taking the firewall-based security approach + - `--api-servers=http://$MASTER_IP` + - `--config=/etc/kubernetes/manifests` + - `--cluster-dns=` to the address of the DNS server you will setup (see [Starting Addons](#starting-addons).) + - `--cluster-domain=` to the dns domain prefix to use for cluster DNS addresses. + - `--docker-root=` + - `--root-dir=` + - `--configure-cbr0=` (described above) + - `--register-node` (described in [Node](/{{page.version}}/docs/admin/node) documentation.) + +### kube-proxy + +All nodes should run kube-proxy. (Running kube-proxy on a "master" node is not +strictly required, but being consistent is easier.) Obtain a binary as described for +kubelet. + +Arguments to consider: + + - If following the HTTPS security approach: + - `--api-servers=https://$MASTER_IP` + - `--kubeconfig=/var/lib/kube-proxy/kubeconfig` + - Otherwise, if taking the firewall-based security approach + - `--api-servers=http://$MASTER_IP` + +### Networking + +Each node needs to be allocated its own CIDR range for pod networking. +Call this `NODE_X_POD_CIDR`. + +A bridge called `cbr0` needs to be created on each node. The bridge is explained +further in the [networking documentation](/{{page.version}}/docs/admin/networking). The bridge itself +needs an address from `$NODE_X_POD_CIDR` - by convention the first IP. Call +this `NODE_X_BRIDGE_ADDR`. For example, if `NODE_X_POD_CIDR` is `10.0.0.0/16`, +then `NODE_X_BRIDGE_ADDR` is `10.0.0.1/16`. NOTE: this retains the `/16` suffix +because of how this is used later. + +Recommended, automatic approach: + + 1. Set `--configure-cbr0=true` option in kubelet init script and restart kubelet service. Kubelet will configure cbr0 automatically. + It will wait to do this until the node controller has set Node.Spec.PodCIDR. 
Since you have not setup apiserver and node controller + yet, the bridge will not be setup immediately. + +Alternate, manual approach: + + 1. Set `--configure-cbr0=false` on kubelet and restart. + 1. Create a bridge + - e.g. `brctl addbr cbr0`. + 1. Set appropriate MTU + - `ip link set dev cbr0 mtu 1460` (NOTE: the actual value of MTU will depend on your network environment) + 1. Add the clusters network to the bridge (docker will go on other side of bridge). + - e.g. `ip addr add $NODE_X_BRIDGE_ADDR dev eth0` + 1. Turn it on + - e.g. `ip link set dev cbr0 up` + +If you have turned off Docker's IP masquerading to allow pods to talk to each +other, then you may need to do masquerading just for destination IPs outside +the cluster network. For example: + +```shell +iptables -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE \! -d ${CLUSTER_SUBNET} +``` + +This will rewrite the source address from +the PodIP to the Node IP for traffic bound outside the cluster, and kernel +[connection tracking](http://www.iptables.info/en/connection-state) +will ensure that responses destined to the node still reach +the pod. + +NOTE: This is environment specific. Some environments will not need +any masquerading at all. Others, such as GCE, will not allow pod IPs to send +traffic to the internet, but have no problem with them inside your GCE Project. + +### Other + +- Enable auto-upgrades for your OS package manager, if desired. +- Configure log rotation for all node components (e.g. using [logrotate](http://linux.die.net/man/8/logrotate)). +- Setup liveness-monitoring (e.g. using [supervisord](http://supervisord.org/)). +- Setup volume plugin support (optional) + - Install any client binaries for optional volume types, such as `glusterfs-client` for GlusterFS + volumes. + +### Using Configuration Management + +The previous steps all involved "conventional" system administration techniques for setting up +machines. You may want to use a Configuration Management system to automate the node configuration +process. There are examples of [Saltstack](/{{page.version}}/docs/admin/salt), Ansible, Juju, and CoreOS Cloud Config in the +various Getting Started Guides. + +## Bootstrapping the Cluster + +While the basic node services (kubelet, kube-proxy, docker) are typically started and managed using +traditional system administration/automation approaches, the remaining *master* components of Kubernetes are +all configured and managed *by Kubernetes*: + + - their options are specified in a Pod spec (yaml or json) rather than an /etc/init.d file or + systemd unit. + - they are kept running by Kubernetes rather than by init. + +### etcd + +You will need to run one or more instances of etcd. + + - Recommended approach: run one etcd instance, with its log written to a directory backed + by durable storage (RAID, GCE PD) + - Alternative: run 3 or 5 etcd instances. + - Log can be written to non-durable storage because storage is replicated. + - run a single apiserver which connects to one of the etc nodes. + +See [cluster-troubleshooting](/{{page.version}}/docs/admin/cluster-troubleshooting) for more discussion on factors affecting cluster +availability. + +To run an etcd instance: + +1. copy `cluster/saltbase/salt/etcd/etcd.manifest` +1. make any modifications needed +1. start the pod by putting it into the kubelet manifest directory + +### Apiserver, Controller Manager, and Scheduler + +The apiserver, controller manager, and scheduler will each run as a pod on the master node. 
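+
+Like etcd above, these components run as static pods that the kubelet starts from its manifest directory. As a minimal sketch (assuming the kubelet on the master was started with `--config=/etc/kubernetes/manifests`), placing a manifest and checking that it came up looks like this:
+
+```shell
+# Copy a (possibly edited) manifest into the kubelet manifest directory;
+# the kubelet notices the new file and starts the static pod on its own.
+sudo cp cluster/saltbase/salt/etcd/etcd.manifest /etc/kubernetes/manifests/
+
+# After a short delay the container should be visible to Docker:
+sudo docker ps | grep etcd
+```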
+ +For each of these components, the steps to start them running are similar: + +1. Start with a provided template for a pod. +1. Set the `HYPERKUBE_IMAGE` to the values chosen in [Selecting Images](#selecting-images). +1. Determine which flags are needed for your cluster, using the advice below each template. +1. Set the flags to be individual strings in the command array (e.g. $ARGN below) +1. Start the pod by putting the completed template into the kubelet manifest directory. +1. Verify that the pod is started. + +#### Apiserver pod template + +```json +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "kube-apiserver" + }, + "spec": { + "hostNetwork": true, + "containers": [ + { + "name": "kube-apiserver", + "image": "${HYPERKUBE_IMAGE}", + "command": [ + "/hyperkube", + "apiserver", + "$ARG1", + "$ARG2", + ... + "$ARGN" + ], + "ports": [ + { + "name": "https", + "hostPort": 443, + "containerPort": 443 + }, + { + "name": "local", + "hostPort": 8080, + "containerPort": 8080 + } + ], + "volumeMounts": [ + { + "name": "srvkube", + "mountPath": "/srv/kubernetes", + "readOnly": true + }, + { + "name": "etcssl", + "mountPath": "/etc/ssl", + "readOnly": true + } + ], + "livenessProbe": { + "httpGet": { + "path": "/healthz", + "port": 8080 + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + } + } + ], + "volumes": [ + { + "name": "srvkube", + "hostPath": { + "path": "/srv/kubernetes" + } + }, + { + "name": "etcssl", + "hostPath": { + "path": "/etc/ssl" + } + } + ] + } +} +``` + +Here are some apiserver flags you may need to set: + +- `--cloud-provider=` see [cloud providers](#cloud-providers) +- `--cloud-config=` see [cloud providers](#cloud-providers) +- `--address=${MASTER_IP}` *or* `--bind-address=127.0.0.1` and `--address=127.0.0.1` if you want to run a proxy on the master node. +- `--cluster-name=$CLUSTER_NAME` +- `--service-cluster-ip-range=$SERVICE_CLUSTER_IP_RANGE` +- `--etcd-servers=http://127.0.0.1:4001` +- `--tls-cert-file=/srv/kubernetes/server.cert` +- `--tls-private-key-file=/srv/kubernetes/server.key` +- `--admission-control=$RECOMMENDED_LIST` + - See [admission controllers](/{{page.version}}/docs/admin/admission-controllers) for recommended arguments. +- `--allow-privileged=true`, only if you trust your cluster user to run pods as root. + +If you are following the firewall-only security approach, then use these arguments: + +- `--token-auth-file=/dev/null` +- `--insecure-bind-address=$MASTER_IP` +- `--advertise-address=$MASTER_IP` + +If you are using the HTTPS approach, then set: +- `--client-ca-file=/srv/kubernetes/ca.crt` +- `--token-auth-file=/srv/kubernetes/known_tokens.csv` +- `--basic-auth-file=/srv/kubernetes/basic_auth.csv` + +This pod mounts several node file system directories using the `hostPath` volumes. Their purposes are: + +- The `/etc/ssl` mount allows the apiserver to find the SSL root certs so it can + authenticate external services, such as a cloud provider. + - This is not required if you do not use a cloud provider (e.g. bare-metal). +- The `/srv/kubernetes` mount allows the apiserver to read certs and credentials stored on the + node disk. These could instead be stored on a persistent disk, such as a GCE PD, or baked into the image. +- Optionally, you may want to mount `/var/log` as well and redirect output there (not shown in template). + - Do this if you prefer your logs to be accessible from the root filesystem with tools like journalctl. + +*TODO* document proxy-ssh setup. 
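+
+As a minimal sketch of populating `/srv/kubernetes` on the master for the HTTPS approach (file names follow the flags above; `$TOKEN` and the hypothetical `$ADMIN_PASSWORD` are values you generated earlier when preparing credentials):
+
+```shell
+sudo mkdir -p /srv/kubernetes
+# Server certificate/key and the cluster CA, referenced by the flags above.
+sudo cp ca.crt server.cert server.key /srv/kubernetes/
+# known_tokens.csv format: token,user,uid
+echo "${TOKEN},admin,admin" | sudo tee /srv/kubernetes/known_tokens.csv
+# basic_auth.csv format: password,user,uid  ($ADMIN_PASSWORD is a password you chose)
+echo "${ADMIN_PASSWORD},admin,admin" | sudo tee /srv/kubernetes/basic_auth.csv
+```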
+ +##### Cloud Providers + +Apiserver supports several cloud providers. + +- options for `--cloud-provider` flag are `aws`, `gce`, `mesos`, `openshift`, `ovirt`, `rackspace`, `vagrant`, or unset. +- unset used for e.g. bare metal setups. +- support for new IaaS is added by contributing code [here](https://releases.k8s.io/{{page.githubbranch}}/pkg/cloudprovider/providers) + +Some cloud providers require a config file. If so, you need to put config file into apiserver image or mount through hostPath. + +- `--cloud-config=` set if cloud provider requires a config file. +- Used by `aws`, `gce`, `mesos`, `openshift`, `ovirt` and `rackspace`. +- You must put config file into apiserver image or mount through hostPath. +- Cloud config file syntax is [Gcfg](https://code.google.com/p/gcfg/). +- AWS format defined by type [AWSCloudConfig](https://releases.k8s.io/{{page.githubbranch}}/pkg/cloudprovider/providers/aws/aws.go) +- There is a similar type in the corresponding file for other cloud providers. +- GCE example: search for `gce.conf` in [this file](https://releases.k8s.io/{{page.githubbranch}}/cluster/gce/configure-vm.sh) + +#### Scheduler pod template + +Complete this template for the scheduler pod: + +```json +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "kube-scheduler" + }, + "spec": { + "hostNetwork": true, + "containers": [ + { + "name": "kube-scheduler", + "image": "$HYBERKUBE_IMAGE", + "command": [ + "/hyperkube", + "scheduler", + "--master=127.0.0.1:8080", + "$SCHEDULER_FLAG1", + ... + "$SCHEDULER_FLAGN" + ], + "livenessProbe": { + "httpGet": { + "host" : "127.0.0.1", + "path": "/healthz", + "port": 10251 + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + } + } + ] + } +} +``` + +Typically, no additional flags are required for the scheduler. + +Optionally, you may want to mount `/var/log` as well and redirect output there. + +#### Controller Manager Template + +Template for controller manager pod: + +```json +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "kube-controller-manager" + }, + "spec": { + "hostNetwork": true, + "containers": [ + { + "name": "kube-controller-manager", + "image": "$HYPERKUBE_IMAGE", + "command": [ + "/hyperkube", + "controller-manager", + "$CNTRLMNGR_FLAG1", + ... + "$CNTRLMNGR_FLAGN" + ], + "volumeMounts": [ + { + "name": "srvkube", + "mountPath": "/srv/kubernetes", + "readOnly": true + }, + { + "name": "etcssl", + "mountPath": "/etc/ssl", + "readOnly": true + } + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "path": "/healthz", + "port": 10252 + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + } + } + ], + "volumes": [ + { + "name": "srvkube", + "hostPath": { + "path": "/srv/kubernetes" + } + }, + { + "name": "etcssl", + "hostPath": { + "path": "/etc/ssl" + } + } + ] + } +} +``` + +Flags to consider using with controller manager: + + - `--cluster-name=$CLUSTER_NAME` + - `--cluster-cidr=` + - *TODO*: explain this flag. + - `--allocate-node-cidrs=` + - *TODO*: explain when you want controller to do this and when you want to do it another way. + - `--cloud-provider=` and `--cloud-config` as described in apiserver section. + - `--service-account-private-key-file=/srv/kubernetes/server.key`, used by the [service account](/{{page.version}}/docs/user-guide/service-accounts) feature. 
+ - `--master=127.0.0.1:8080` + +#### Starting and Verifying Apiserver, Scheduler, and Controller Manager + +Place each completed pod template into the kubelet config dir +(whatever `--config=` argument of kubelet is set to, typically +`/etc/kubernetes/manifests`). The order does not matter: scheduler and +controller manager will retry reaching the apiserver until it is up. + +Use `ps` or `docker ps` to verify that each process has started. For example, verify that kubelet has started a container for the apiserver like this: + +```shell +$ sudo docker ps | grep apiserver: +5783290746d5 gcr.io/google_containers/kube-apiserver:e36bf367342b5a80d7467fd7611ad873 "/bin/sh -c '/usr/lo'" 10 seconds ago Up 9 seconds k8s_kube-apiserver.feb145e7_kube-apiserver-kubernetes-master_default_eaebc600cf80dae59902b44225f2fc0a_225a4695 +``` + +Then try to connect to the apiserver: + +```shell +$ echo $(curl -s http://localhost:8080/healthz) +ok +$ curl -s http://localhost:8080/api +{ + "versions": [ + "v1" + ] +} +``` + +If you have selected the `--register-node=true` option for kubelets, they will now begin self-registering with the apiserver. +You should soon be able to see all your nodes by running the `kubectl get nodes` command. +Otherwise, you will need to manually create node objects. + +### Logging + +**TODO** talk about starting Logging. + +### Monitoring + +**TODO** talk about starting Monitoring. + +### DNS + +**TODO** talk about starting DNS. + +## Troubleshooting + +### Running validate-cluster + +**TODO** explain how to use `cluster/validate-cluster.sh` + +### Inspect pods and services + +Try to run through the "Inspect your cluster" section in one of the other Getting Started Guides, such as [GCE](/{{page.version}}/docs/getting-started-guides/gce/#inspect-your-cluster). +You should see some services. You should also see "mirror pods" for the apiserver, scheduler and controller-manager, plus any add-ons you started. + +### Try Examples + +At this point you should be able to run through one of the basic examples, such as the [nginx example](/{{page.version}}/examples/simple-nginx). + +### Running the Conformance Test + +You may want to try to run the [Conformance test](http://releases.k8s.io/{{page.githubbranch}}/hack/conformance-test.sh). Any failures may give a hint as to areas that need more attention. + +### Networking + +The nodes must be able to connect to each other using their private IP. Verify this by +pinging or SSH-ing from one node to another. + +### Getting Help + +If you run into trouble, please see the section on [troubleshooting](/{{page.version}}/docs/getting-started-guides/gce#troubleshooting), post to the +[google-containers group](https://groups.google.com/forum/#!forum/google-containers), or come ask questions on [Slack](/{{page.version}}/docs/troubleshooting#slack). \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/ubuntu-calico.md b/_includes/docs/docs/getting-started-guides/ubuntu-calico.md new file mode 100644 index 0000000000..5084829107 --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/ubuntu-calico.md @@ -0,0 +1,271 @@ + +This document describes how to deploy Kubernetes on Ubuntu bare metal nodes with Calico Networking plugin. See [projectcalico.org](http://projectcalico.org) for more information on what Calico is, and [the calicoctl github](https://github.com/projectcalico/calico-docker) for more information on the command-line tool, `calicoctl`. 
+ +This guide will set up a simple Kubernetes cluster with a master and two nodes. We will start the following processes with systemd: + +On the Master: + +- `etcd` +- `kube-apiserver` +- `kube-controller-manager` +- `kube-scheduler` +- `calico-node` + +On each Node: + +- `kube-proxy` +- `kube-kubelet` +- `calico-node` + +## Prerequisites + +1. This guide uses `systemd` and thus uses Ubuntu 15.04 which supports systemd natively. +2. All machines should have the latest docker stable version installed. At the time of writing, that is Docker 1.7.0. + - To install docker, follow [these instructions](https://docs.docker.com/installation/ubuntulinux/) +3. All hosts should be able to communicate with each other, as well as the internet, to download the necessary files. +4. This demo assumes that none of the hosts have been configured with any Kubernetes or Calico software yet. + +## Setup Master + +First, get the sample configurations for this tutorial + +```shell +wget https://github.com/Metaswitch/calico-kubernetes-ubuntu-demo/archive/master.tar.gz +tar -xvf master.tar.gz +``` + +### Setup environment variables for systemd services on Master + +Many of the sample systemd services provided rely on environment variables on a per-node basis. Here we'll edit those environment variables and move them into place. + +1.) Copy the network-environment-template from the `master` directory for editing. + +```shell +cp calico-kubernetes-ubuntu-demo-master/master/network-environment-template network-environment +``` + +2.) Edit `network-environment` to represent your current host's settings. + +3.) Move the `network-environment` into `/etc` + +```shell +sudo mv -f network-environment /etc +``` + +### Install Kubernetes on Master + +1.) Build & Install Kubernetes binaries + +```shell +# Get the Kubernetes Source +wget https://github.com/kubernetes/kubernetes/releases/download/v1.0.3/kubernetes.tar.gz + +# Untar it +tar -xf kubernetes.tar.gz +tar -xf kubernetes/server/kubernetes-server-linux-amd64.tar.gz +kubernetes/cluster/ubuntu/build.sh + +# Add binaries to /usr/bin +sudo cp -f binaries/master/* /usr/bin +sudo cp -f binaries/kubectl /usr/bin +``` + +2.) Install the sample systemd processes settings for launching kubernetes services + +```shell +sudo cp -f calico-kubernetes-ubuntu-demo-master/master/*.service /etc/systemd +sudo systemctl enable /etc/systemd/etcd.service +sudo systemctl enable /etc/systemd/kube-apiserver.service +sudo systemctl enable /etc/systemd/kube-controller-manager.service +sudo systemctl enable /etc/systemd/kube-scheduler.service +``` + +3.) Launch the processes. + +```shell +sudo systemctl start etcd.service +sudo systemctl start kube-apiserver.service +sudo systemctl start kube-controller-manager.service +sudo systemctl start kube-scheduler.service +``` + +### Install Calico on Master + +In order to allow the master to route to pods on our nodes, we will launch the calico-node daemon on our master. This will allow it to learn routes over BGP from the other calico-node daemons in the cluster. The docker daemon should already be running before calico is started. 
+ +```shell +# Install the calicoctl binary, which will be used to launch calico +wget https://github.com/projectcalico/calico-docker/releases/download/v0.5.5/calicoctl +chmod +x calicoctl +sudo cp -f calicoctl /usr/bin + +# Install and start the calico service +sudo cp -f calico-kubernetes-ubuntu-demo-master/master/calico-node.service /etc/systemd +sudo systemctl enable /etc/systemd/calico-node.service +sudo systemctl start calico-node.service +``` + +> Note: calico-node may take a few minutes on first boot while it downloads the calico-node docker image. + +## Setup Nodes + +Perform these steps **once on each node**, ensuring you appropriately set the environment variables on each node + +### Setup environment variables for systemd services on the Node + +1.) Get the sample configurations for this tutorial + +```shell +wget https://github.com/Metaswitch/calico-kubernetes-ubuntu-demo/archive/master.tar.gz +tar -xvf master.tar.gz +``` + +2.) Copy the network-environment-template from the `node` directory + +```shell +cp calico-kubernetes-ubuntu-demo-master/node/network-environment-template network-environment +``` + +3.) Edit `network-environment` to represent your current host's settings. + +4.) Move `network-environment` into `/etc` + +```shell +sudo mv -f network-environment /etc +``` + +### Configure Docker on the Node + +#### Create the veth + +Instead of using docker's default interface (docker0), we will configure a new one to use desired IP ranges + +```shell +sudo apt-get install -y bridge-utils +sudo brctl addbr cbr0 +sudo ifconfig cbr0 up +sudo ifconfig cbr0 /24 +``` + +> Replace \ with the subnet for this host's containers. Example topology: + + Node | cbr0 IP +------- | ------------- +node-1 | 192.168.1.1/24 +node-2 | 192.168.2.1/24 +node-X | 192.168.X.1/24 + +#### Start docker on cbr0 + +The Docker daemon must be started and told to use the already configured cbr0 instead of using the usual docker0, as well as disabling ip-masquerading and modification of the ip-tables. + +1.) Edit the ubuntu-15.04 docker.service for systemd at: `/lib/systemd/system/docker.service` + +2.) Find the line that reads `ExecStart=/usr/bin/docker -d -H fd://` and append the following flags: `--bridge=cbr0 --iptables=false --ip-masq=false` + +3.) Reload systemctl and restart docker. + +```shell +sudo systemctl daemon-reload +sudo systemctl restart docker +``` + +### Install Calico on the Node + +1.) Install Calico + +```shell +# Get the calicoctl binary +wget https://github.com/projectcalico/calico-docker/releases/download/v0.5.5/calicoctl +chmod +x calicoctl +sudo cp -f calicoctl /usr/bin + +# Start calico on this node +sudo cp calico-kubernetes-ubuntu-demo-master/node/calico-node.service /etc/systemd +sudo systemctl enable /etc/systemd/calico-node.service +sudo systemctl start calico-node.service +``` + +>The calico-node service will automatically get the kubernetes-calico plugin binary and install it on the host system. + +2.) Use calicoctl to add an IP pool. We must specify the IP and port that the master's etcd is listening on. +**NOTE: This step only needs to be performed once per Kubernetes deployment, as it covers all the node's IP ranges.** + +```shell +ETCD_AUTHORITY=:4001 calicoctl pool add 192.168.0.0/16 +``` + +### Install Kubernetes on the Node + +1.) 
Build & Install Kubernetes binaries + +```shell +# Get the Kubernetes Source +wget https://github.com/kubernetes/kubernetes/releases/download/v1.0.3/kubernetes.tar.gz + +# Untar it +tar -xf kubernetes.tar.gz +tar -xf kubernetes/server/kubernetes-server-linux-amd64.tar.gz +kubernetes/cluster/ubuntu/build.sh + +# Add binaries to /usr/bin +sudo cp -f binaries/minion/* /usr/bin + +# Get the iptables based kube-proxy reccomended for this demo +wget https://github.com/projectcalico/calico-kubernetes/releases/download/v0.1.1/kube-proxy +sudo cp kube-proxy /usr/bin/ +sudo chmod +x /usr/bin/kube-proxy +``` + +2.) Install and launch the sample systemd processes settings for launching Kubernetes services + +```shell +sudo cp calico-kubernetes-ubuntu-demo-master/node/kube-proxy.service /etc/systemd/ +sudo cp calico-kubernetes-ubuntu-demo-master/node/kube-kubelet.service /etc/systemd/ +sudo systemctl enable /etc/systemd/kube-proxy.service +sudo systemctl enable /etc/systemd/kube-kubelet.service +sudo systemctl start kube-proxy.service +sudo systemctl start kube-kubelet.service +``` + +> *You may want to consider checking their status after to ensure everything is running* + +## Install the DNS Addon + +Most Kubernetes deployments will require the DNS addon for service discovery. For more on DNS service discovery, check [here](https://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns). + +The config repository for this guide comes with manifest files to start the DNS addon. To install DNS, do the following on your Master node. + +Replace `` in `calico-kubernetes-ubuntu-demo-master/dns/skydns-rc.yaml` with your Master's IP address. Then, create `skydns-rc.yaml` and `skydns-svc.yaml` using `kubectl create -f `. + +## Launch other Services With Calico-Kubernetes + +At this point, you have a fully functioning cluster running on kubernetes with a master and 2 nodes networked with Calico. You can now follow any of the [standard documentation](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/) to set up other services on your cluster. + +## Connectivity to outside the cluster + +With this sample configuration, because the containers have private `192.168.0.0/16` IPs, you will need NAT to allow connectivity between containers and the internet. However, in a full datacenter deployment, NAT is not always necessary, since Calico can peer with the border routers over BGP. + +### NAT on the nodes + +The simplest method for enabling connectivity from containers to the internet is to use an iptables masquerade rule. This is the standard mechanism [recommended](/{{page.version}}/docs/admin/networking/#google-compute-engine-gce) in the Kubernetes GCE environment. + +We need to NAT traffic that has a destination outside of the cluster. Internal traffic includes the master/nodes, and the container IP pools. A suitable masquerade chain would follow the pattern below, replacing the following variables: + +- `CONTAINER_SUBNET`: The cluster-wide subnet from which container IPs are chosen. All cbr0 bridge subnets fall within this range. The above example uses `192.168.0.0/16`. +- `KUBERNETES_HOST_SUBNET`: The subnet from which Kubernetes node / master IP addresses have been chosen. +- `HOST_INTERFACE`: The interface on the Kubernetes node which is used for external connectivity. 
The above example uses `eth0`.
+
+```shell
+sudo iptables -t nat -N KUBE-OUTBOUND-NAT
+sudo iptables -t nat -A KUBE-OUTBOUND-NAT -d <CONTAINER_SUBNET> -o <HOST_INTERFACE> -j RETURN
+sudo iptables -t nat -A KUBE-OUTBOUND-NAT -d <KUBERNETES_HOST_SUBNET> -o <HOST_INTERFACE> -j RETURN
+sudo iptables -t nat -A KUBE-OUTBOUND-NAT -j MASQUERADE
+sudo iptables -t nat -A POSTROUTING -j KUBE-OUTBOUND-NAT
+```
+
+This chain should be applied on the master and all nodes. In production, these rules should be persisted, e.g. with `iptables-persistent`.
+
+### NAT at the border router
+
+In a datacenter environment, it is recommended to configure Calico to peer with the border routers over BGP. This means that the container IPs will be routable anywhere in the datacenter, and so NAT is not needed on the nodes (though it may be enabled at the datacenter edge to allow outbound-only internet connectivity).
\ No newline at end of file
diff --git a/_includes/docs/docs/getting-started-guides/ubuntu.md b/_includes/docs/docs/getting-started-guides/ubuntu.md
new file mode 100644
index 0000000000..6d947e279a
--- /dev/null
+++ b/_includes/docs/docs/getting-started-guides/ubuntu.md
@@ -0,0 +1,259 @@
+
+This document describes how to deploy Kubernetes on Ubuntu nodes; the examples below use 1 master and 3 nodes.
+You can scale to **any number of nodes** by changing a few settings.
+The original idea was heavily inspired by @jainvipin's Ubuntu single-node
+work, which has been merged into this document.
+
+The [cloud team from Zhejiang University](https://github.com/ZJU-SEL) will maintain this work.
+
+* TOC
+{:toc}
+
+## Prerequisites
+
+1. The nodes have Docker version 1.2+ and bridge-utils (to manipulate the Linux bridge) installed.
+2. All machines can communicate with each other. The master node needs to connect to the Internet to download the necessary files, while the worker nodes do not.
+3. This guide has been tested on Ubuntu 14.04 LTS 64-bit server, but it does not work with
+Ubuntu 15, which uses systemd instead of upstart. We are working on fixing this.
+4. Dependencies of this guide: etcd 2.0.12, flannel 0.4.0, and Kubernetes 1.0.3; higher versions may also work.
+5. All the remote servers can be logged into over SSH without a password, using key-based authentication.
+
+
+## Starting a Cluster
+
+### Download binaries
+
+First, clone the Kubernetes GitHub repo:
+
+```shell
+$ git clone https://github.com/kubernetes/kubernetes.git
+```
+
+Then download all the needed binaries into the given directory (cluster/ubuntu/binaries):
+
+```shell
+$ cd kubernetes/cluster/ubuntu
+$ ./build.sh
+```
+
+You can customize the etcd, flannel, and Kubernetes versions by changing the corresponding variables
+`ETCD_VERSION`, `FLANNEL_VERSION`, and `KUBE_VERSION` in build.sh; by default they are 2.0.12,
+0.4.0, and 1.0.3 respectively.
+
+Make sure that the binaries are located properly in the binaries/master
+or binaries/minion directory before you go on to the next step.
+
+Note that we use flannel here to set up the overlay network, but it is optional. You can build a Kubernetes
+cluster with native networking, or use flannel, Open vSwitch, or any other SDN tool you like.
+
+#### Configure and start the Kubernetes cluster
+
+An example cluster is listed below:
+
+| IP Address    | Role                 |
+|---------------|----------------------|
+| 10.10.103.223 | node                 |
+| 10.10.103.162 | node                 |
+| 10.10.103.250 | both master and node |
+
+First, configure the cluster information in cluster/ubuntu/config-default.sh; below is a simple sample.
+
+```shell
+export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"
+
+export role="ai i i"
+
+export NUM_MINIONS=${NUM_MINIONS:-3}
+
+export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24
+
+export FLANNEL_NET=172.16.0.0/16
+```
+
+The `nodes` variable defines all of your cluster nodes, with the master node listed first;
+entries are separated by spaces.
+
+The `role` variable defines the role of each machine above, in the same order: "ai" means the machine
+acts as both master and node, "a" means master only, and "i" means node only.
+
+The `NUM_MINIONS` variable defines the total number of nodes.
+
+The `SERVICE_CLUSTER_IP_RANGE` variable defines the Kubernetes service IP range. Please make sure
+that you define a valid private IP range here, because some IaaS providers may reserve private IPs.
+You can use one of the three RFC 1918 private network ranges listed below. It is also best not to choose a range
+that conflicts with your own private network range.
+
+```shell
+10.0.0.0 - 10.255.255.255 (10/8 prefix)
+
+172.16.0.0 - 172.31.255.255 (172.16/12 prefix)
+
+192.168.0.0 - 192.168.255.255 (192.168/16 prefix)
+```
+
+The `FLANNEL_NET` variable defines the IP range used for the flannel overlay network,
+and should not conflict with the `SERVICE_CLUSTER_IP_RANGE` above.
+
+**Note:** When deploying, the master needs to connect to the Internet to download the necessary files. If your machines are in a private network that needs a proxy to reach the Internet, you can set `PROXY_SETTING` in cluster/ubuntu/config-default.sh, for example:
+
+```shell
+PROXY_SETTING="http_proxy=http://server:port https_proxy=https://server:port"
+```
+
+After all of the above variables are set correctly, use the following command in the cluster/ directory to bring up the whole cluster.
+
+```shell
+KUBERNETES_PROVIDER=ubuntu ./kube-up.sh
+```
+
+The script automatically copies the binaries and config files to all the machines via scp and starts the Kubernetes services on them.
+The only thing you need to do is type the sudo password when prompted.
+
+```shell
+Deploying minion on machine 10.10.103.223
+...
+[sudo] password to copy files and start minion:
+```
+
+If everything goes right, you will see the following message on the console, indicating that the cluster is up.
+
+```shell
+Cluster validation succeeded
+```
+
+### Test it out
+
+You can use the `kubectl` command to check whether the newly created cluster is working correctly.
+The `kubectl` binary is under the `cluster/ubuntu/binaries` directory.
+Add it to your PATH so that you can use the commands below directly.
+
+For example, use `$ kubectl get nodes` to see whether all of your nodes are ready.
+
+```shell
+$ kubectl get nodes
+NAME            LABELS                                  STATUS
+10.10.103.162   kubernetes.io/hostname=10.10.103.162   Ready
+10.10.103.223   kubernetes.io/hostname=10.10.103.223   Ready
+10.10.103.250   kubernetes.io/hostname=10.10.103.250   Ready
+```
+
+You can also run the Kubernetes [guestbook example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/) to build a Redis-backed cluster on Kubernetes.
+
+
+### Deploy addons
+
+Assuming you now have a running cluster, this section shows how to deploy addons such as DNS
+and the UI onto the existing cluster.
+
+DNS is configured in cluster/ubuntu/config-default.sh.
+
+```shell
+ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
+
+DNS_SERVER_IP="192.168.3.10"
+
+DNS_DOMAIN="cluster.local"
+
+DNS_REPLICAS=1
+```
+
+`DNS_SERVER_IP` defines the IP of the DNS server, which must be within the `SERVICE_CLUSTER_IP_RANGE`.
+`DNS_REPLICAS` defines how many DNS pods run in the cluster.
+
+By default, the kube-ui addon is also deployed.
+
+```shell
+ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
+```
+
+After all of the above variables have been set, just type the following commands.
+
+```shell
+$ cd cluster/ubuntu
+$ KUBERNETES_PROVIDER=ubuntu ./deployAddons.sh
+```
+
+After some time, you can use `$ kubectl get pods --namespace=kube-system` to see that the DNS and UI pods are running in the cluster.
+
+### Ongoing work
+
+We are working on the following features, which we'd like everybody to know about:
+
+1. Running the Kubernetes binaries in Docker using [kube-in-docker](https://github.com/ZJU-SEL/kube-in-docker/tree/baremetal-kube),
+to eliminate OS-distro differences.
+2. Tear-down scripts: clear and re-create the whole stack with one click.
+
+### Troubleshooting
+
+Generally, what this approach does is quite simple:
+
+1. Download and copy binaries and configuration files to the proper directories on every node
+2. Configure `etcd` using the IPs provided by the user
+3. Create and start the flannel network
+
+So if you encounter a problem, **check the etcd configuration first**.
+
+Please try the following:
+
+1. Check `/var/log/upstart/etcd.log` for suspicious etcd log entries.
+2. Check `/etc/default/etcd`; as we do not have much input validation, a correct config should look like:
+
+```shell
+ETCD_OPTS="-name infra1 -initial-advertise-peer-urls -listen-peer-urls -initial-cluster-token etcd-cluster-1 -initial-cluster infra1=,infra2=,infra3= -initial-cluster-state new"
+```
+
+3. You may find the following commands useful; the former brings down the cluster, while
+the latter starts it again.
+
+```shell
+KUBERNETES_PROVIDER=ubuntu ./kube-down.sh
+KUBERNETES_PROVIDER=ubuntu ./kube-up.sh
+```
+
+4. You can also customize your own settings in `/etc/default/{component_name}`.
+
+
+### Upgrading a Cluster
+
+If you already have a Kubernetes cluster and want to upgrade to a new version,
+you can use the following command in the cluster/ directory to update the whole cluster, or a specified node, to the new version.
+
+```shell
+KUBERNETES_PROVIDER=ubuntu ./kube-push.sh [-m|-n ]
+```
+
+It can be done for all components (the default), the master (`-m`), or a specified node (`-n`).
+If the version is not specified, the script will try to use local binaries. You should ensure all the binaries are in place under `cluster/ubuntu/binaries`.
+
+```shell
+$ tree cluster/ubuntu/binaries
+binaries/
+├── kubectl
+├── master
+│   ├── etcd
+│   ├── etcdctl
+│   ├── flanneld
+│   ├── kube-apiserver
+│   ├── kube-controller-manager
+│   └── kube-scheduler
+└── minion
+    ├── flanneld
+    ├── kubelet
+    └── kube-proxy
+```
+
+Upgrading a single node is currently experimental. You can use the following command to get help.
+
+```shell
+KUBERNETES_PROVIDER=ubuntu ./kube-push.sh -h
+```
+
+Some examples are as follows:
+
+* upgrade master to version 1.0.5: `$ KUBERNETES_PROVIDER=ubuntu ./kube-push.sh -m 1.0.5`
+* upgrade node 10.10.103.223 to version 1.0.5: `$ KUBERNETES_PROVIDER=ubuntu ./kube-push.sh -n 10.10.103.223 1.0.5`
+* upgrade master and all nodes to version 1.0.5: `$ KUBERNETES_PROVIDER=ubuntu ./kube-push.sh 1.0.5`
+
+The script will not delete any resources in your cluster; it only replaces the binaries.
+You can use the `kubectl` command to check whether the newly upgraded cluster is working correctly.
+For example, use `$ kubectl get nodes` to see if all of your nodes are ready, or refer to [test-it-out](/{{page.version}}/docs/getting-started-guides/ubuntu/#test-it-out).
\ No newline at end of file
diff --git a/_includes/docs/docs/getting-started-guides/vagrant.md b/_includes/docs/docs/getting-started-guides/vagrant.md
new file mode 100644
index 0000000000..5358c42852
--- /dev/null
+++ b/_includes/docs/docs/getting-started-guides/vagrant.md
@@ -0,0 +1,334 @@
+
+Running Kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/develop on your local machine (Linux, Mac OS X).
+
+* TOC
+{:toc}
+
+### Prerequisites
+
+1. Install Vagrant (version 1.6.2 or later) from http://www.vagrantup.com/downloads.html
+2. Install one of:
+   1. VirtualBox version 4.3.28 from https://www.virtualbox.org/wiki/Download_Old_Builds_4_3
+   2. [VMWare Fusion](https://www.vmware.com/products/fusion/) version 5 or greater as well as the appropriate [Vagrant VMWare Fusion provider](https://www.vagrantup.com/vmware)
+   3. [VMWare Workstation](https://www.vmware.com/products/workstation/) version 9 or greater as well as the [Vagrant VMWare Workstation provider](https://www.vagrantup.com/vmware)
+   4. [Parallels Desktop](https://www.parallels.com/products/desktop/) version 9 or greater as well as the [Vagrant Parallels provider](https://parallels.github.io/vagrant-parallels/)
+   5. libvirt with KVM and hardware virtualisation support enabled, together with [Vagrant-libvirt](https://github.com/pradels/vagrant-libvirt). Fedora provides an official RPM, so you can install the plugin with `yum install vagrant-libvirt`
+
+### Setup
+
+Setting up a cluster is as simple as running:
+
+```shell
+export KUBERNETES_PROVIDER=vagrant
+curl -sS https://get.k8s.io | bash
+```
+
+Alternatively, you can download a [Kubernetes release](https://github.com/kubernetes/kubernetes/releases) and extract the archive. To start your local cluster, open a shell and run:
+
+```shell
+cd kubernetes
+
+export KUBERNETES_PROVIDER=vagrant
+./cluster/kube-up.sh
+```
+
+The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine.
+
+By default, the Vagrant setup will create a single master VM (called kubernetes-master) and one node (called kubernetes-minion-1). Each VM will take 1 GB of memory, so make sure you have at least 2 GB to 4 GB of free memory (plus appropriate free disk space).
+
+Vagrant will provision each machine in the cluster with all the necessary components to run Kubernetes. The initial setup can take a few minutes to complete on each machine.
+
+If you installed more than one Vagrant provider, Kubernetes will usually pick the appropriate one.
However, you can override which one Kubernetes will use by setting the [`VAGRANT_DEFAULT_PROVIDER`](https://docs.vagrantup.com/v2/providers/default) environment variable: + +```shell +export VAGRANT_DEFAULT_PROVIDER=parallels +export KUBERNETES_PROVIDER=vagrant +./cluster/kube-up.sh +``` + +By default, each VM in the cluster is running Fedora. + +To access the master or any node: + +```shell +vagrant ssh master +vagrant ssh minion-1 +``` + +If you are running more than one node, you can access the others by: + +```shell +vagrant ssh minion-2 +vagrant ssh minion-3 +``` + +Each node in the cluster installs the docker daemon and the kubelet. + +The master node instantiates the Kubernetes master components as pods on the machine. + +To view the service status and/or logs on the kubernetes-master: + +```shell +[vagrant@kubernetes-master ~] $ vagrant ssh master +[vagrant@kubernetes-master ~] $ sudo su + +[root@kubernetes-master ~] $ systemctl status kubelet +[root@kubernetes-master ~] $ journalctl -ru kubelet + +[root@kubernetes-master ~] $ systemctl status docker +[root@kubernetes-master ~] $ journalctl -ru docker + +[root@kubernetes-master ~] $ tail -f /var/log/kube-apiserver.log +[root@kubernetes-master ~] $ tail -f /var/log/kube-controller-manager.log +[root@kubernetes-master ~] $ tail -f /var/log/kube-scheduler.log +``` + +To view the services on any of the nodes: + +```shell +[vagrant@kubernetes-master ~] $ vagrant ssh minion-1 +[vagrant@kubernetes-master ~] $ sudo su + +[root@kubernetes-master ~] $ systemctl status kubelet +[root@kubernetes-master ~] $ journalctl -ru kubelet + +[root@kubernetes-master ~] $ systemctl status docker +[root@kubernetes-master ~] $ journalctl -ru docker +``` + +### Interacting with your Kubernetes cluster with Vagrant. + +With your Kubernetes cluster up, you can manage the nodes in your cluster with the regular Vagrant commands. + +To push updates to new Kubernetes code after making source changes: + +```shell +./cluster/kube-push.sh +``` + +To stop and then restart the cluster: + +```shell +vagrant halt +./cluster/kube-up.sh +``` + +To destroy the cluster: + +```shell +vagrant destroy +``` + +Once your Vagrant machines are up and provisioned, the first thing to do is to check that you can use the `kubectl.sh` script. + +You may need to build the binaries first, you can do this with `make` + +```shell +$ ./cluster/kubectl.sh get nodes + +NAME LABELS +10.245.1.4 +10.245.1.5 +10.245.1.3 +``` + +### Authenticating with your master + +When using the vagrant provider in Kubernetes, the `cluster/kubectl.sh` script will cache your credentials in a `~/.kubernetes_vagrant_auth` file so you will not be prompted for them in the future. + +```shell +cat ~/.kubernetes_vagrant_auth +``` + +```json +{ "User": "vagrant", + "Password": "vagrant", + "CAFile": "/home/k8s_user/.kubernetes.vagrant.ca.crt", + "CertFile": "/home/k8s_user/.kubecfg.vagrant.crt", + "KeyFile": "/home/k8s_user/.kubecfg.vagrant.key" +} +``` + +You should now be set to use the `cluster/kubectl.sh` script. For example try to list the nodes that you have started with: + +```shell +./cluster/kubectl.sh get nodes +``` + +### Running containers + +Your cluster is running, you can list the nodes in your cluster: + +```shell +$ ./cluster/kubectl.sh get nodes + +NAME LABELS +10.245.2.4 +10.245.2.3 +10.245.2.2 +``` + +Now start running some containers! + +You can now use any of the `cluster/kube-*.sh` commands to interact with your VM machines. 
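+
+For example (a few of the scripts you will find there; the exact set depends on the release you extracted):
+
+```shell
+./cluster/kubectl.sh cluster-info   # show where the master and cluster services are running
+./cluster/kube-push.sh              # push freshly built binaries to the running VMs
+./cluster/kube-down.sh              # tear the cluster down when you are finished
+```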
+Before starting a container there will be no pods, services and replication controllers. + +```shell +$ ./cluster/kubectl.sh get pods +NAME READY STATUS RESTARTS AGE + +$ ./cluster/kubectl.sh get services +NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE + +$ ./cluster/kubectl.sh get replicationcontrollers +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +``` + +Start a container running nginx with a replication controller and three replicas + +```shell +$ ./cluster/kubectl.sh run my-nginx --image=nginx --replicas=3 --port=80 +``` + +When listing the pods, you will see that three containers have been started and are in Waiting state: + +```shell +$ ./cluster/kubectl.sh get pods +NAME READY STATUS RESTARTS AGE +my-nginx-5kq0g 0/1 Pending 0 10s +my-nginx-gr3hh 0/1 Pending 0 10s +my-nginx-xql4j 0/1 Pending 0 10s +``` + +You need to wait for the provisioning to complete, you can monitor the nodes by doing: + +```shell +$ vagrant ssh minion-1 -c 'sudo docker images' +kubernetes-minion-1: + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + 96864a7d2df3 26 hours ago 204.4 MB + google/cadvisor latest e0575e677c50 13 days ago 12.64 MB + kubernetes/pause latest 6c4579af347b 8 weeks ago 239.8 kB +``` + +Once the docker image for nginx has been downloaded, the container will start and you can list it: + +```shell +$ vagrant ssh minion-1 -c 'sudo docker ps' +kubernetes-minion-1: + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + dbe79bf6e25b nginx:latest "nginx" 21 seconds ago Up 19 seconds k8s--mynginx.8c5b8a3a--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1.etcd--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1--fcfa837f + fa0e29c94501 kubernetes/pause:latest "/pause" 8 minutes ago Up 8 minutes 0.0.0.0:8080->80/tcp k8s--net.a90e7ce4--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1.etcd--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1--baf5b21b + aa2ee3ed844a google/cadvisor:latest "/usr/bin/cadvisor" 38 minutes ago Up 38 minutes k8s--cadvisor.9e90d182--cadvisor_-_agent.file--4626b3a2 + 65a3a926f357 kubernetes/pause:latest "/pause" 39 minutes ago Up 39 minutes 0.0.0.0:4194->8080/tcp k8s--net.c5ba7f0e--cadvisor_-_agent.file--342fd561 +``` + +Going back to listing the pods, services and replicationcontrollers, you now have: + +```shell +$ ./cluster/kubectl.sh get pods +NAME READY STATUS RESTARTS AGE +my-nginx-5kq0g 1/1 Running 0 1m +my-nginx-gr3hh 1/1 Running 0 1m +my-nginx-xql4j 1/1 Running 0 1m + +$ ./cluster/kubectl.sh get services +NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE +my-nginx 10.0.0.1 80/TCP run=my-nginx 1h +``` + +We did not start any services, hence there are none listed. But we see three replicas displayed properly. +Check the [guestbook](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/guestbook/) application to learn how to create a service. +You can already play with scaling the replicas with: + +```shell +$ ./cluster/kubectl.sh scale rc my-nginx --replicas=2 +$ ./cluster/kubectl.sh get pods +NAME READY STATUS RESTARTS AGE +my-nginx-5kq0g 1/1 Running 0 2m +my-nginx-gr3hh 1/1 Running 0 2m +``` + +Congratulations! + +### Troubleshooting + +#### I keep downloading the same (large) box all the time! + +By default the Vagrantfile will download the box from S3. 
You can change this (and cache the box locally) by providing a name and an alternate URL when calling `kube-up.sh` + +```shell +export KUBERNETES_BOX_NAME=choose_your_own_name_for_your_kuber_box +export KUBERNETES_BOX_URL=path_of_your_kuber_box +export KUBERNETES_PROVIDER=vagrant +./cluster/kube-up.sh +``` + +#### I just created the cluster, but I am getting authorization errors! + +You probably have an incorrect ~/.kubernetes_vagrant_auth file for the cluster you are attempting to contact. + +```shell +rm ~/.kubernetes_vagrant_auth +``` + +After using kubectl.sh make sure that the correct credentials are set: + +```shell +cat ~/.kubernetes_vagrant_auth +``` + +```json +{ + "User": "vagrant", + "Password": "vagrant" +} +``` + +#### I just created the cluster, but I do not see my container running! + +If this is your first time creating the cluster, the kubelet on each node schedules a number of docker pull requests to fetch prerequisite images. This can take some time and as a result may delay your initial pod getting provisioned. + +#### I want to make changes to Kubernetes code! + +To set up a vagrant cluster for hacking, follow the [vagrant developer guide](/{{page.version}}/docs/devel/developer-guides/vagrant). + +#### I have brought Vagrant up but the nodes cannot validate! + +Log on to one of the nodes (`vagrant ssh minion-1`) and inspect the salt minion log (`sudo cat /var/log/salt/minion`). + +#### I want to change the number of nodes! + +You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_MINIONS` to 1 like so: + +```shell +export NUM_MINIONS=1 +``` + +#### I want my VMs to have more memory! + +You can control the memory allotted to virtual machines with the `KUBERNETES_MEMORY` environment variable. +Just set it to the number of megabytes you would like the machines to have. For example: + +```shell +export KUBERNETES_MEMORY=2048 +``` + +If you need more granular control, you can set the amount of memory for the master and nodes independently. For example: + +```shell +export KUBERNETES_MASTER_MEMORY=1536 +export KUBERNETES_MINION_MEMORY=2048 +``` + +#### I ran vagrant suspend and nothing works! + +`vagrant suspend` seems to mess up the network. This is not supported at this time. + +#### I want vagrant to sync folders via nfs! + +You can ensure that vagrant uses nfs to sync folders with virtual machines by setting the KUBERNETES_VAGRANT_USE_NFS environment variable to 'true'. nfs is faster than virtualbox or vmware's 'shared folders' and does not require guest additions. See the [vagrant docs](http://docs.vagrantup.com/v2/synced-folders/nfs) for details on configuring nfs on the host. This setting will have no effect on the libvirt provider, which uses nfs by default. For example: + +```shell +export KUBERNETES_VAGRANT_USE_NFS=true +``` \ No newline at end of file diff --git a/_includes/docs/docs/getting-started-guides/vsphere.md b/_includes/docs/docs/getting-started-guides/vsphere.md new file mode 100644 index 0000000000..25ad6a2d8a --- /dev/null +++ b/_includes/docs/docs/getting-started-guides/vsphere.md @@ -0,0 +1,82 @@ + +The example below creates a Kubernetes cluster with 4 worker node Virtual +Machines and a master Virtual Machine (i.e. 
5 VMs in your cluster). This +cluster is set up and controlled from your workstation (or wherever you find +convenient). + +* TOC +{:toc} + +### Prerequisites + +1. You need administrator credentials to an ESXi machine or vCenter instance. +2. You must have Go (version 1.2 or later) installed: [www.golang.org](http://www.golang.org). +3. You must have your `GOPATH` set up and include `$GOPATH/bin` in your `PATH`. + +```shell +export GOPATH=$HOME/src/go + mkdir -p $GOPATH + export PATH=$PATH:$GOPATH/bin +``` + +4. Install the govc tool to interact with ESXi/vCenter: + +```shell +go get github.com/vmware/govmomi/govc +``` + +5. Get or build a [binary release](/{{page.version}}/docs/getting-started-guides/binary_release) + +### Setup + +Download a prebuilt Debian 7.7 VMDK that we'll use as a base image: + +```shell +curl --remote-name-all https://storage.googleapis.com/govmomi/vmdk/2014-11-11/kube.vmdk.gz{,.md5} +md5sum -c kube.vmdk.gz.md5 +gzip -d kube.vmdk.gz +``` + +Import this VMDK into your vSphere datastore: + +```shell +export GOVC_URL='user:pass@hostname' +export GOVC_INSECURE=1 # If the host above uses a self-signed cert +export GOVC_DATASTORE='target datastore' +export GOVC_RESOURCE_POOL='resource pool or cluster with access to datastore' + +govc import.vmdk kube.vmdk ./kube/ +``` + +Verify that the VMDK was correctly uploaded and expanded to ~3GiB: + +```shell +govc datastore.ls ./kube/ +``` + +Take a look at the file `cluster/vsphere/config-common.sh` fill in the required +parameters. The guest login for the image that you imported is `kube:kube`. + +### Starting a cluster + +Now, let's continue with deploying Kubernetes. +This process takes about ~10 minutes. + +```shell +cd kubernetes # Extracted binary release OR repository root +export KUBERNETES_PROVIDER=vsphere +cluster/kube-up.sh +``` + +Refer to the top level README and the getting started guide for Google Compute +Engine. Once you have successfully reached this point, your vSphere Kubernetes +deployment works just as any other one! + +**Enjoy!** + +### Extra: debugging deployment failure + +The output of `kube-up.sh` displays the IP addresses of the VMs it deploys. You +can log into any VM as the `kube` user to poke around and figure out what is +going on (find yourself authorized with your SSH key, or use the password +`kube` otherwise). \ No newline at end of file diff --git a/_includes/docs/docs/index.md b/_includes/docs/docs/index.md new file mode 100644 index 0000000000..ee18b8df76 --- /dev/null +++ b/_includes/docs/docs/index.md @@ -0,0 +1,29 @@ + +* The [User's guide](/{{page.version}}/docs/user-guide/) is for anyone who wants to run programs and + services on an existing Kubernetes cluster. + +* The [Cluster Admin's guide](/{{page.version}}/docs/admin/) is for anyone setting up + a Kubernetes cluster or administering it. + +* The [Developer guide](/{{page.version}}/docs/devel/) is for anyone wanting to write + programs that access the Kubernetes API, write plugins or extensions, or + modify the core code of Kubernetes. + +* The [Kubectl Command Line Interface](/{{page.version}}/docs/user-guide/kubectl/kubectl) is a detailed reference on + the `kubectl` CLI. + +* The [API object documentation](http://kubernetes.io/third_party/swagger-ui/) + is a detailed description of all fields found in core API objects. 
+ +* An overview of the [Design of Kubernetes](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/) + +* There are example files and walkthroughs in the [examples](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples) + folder. + +* If something went wrong, see the [troubleshooting](/{{page.version}}/docs/troubleshooting) document for how to debug. +You should also check the [known issues](/{{page.version}}/docs/user-guide/known-issues) for the release you're using. + +* To report a security issue, see [Reporting a Security Issue](/{{page.version}}/docs/reporting-security-issues). + + + diff --git a/_includes/docs/docs/reporting-security-issues.md b/_includes/docs/docs/reporting-security-issues.md new file mode 100644 index 0000000000..651f61295c --- /dev/null +++ b/_includes/docs/docs/reporting-security-issues.md @@ -0,0 +1,22 @@ + +If you believe you have discovered a vulnerability or a have a security incident to report, please follow the steps below. This applies to Kubernetes releases v1.0 or later. + +To watch for security and major API announcements, please join our [kubernetes-announce](https://groups.google.com/forum/#!forum/kubernetes-announce) group. + +## Reporting a security issue + +To report an issue, please: + +- Submit a bug report [here](http://goo.gl/vulnz). + - Select 'I want to report a technical security bug in a Google product (SQLi, XSS, etc.).'? + - Select 'Other'? as the Application Type. +- Under reproduction steps, please additionally include + - the words "Kubernetes Security issue" + - Description of the issue + - Kubernetes release (e.g. output of `kubectl version` command, which includes server version.) + - Environment setup (e.g. which "Getting Started Guide" you followed, if any; what node operating system used; what service or software creates your virtual machines, if any) + +An online submission will have the fastest response; however, if you prefer email, please send mail to security@google.com. If you feel the need, please use the [PGP public key](https://services.google.com/corporate/publickey.txt) to encrypt communications. + + + diff --git a/_includes/docs/docs/roadmap.md b/_includes/docs/docs/roadmap.md new file mode 100644 index 0000000000..8a82c7f85c --- /dev/null +++ b/_includes/docs/docs/roadmap.md @@ -0,0 +1,46 @@ + +## Kubernetes 1.1 + +### Timeline + +We are targetting late October for our 1.1 release of Kubernetes. We plan on cutting a first release candidate +in early October. We will enter feature freeze for the 1.1 release on September 21st. Note this does not mean +that the master branch is fully frozen, but all 1.1 features *must* be in by September 21st and large-scale +refactors of the codebase will be blocked until the 1.1 release is finalized to ensure easy cherry-picks. + +### Scope + +The 1.1 release of Kubernetes will be a purely additive releases, the `v1` API will be maintained, with a set +of newly added features. + +#### Blocking Features + +The following features are considered blocking for the 1.1 release: + * Docker 1.8.x + * Graceful pod termination + * IPtables based kube-proxy (tbd if this is the default for all platforms) + * Improvements to kubectl usability and features + * Support for 250 node clusters + * Horizontal Pod autoscaling + * Support for experimental APIs and API groups. 
+ * Job objects + +#### Nice to have features + +The following features will be part of 1.1 if complete, but will not block the release: + * Deployment API + * ScheduledJob API + * Daemon Controller + * ConfigData API + * HTTP(S) load balancer support + * Rolling update improvements + * Third party CRUD resources + +## Post 1.1 + +We're in the process of prioritizing changes to be made after 1.1. + +Please watch the [Github milestones] (https://github.com/kubernetes/kubernetes/milestones) for our future plans. + + + diff --git a/_includes/docs/docs/troubleshooting.md b/_includes/docs/docs/troubleshooting.md new file mode 100644 index 0000000000..3b91cbfaed --- /dev/null +++ b/_includes/docs/docs/troubleshooting.md @@ -0,0 +1,56 @@ + +## Troubleshooting + +Sometimes things go wrong. This guide is aimed at making them right. It has two sections: + + * [Troubleshooting your application](/{{page.version}}/docs/user-guide/application-troubleshooting) - Useful for users who are deploying code into Kubernetes and wondering why it is not working. + * [Troubleshooting your cluster](/{{page.version}}/docs/admin/cluster-troubleshooting) - Useful for cluster administrators and people whose Kubernetes cluster is unhappy. + +You should also check the [known issues](/{{page.version}}/docs/user-guide/known-issues) for the release you're using. + +# Getting help + +If your problem isn't answered by any of the guides above, there are variety of ways for you to get help from the Kubernetes team. + +## Questions + +If you aren't familiar with it, many of your questions may be answered by the [user guide](/{{page.version}}/docs/user-guide/). + +We also have a number of FAQ pages: + + * [User FAQ](https://github.com/kubernetes/kubernetes/wiki/User-FAQ) + * [Debugging FAQ](https://github.com/kubernetes/kubernetes/wiki/Debugging-FAQ) + * [Services FAQ](https://github.com/kubernetes/kubernetes/wiki/Services-FAQ) + +You may also find the Stack Overflow topics relevant: + + * [Kubernetes](http://stackoverflow.com/questions/tagged/kubernetes) + * [Google Container Engine - GKE](http://stackoverflow.com/questions/tagged/google-container-engine) + +# Help! My question isn't covered! I need help now! + +## Stack Overflow + +Someone else from the community may have already asked a similar question or may be able to help with your problem. The Kubernetes team will also monitor [posts tagged kubernetes](http://stackoverflow.com/questions/tagged/kubernetes). If there aren't any existing questions that help, please [ask a new one](http://stackoverflow.com/questions/ask?tags=kubernetes)! + +## Slack + +The Kubernetes team hangs out on Slack in the `#kubernetes-users` channel. You can participate in the Kubernetes team [here](https://kubernetes.slack.com). Slack requires registration, but the Kubernetes team is open invitation to anyone to register [here](http://slack.kubernetes.io). Feel free to come and ask any and all questions. + +## Mailing List + +The Google Container Engine mailing list is [google-containers@googlegroups.com](https://groups.google.com/forum/#!forum/google-containers) + +## Bugs and Feature requests + +If you have what looks like a bug, or you would like to make a feature request, please use the [Github issue tracking system](https://github.com/kubernetes/kubernetes/issues). + +Before you file an issue, please search existing issues to see if your issue is already covered. 
+ +If filing a bug, please include detailed information about how to reproduce the problem, such as: + +* Kubernetes version: `kubectl version` +* Cloud provider, OS distro, network configuration, and Docker version +* Steps to reproduce the problem + + diff --git a/_includes/docs/docs/user-guide/accessing-the-cluster.md b/_includes/docs/docs/user-guide/accessing-the-cluster.md new file mode 100644 index 0000000000..a0b30f4820 --- /dev/null +++ b/_includes/docs/docs/user-guide/accessing-the-cluster.md @@ -0,0 +1,269 @@ + +* TOC +{:toc} + +## Accessing the cluster API + +### Accessing for the first time with kubectl + +When accessing the Kubernetes API for the first time, we suggest using the +Kubernetes CLI, `kubectl`. + +To access a cluster, you need to know the location of the cluster and have credentials +to access it. Typically, this is automatically set-up when you work through +though a [Getting started guide](/{{page.version}}/docs/getting-started-guides/), +or someone else setup the cluster and provided you with credentials and a location. + +Check the location and credentials that kubectl knows about with this command: + +```shell +$ kubectl config view +``` + +Many of the [examples](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/) provide an introduction to using +kubectl and complete documentation is found in the [kubectl manual](/{{page.version}}/docs/user-guide/kubectl/kubectl). + +### Directly accessing the REST API + +Kubectl handles locating and authenticating to the apiserver. +If you want to directly access the REST API with an http client like +curl or wget, or a browser, there are several ways to locate and authenticate: + + - Run kubectl in proxy mode. + - Recommended approach. + - Uses stored apiserver location. + - Verifies identity of apiserver using self-signed cert. No MITM possible. + - Authenticates to apiserver. + - In future, may do intelligent client-side load-balancing and failover. + - Provide the location and credentials directly to the http client. + - Alternate approach. + - Works with some types of client code that are confused by using a proxy. + - Need to import a root cert into your browser to protect against MITM. + +#### Using kubectl proxy + +The following command runs kubectl in a mode where it acts as a reverse proxy. It handles +locating the apiserver and authenticating. +Run it like this: + +```shell +$ kubectl proxy --port=8080 & +``` + +See [kubectl proxy](/{{page.version}}/docs/user-guide/kubectl/kubectl_proxy) for more details. + +Then you can explore the API with curl, wget, or a browser, like so: + +```shell +$ curl http://localhost:8080/api/ +{ + "versions": [ + "v1" + ] +} +``` + +#### Without kubectl proxy + +It is also possible to avoid using kubectl proxy by passing an authentication token +directly to the apiserver, like this: + +```shell +$ APISERVER=$(kubectl config view | grep server | cut -f 2- -d ":" | tr -d " ") +$ TOKEN=$(kubectl config view | grep token | cut -f 2 -d ":" | tr -d " ") +$ curl $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure +{ + "versions": [ + "v1" + ] +} +``` + +The above example uses the `--insecure` flag. This leaves it subject to MITM +attacks. When kubectl accesses the cluster it uses a stored root certificate +and client certificates to access the server. (These are installed in the +`~/.kube` directory). Since cluster certificates are typically self-signed, it +make take special configuration to get your http client to use root +certificate. 
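+
+One way to avoid `--insecure`, sketched below, is to point curl at the cluster's root certificate instead. This assumes your kubeconfig references the certificate authority as a file path in a `certificate-authority` entry; the variable names are only illustrative.
+
+```shell
+# Pull the CA file path and bearer token out of the local kubeconfig (assumes a file-based CA entry)
+$ APISERVER=$(kubectl config view | grep server | cut -f 2- -d ":" | tr -d " ")
+$ CA_CERT=$(kubectl config view | grep certificate-authority | cut -f 2- -d ":" | tr -d " ")
+$ TOKEN=$(kubectl config view | grep token | cut -f 2 -d ":" | tr -d " ")
+$ curl --cacert $CA_CERT --header "Authorization: Bearer $TOKEN" $APISERVER/api
+```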
+ +On some clusters, the apiserver does not require authentication; it may serve +on localhost, or be protected by a firewall. There is not a standard +for this. [Configuring Access to the API](/{{page.version}}/docs/admin/accessing-the-api) +describes how a cluster admin can configure this. Such approaches may conflict +with future high-availability support. + +### Programmatic access to the API + +There are [client libraries](/{{page.version}}/docs/devel/client-libraries) for accessing the API +from several languages. The Kubernetes project-supported +[Go](http://releases.k8s.io/{{page.githubbranch}}/pkg/client/) +client library can use the same [kubeconfig file](/{{page.version}}/docs/user-guide/kubeconfig-file) +as the kubectl CLI does to locate and authenticate to the apiserver. + +See documentation for other libraries for how they authenticate. + +### Accessing the API from a Pod + +When accessing the API from a pod, locating and authenticating +to the api server are somewhat different. + +The recommended way to locate the apiserver within the pod is with +the `kubernetes` DNS name, which resolves to a Service IP which in turn +will be routed to an apiserver. + +The recommended way to authenticate to the apiserver is with a +[service account](/{{page.version}}/docs/user-guide/service-accounts) credential. By kube-system, a pod +is associated with a service account, and a credential (token) for that +service account is placed into the filesystem tree of each container in that pod, +at `/var/run/secrets/kubernetes.io/serviceaccount/token`. + +From within a pod the recommended ways to connect to API are: + + - run a kubectl proxy as one of the containers in the pod, or as a background + process within a container. This proxies the + Kubernetes API to the localhost interface of the pod, so that other processes + in any container of the pod can access it. See this [example of using kubectl proxy + in a pod](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/kubectl-container/). + - use the Go client library, and create a client using the `client.NewInCluster()` factory. + This handles locating and authenticating to the apiserver. + +In each case, the credentials of the pod are used to communicate securely with the apiserver. + + +## Accessing services running on the cluster + +The previous section was about connecting the Kubernetes API server. This section is about +connecting to other services running on Kubernetes cluster. In Kubernetes, the +[nodes](/{{page.version}}/docs/admin/node), [pods](/{{page.version}}/docs/user-guide/pods) and [services](/{{page.version}}/docs/user-guide/services) all have +their own IPs. In many cases, the node IPs, pod IPs, and some service IPs on a cluster will not be +routable, so they will not be reachable from a machine outside the cluster, +such as your desktop machine. + +### Ways to connect + +You have several options for connecting to nodes, pods and services from outside the cluster: + + - Access services through public IPs. + - Use a service with type `NodePort` or `LoadBalancer` to make the service reachable outside + the cluster. See the [services](/{{page.version}}/docs/user-guide/services) and + [kubectl expose](/{{page.version}}/docs/user-guide/kubectl/kubectl_expose) documentation. + - Depending on your cluster environment, this may just expose the service to your corporate network, + or it may expose it to the internet. Think about whether the service being exposed is secure. + Does it do its own authentication? 
+ - Place pods behind services. To access one specific pod from a set of replicas, such as for debugging, + place a unique label on the pod it and create a new service which selects this label. + - In most cases, it should not be necessary for application developer to directly access + nodes via their nodeIPs. + - Access services, nodes, or pods using the Proxy Verb. + - Does apiserver authentication and authorization prior to accessing the remote service. + Use this if the services are not secure enough to expose to the internet, or to gain + access to ports on the node IP, or for debugging. + - Proxies may cause problems for some web applications. + - Only works for HTTP/HTTPS. + - Described [here](#discovering-builtin-services). + - Access from a node or pod in the cluster. + - Run a pod, and then connect to a shell in it using [kubectl exec](/{{page.version}}/docs/user-guide/kubectl/kubectl_exec). + Connect to other nodes, pods, and services from that shell. + - Some clusters may allow you to ssh to a node in the cluster. From there you may be able to + access cluster services. This is a non-standard method, and will work on some clusters but + not others. Browsers and other tools may or may not be installed. Cluster DNS may not work. + +### Discovering builtin services + +Typically, there are several services which are started on a cluster by kube-system. Get a list of these +with the `kubectl cluster-info` command: + +```shell +$ kubectl cluster-info + + Kubernetes master is running at https://104.197.5.247 + elasticsearch-logging is running at https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/elasticsearch-logging + kibana-logging is running at https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/kibana-logging + kube-dns is running at https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/kube-dns + grafana is running at https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/monitoring-grafana + heapster is running at https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster +``` + +This shows the proxy-verb URL for accessing each service. +For example, this cluster has cluster-level logging enabled (using Elasticsearch), which can be reached +at `https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/elasticsearch-logging/` if suitable credentials are passed, or through a kubectl proxy at, for example: +`http://localhost:8080/api/v1/proxy/namespaces/kube-system/services/elasticsearch-logging/`. +(See [above](#accessing-the-cluster-api) for how to pass credentials or use kubectl proxy.) + +#### Manually constructing apiserver proxy URLs + +As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. 
To create proxy URLs that include service endpoints, suffixes, and parameters, you simply append to the service's proxy URL: +`http://`*`kubernetes_master_address`*`/`*`service_path`*`/`*`service_name`*`/`*`service_endpoint-suffix-parameter`* + + +##### Examples + + * To access the Elasticsearch service endpoint `_search?q=user:kimchy`, you would use: `http://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy` + * To access the Elasticsearch cluster health information `_cluster/health?pretty=true`, you would use: `https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/elasticsearch-logging/_cluster/health?pretty=true` + +```json +{ + "cluster_name" : "kubernetes_logging", + "status" : "yellow", + "timed_out" : false, + "number_of_nodes" : 1, + "number_of_data_nodes" : 1, + "active_primary_shards" : 5, + "active_shards" : 5, + "relocating_shards" : 0, + "initializing_shards" : 0, + "unassigned_shards" : 5 + } +``` + +#### Using web browsers to access services running on the cluster + +You may be able to put an apiserver proxy url into the address bar of a browser. However: + + - Web browsers cannot usually pass tokens, so you may need to use basic (password) auth. Apiserver can be configured to accept basic auth, + but your cluster may not be configured to accept basic auth. + - Some web apps may not work, particularly those with client side javascript that construct urls in a + way that is unaware of the proxy path prefix. + +## Requesting redirects + +The redirect capabilities have been deprecated and removed. Please use a proxy (see below) instead. + +## So Many Proxies + +There are several different proxies you may encounter when using Kubernetes: + + 1. The [kubectl proxy](#directly-accessing-the-rest-api): + - runs on a user's desktop or in a pod + - proxies from a localhost address to the Kubernetes apiserver + - client to proxy uses HTTP + - proxy to apiserver uses HTTPS + - locates apiserver + - adds authentication headers + 1. The [apiserver proxy](#discovering-builtin-services): + - is a bastion built into the apiserver + - connects a user outside of the cluster to cluster IPs which otherwise might not be reachable + - runs in the apiserver processes + - client to proxy uses HTTPS (or http if apiserver so configured) + - proxy to target may use HTTP or HTTPS as chosen by proxy using available information + - can be used to reach a Node, Pod, or Service + - does load balancing when used to reach a Service + 1. The [kube proxy](/{{page.version}}/docs/user-guide/services/#ips-and-vips): + - runs on each node + - proxies UDP and TCP + - does not understand HTTP + - provides load balancing + - is just used to reach services + 1. A Proxy/Load-balancer in front of apiserver(s): + - existence and implementation varies from cluster to cluster (e.g. nginx) + - sits between all clients and one or more apiservers + - acts as load balancer if there are several apiservers. + 1. Cloud Load Balancers on external services: + - are provided by some cloud providers (e.g. AWS ELB, Google Cloud Load Balancer) + - are created automatically when the Kubernetes service has type `LoadBalancer` + - use UDP/TCP only + - implementation varies by cloud provider. + +Kubernetes users will typically not need to worry about anything other than the first two types. The cluster admin +will typically ensure that the latter types are setup correctly. 
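+
+As a rough illustration of how the first two proxy types combine, you can run a local kubectl proxy and let the apiserver proxy forward the request on to a cluster service. The service name and local port below are just the examples used earlier; your cluster's services may differ.
+
+```shell
+# The local proxy handles apiserver location and authentication
+$ kubectl proxy --port=8080 &
+# The apiserver proxy verb then forwards the request to the service
+$ curl http://localhost:8080/api/v1/proxy/namespaces/kube-system/services/elasticsearch-logging/_cluster/health?pretty=true
+```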
\ No newline at end of file diff --git a/_includes/docs/docs/user-guide/annotations.md b/_includes/docs/docs/user-guide/annotations.md new file mode 100644 index 0000000000..2c0b2621b6 --- /dev/null +++ b/_includes/docs/docs/user-guide/annotations.md @@ -0,0 +1,25 @@ + +We have [labels](/{{page.version}}/docs/user-guide/labels) for identifying metadata. + +It is also useful to be able to attach arbitrary non-identifying metadata, for retrieval by API clients such as tools, libraries, etc. This information may be large, may be structured or unstructured, may include characters not permitted by labels, etc. Such information would not be used for object selection and therefore doesn't belong in labels. + +Like labels, annotations are key-value maps. + +```json +"annotations": { + "key1" : "value1", + "key2" : "value2" +} +``` + +Possible information that could be recorded in annotations: + +* fields managed by a declarative configuration layer, to distinguish them from client- and/or server-set default values and other auto-generated fields, fields set by auto-sizing/auto-scaling systems, etc., in order to facilitate merging +* build/release/image information (timestamps, release ids, git branch, PR numbers, image hashes, registry address, etc.) +* pointers to logging/monitoring/analytics/audit repos +* client library/tool information (e.g. for debugging purposes -- name, version, build info) +* other user and/or tool/system provenance info, such as URLs of related objects from other ecosystem components +* lightweight rollout tool metadata (config and/or checkpoints) +* phone/pager number(s) of person(s) responsible, or directory entry where that info could be found, such as a team website + +Yes, this information could be stored in an external database or directory, but that would make it much harder to produce shared client libraries and tools for deployment, management, introspection, etc. \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/application-troubleshooting.md b/_includes/docs/docs/user-guide/application-troubleshooting.md new file mode 100644 index 0000000000..4db58f174c --- /dev/null +++ b/_includes/docs/docs/user-guide/application-troubleshooting.md @@ -0,0 +1,189 @@ + +This guide is to help users debug applications that are deployed into Kubernetes and not behaving correctly. +This is *not* a guide for people who want to debug their cluster. For that you should check out +[this guide](/{{page.version}}/docs/admin/cluster-troubleshooting) + +* TOC +{:toc} + +## FAQ + +Users are highly encouraged to check out our [FAQ](https://github.com/kubernetes/kubernetes/wiki/User-FAQ) + +## Diagnosing the problem + +The first step in troubleshooting is triage. What is the problem? Is it your Pods, your Replication Controller or +your Service? + + * [Debugging Pods](#debugging-pods) + * [Debugging Replication Controllers](#debugging-replication-controllers) + * [Debugging Services](#debugging-services) + +### Debugging Pods + +The first step in debugging a Pod is taking a look at it. Check the current state of the Pod and recent events with the following command: + +```shell +$ kubectl describe pods ${POD_NAME} +``` + +Look at the state of the containers in the pod. Are they all `Running`? Have there been recent restarts? + +Continue debugging depending on the state of the pods. + +#### My pod stays pending + +If a Pod is stuck in `Pending` it means that it can not be scheduled onto a node. 
Generally this is because +there are insufficient resources of one type or another that prevent scheduling. Look at the output of the +`kubectl describe ...` command above. There should be messages from the scheduler about why it can not schedule +your pod. Reasons include: + +* **You don't have enough resources**: You may have exhausted the supply of CPU or Memory in your cluster, in this case +you need to delete Pods, adjust resource requests, or add new nodes to your cluster. See [Compute Resources document](/{{page.version}}/docs/user-guide/compute-resources/#my-pods-are-pending-with-event-message-failedscheduling) for more information. + +* **You are using `hostPort`**: When you bind a Pod to a `hostPort` there are a limited number of places that pod can be +scheduled. In most cases, `hostPort` is unnecessary, try using a Service object to expose your Pod. If you do require +`hostPort` then you can only schedule as many Pods as there are nodes in your Kubernetes cluster. + + +#### My pod stays waiting + +If a Pod is stuck in the `Waiting` state, then it has been scheduled to a worker node, but it can't run on that machine. +Again, the information from `kubectl describe ...` should be informative. The most common cause of `Waiting` pods is a failure to pull the image. There are three things to check: + +* Make sure that you have the name of the image correct +* Have you pushed the image to the repository? +* Run a manual `docker pull ` on your machine to see if the image can be pulled. + +#### My pod is crashing or otherwise unhealthy + +First, take a look at the logs of +the current container: + +```shell +$ kubectl logs ${POD_NAME} ${CONTAINER_NAME} +``` + +If your container has previously crashed, you can access the previous container's crash log with: + +```shell +$ kubectl logs --previous ${POD_NAME} ${CONTAINER_NAME} +``` + +Alternately, you can run commands inside that container with `exec`: + +```shell +$ kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1} ${ARG2} ... ${ARGN} +``` + +Note that `-c ${CONTAINER_NAME}` is optional and can be omitted for Pods that only contain a single container. + +As an example, to look at the logs from a running Cassandra pod, you might run + +```shell +$ kubectl exec cassandra -- cat /var/log/cassandra/system.log +``` + +If none of these approaches work, you can find the host machine that the pod is running on and SSH into that host, +but this should generally not be necessary given tools in the Kubernetes API. Therefore, if you find yourself needing to ssh into a machine, please file a +feature request on GitHub describing your use case and why these tools are insufficient. + +#### My pod is running but not doing what I told it to do + +If your pod is not behaving as you expected, it may be that there was an error in your +pod description (e.g. `mypod.yaml` file on your local machine), and that the error +was silently ignored when you created the pod. Often a section of the pod description +is nested incorrectly, or a key name is typed incorrectly, and so the key is ignored. +For example, if you misspelled `command` as `commnd` then the pod will be created but +will not use the command line you intended it to use. + +The first thing to do is to delete your pod and try creating it again with the `--validate` option. +For example, run `kubectl create --validate -f mypod.yaml`. 
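+
+A minimal sketch of that cycle, assuming the pod and its spec file are both named `mypod` as above:
+
+```shell
+# Remove the misbehaving pod, then recreate it with client-side validation enabled
+$ kubectl delete pod mypod
+$ kubectl create --validate -f mypod.yaml
+```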
+If you misspelled `command` as `commnd` then will give an error like this: + +```shell +I0805 10:43:25.129850 46757 schema.go:126] unknown field: commnd +I0805 10:43:25.129973 46757 schema.go:129] this may be a false alarm, see https://github.com/kubernetes/kubernetes/issues/6842 +pods/mypod +``` + + + +The next thing to check is whether the pod on the apiserver +matches the pod you meant to create (e.g. in a yaml file on your local machine). +For example, run `kubectl get pods/mypod -o yaml > mypod-on-apiserver.yaml` and then +manually compare the original pod description, `mypod.yaml` with the one you got +back from apiserver, `mypod-on-apiserver.yaml`. There will typically be some +lines on the "apiserver" version that are not on the original version. This is +expected. However, if there are lines on the original that are not on the apiserver +version, then this may indicate a problem with your pod spec. + +### Debugging Replication Controllers + +Replication controllers are fairly straightforward. They can either create Pods or they can't. If they can't +create pods, then please refer to the [instructions above](#debugging-pods) to debug your pods. + +You can also use `kubectl describe rc ${CONTROLLER_NAME}` to introspect events related to the replication +controller. + +### Debugging Services + +Services provide load balancing across a set of pods. There are several common problems that can make Services +not work properly. The following instructions should help debug Service problems. + +First, verify that there are endpoints for the service. For every Service object, the apiserver makes an `endpoints` resource available. + +You can view this resource with: + +```shell +$ kubectl get endpoints ${SERVICE_NAME} +``` + +Make sure that the endpoints match up with the number of containers that you expect to be a member of your service. +For example, if your Service is for an nginx container with 3 replicas, you would expect to see three different +IP addresses in the Service's endpoints. + +#### My service is missing endpoints + +If you are missing endpoints, try listing pods using the labels that Service uses. Imagine that you have +a Service where the labels are: + +```yaml +... +spec: + - selector: + name: nginx + type: frontend +``` + +You can use: + +```shell +$ kubectl get pods --selector=name=nginx,type=frontend +``` + +to list pods that match this selector. Verify that the list matches the Pods that you expect to provide your Service. + +If the list of pods matches expectations, but your endpoints are still empty, it's possible that you don't +have the right ports exposed. If your service has a `containerPort` specified, but the Pods that are +selected don't have that port listed, then they won't be added to the endpoints list. + +Verify that the pod's `containerPort` matches up with the Service's `containerPort` + +#### Network traffic is not forwarded + +If you can connect to the service, but the connection is immediately dropped, and there are endpoints +in the endpoints list, it's likely that the proxy can't contact your pods. + +There are three things to +check: + + * Are your pods working correctly? Look for restart count, and [debug pods](#debugging-pods) + * Can you connect to your pods directly? Get the IP address for the Pod, and try to connect directly to that IP + * Is your application serving on the port that you configured? Kubernetes doesn't do port remapping, so if your application serves on 8080, the `containerPort` field needs to be 8080. 
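+
+A quick way to work through the last two checks is to probe a backing pod directly, bypassing the Service. The label selector below matches the earlier example, and `${POD_IP}` stands in for one of the addresses from your own output; adjust both for your application.
+
+```shell
+# Restart counts for the pods backing the Service
+$ kubectl get pods --selector=name=nginx,type=frontend
+# Find a pod IP to probe directly
+$ kubectl get pods --selector=name=nginx,type=frontend -o json | grep podIP
+# From a node or another pod in the cluster, hit the containerPort directly
+$ curl http://${POD_IP}:8080
+```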
+ +#### More information + +If none of the above solves your problem, follow the instructions in [Debugging Service document](/{{page.version}}/docs/user-guide/debugging-services) to make sure that your `Service` is running, has `Endpoints`, and your `Pods` are actually serving; you have DNS working, iptables rules installed, and kube-proxy does not seem to be misbehaving. + +You may also visit [troubleshooting document](/{{page.version}}/docs/troubleshooting/) for more information. \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/compute-resources.md b/_includes/docs/docs/user-guide/compute-resources.md new file mode 100644 index 0000000000..5c3734313a --- /dev/null +++ b/_includes/docs/docs/user-guide/compute-resources.md @@ -0,0 +1,247 @@ + +* TOC +{:toc} + +When specifying a [pod](/{{page.version}}/docs/user-guide/pods), you can optionally specify how much CPU and memory (RAM) each +container needs. When containers have their resource requests specified, the scheduler is +able to make better decisions about which nodes to place pods on; and when containers have their +limits specified, contention for resources on a node can be handled in a specified manner. For +more details about the difference between requests and limits, please refer to +[Resource QoS](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/proposals/resource-qos.md). + +*CPU* and *memory* are each a *resource type*. A resource type has a base unit. CPU is specified +in units of cores. Memory is specified in units of bytes. + +CPU and RAM are collectively referred to as *compute resources*, or just *resources*. Compute +resources are measureable quantities which can be requested, allocated, and consumed. They are +distinct from [API resources](/{{page.version}}/docs/user-guide/working-with-resources). API resources, such as pods and +[services](/{{page.version}}/docs/user-guide/services) are objects that can be written to and retrieved from the Kubernetes API +server. + +## Resource Requests and Limits of Pod and Container + +Each container of a Pod can optionally specify `spec.container[].resources.limits.cpu` and/or +`spec.container[].resources.limits.memory` and/or `spec.container[].resources.requests.cpu` +and/or `spec.container[].resources.requests.memory`. + +Specifying resource requests and/or limits is optional. In some clusters, unset limits or requests +may be replaced with default values when a pod is created or updated. The default value depends on +how the cluster is configured. If value of requests is not specified, they are set to be equal +to limits by default. Please note that resource limits must be greater than or equal to resource +requests. + +Although requests/limits can only be specified on individual containers, it is convenient to talk +about pod resource requests/limits. A *pod resource request/limit* for a particular resource +type is the sum of the resource requests/limits of that type for each container in the pod, with +unset values treated as zero (or equal to default values in some cluster configurations). + +The following pod has two containers. Each has a request of 0.25 core of cpu and 64MiB +(220 bytes) of memory and a limit of 0.5 core of cpu and 128MiB of memory. The pod can +be said to have a request of 0.5 core and 128 MiB of memory and a limit of 1 core and 256MiB of +memory. 
+ +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: frontend +spec: + containers: + - name: db + image: mysql + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + - name: wp + image: wordpress + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" +``` + +## How Pods with Resource Requests are Scheduled + +When a pod is created, the Kubernetes scheduler selects a node for the pod to +run on. Each node has a maximum capacity for each of the resource types: the +amount of CPU and memory it can provide for pods. The scheduler ensures that, +for each resource type (CPU and memory), the sum of the resource requests of the +containers scheduled to the node is less than the capacity of the node. Note +that although actual memory or CPU resource usage on nodes is very low, the +scheduler will still refuse to place pods onto nodes if the capacity check +fails. This protects against a resource shortage on a node when resource usage +later increases, such as due to a daily peak in request rate. + +## How Pods with Resource Limits are Run + +When kubelet starts a container of a pod, it passes the CPU and memory limits to the container +runner (Docker or rkt). + +When using Docker: + +- The `spec.container[].resources.limits.cpu` is multiplied by 1024, converted to an integer, and + used as the value of the [`--cpu-shares`]( + https://docs.docker.com/reference/run/#runtime-constraints-on-resources) flag to the `docker run` + command. +- The `spec.container[].resources.limits.memory` is converted to an integer, and used as the value + of the [`--memory`](https://docs.docker.com/reference/run/#runtime-constraints-on-resources) flag + to the `docker run` command. + +**TODO: document behavior for rkt** + +If a container exceeds its memory limit, it may be terminated. If it is restartable, it will be +restarted by kubelet, as will any other type of runtime failure. + +A container may or may not be allowed to exceed its CPU limit for extended periods of time. +However, it will not be killed for excessive CPU usage. + +To determine if a container cannot be scheduled or is being killed due to resource limits, see the +"Troubleshooting" section below. + +## Monitoring Compute Resource Usage + +The resource usage of a pod is reported as part of the Pod status. + +If [optional monitoring](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/cluster-monitoring/README.md) is configured for your cluster, +then pod resource usage can be retrieved from the monitoring system. + +## Troubleshooting + +### My pods are pending with event message failedScheduling + +If the scheduler cannot find any node where a pod can fit, then the pod will remain unscheduled +until a place can be found. An event will be produced each time the scheduler fails to find a +place for the pod, like this: + +```shell +$ kubectl describe pod frontend | grep -A 3 Events +Events: + FirstSeen LastSeen Count From Subobject PathReason Message + 36s 5s 6 {scheduler } FailedScheduling Failed for reason PodExceedsFreeCPU and possibly others +``` + +In the case shown above, the pod "frontend" fails to be scheduled due to insufficient +CPU resource on the node. Similar error messages can also suggest failure due to insufficient +memory (PodExceedsFreeMemory). In general, if a pod or pods are pending with this message and +alike, then there are several things to try: + +- Add more nodes to the cluster. 
+- Terminate unneeded pods to make room for pending pods. +- Check that the pod is not larger than all the nodes. For example, if all the nodes +have a capacity of `cpu: 1`, then a pod with a limit of `cpu: 1.1` will never be scheduled. + +You can check node capacities and amounts allocated with the `kubectl describe nodes` command. +For example: + +```shell +$ kubectl describe nodes gke-cluster-4-386701dd-node-ww4p +Name: gke-cluster-4-386701dd-node-ww4p +[ ... lines removed for clarity ...] +Capacity: + cpu: 1 + memory: 464Mi + pods: 40 +Allocated resources (total requests): + cpu: 910m + memory: 2370Mi + pods: 4 +[ ... lines removed for clarity ...] +Pods: (4 in total) + Namespace Name CPU(milliCPU) Memory(bytes) + frontend webserver-ffj8j 500 (50% of total) 2097152000 (50% of total) + kube-system fluentd-cloud-logging-gke-cluster-4-386701dd-node-ww4p 100 (10% of total) 209715200 (5% of total) + kube-system kube-dns-v8-qopgw 310 (31% of total) 178257920 (4% of total) +TotalResourceLimits: + CPU(milliCPU): 910 (91% of total) + Memory(bytes): 2485125120 (59% of total) +[ ... lines removed for clarity ...] +``` + +Here you can see from the `Allocated resources` section that that a pod which ask for more than +90 millicpus or more than 1341MiB of memory will not be able to fit on this node. + +Looking at the `Pods` section, you can see which pods are taking up space on the node. + +The [resource quota](/{{page.version}}/docs/admin/resource-quota) feature can be configured +to limit the total amount of resources that can be consumed. If used in conjunction +with namespaces, it can prevent one team from hogging all the resources. + +### My container is terminated + +Your container may be terminated because it's resource-starved. To check if a container is being killed because it is hitting a resource limit, call `kubectl describe pod` +on the pod you are interested in: + +```shell +[12:54:41] $ ./cluster/kubectl.sh describe pod simmemleak-hra99 +Name: simmemleak-hra99 +Namespace: default +Image(s): saadali/simmemleak +Node: kubernetes-minion-tf0f/10.240.216.66 +Labels: name=simmemleak +Status: Running +Reason: +Message: +IP: 10.244.2.75 +Replication Controllers: simmemleak (1/1 replicas created) +Containers: + simmemleak: + Image: saadali/simmemleak + Limits: + cpu: 100m + memory: 50Mi + State: Running + Started: Tue, 07 Jul 2015 12:54:41 -0700 + Last Termination State: Terminated + Exit Code: 1 + Started: Fri, 07 Jul 2015 12:54:30 -0700 + Finished: Fri, 07 Jul 2015 12:54:33 -0700 + Ready: False + Restart Count: 5 +Conditions: + Type Status + Ready False +Events: + FirstSeen LastSeen Count From SubobjectPath Reason Message + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {scheduler } scheduled Successfully assigned simmemleak-hra99 to kubernetes-minion-tf0f + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-minion-tf0f} implicitly required container POD pulled Pod container image "gcr.io/google_containers/pause:0.8.0" already present on machine + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-minion-tf0f} implicitly required container POD created Created with docker id 6a41280f516d + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-minion-tf0f} implicitly required container POD started Started with docker id 6a41280f516d + Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-minion-tf0f} spec.containers{simmemleak} created Created 
with docker id 87348f12526a +``` + +The `Restart Count: 5` indicates that the `simmemleak` container in this pod was terminated and restarted 5 times. + +You can call `get pod` with the `-o go-template=...` option to fetch the status of previously terminated containers: + +```shell +[13:59:01] $ ./cluster/kubectl.sh get pod -o go-template='{{range.status.containerStatuses}}{{"Container Name: "}}{{.name}}{{"\r\nLastState: "}}{{.lastState}}{{end}}' simmemleak-60xbc +Container Name: simmemleak +LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]][13:59:03] clusterScaleDoc ~/go/src/github.com/kubernetes/kubernetes $ +``` + +We can see that this container was terminated because `reason:OOM Killed`, where *OOM* stands for Out Of Memory. + +## Planned Improvements + +The current system only allows resource quantities to be specified on a container. +It is planned to improve accounting for resources which are shared by all containers in a pod, +such as [EmptyDir volumes](/{{page.version}}/docs/user-guide/volumes/#emptydir). + +The current system only supports container requests and limits for CPU and Memory. +It is planned to add new resource types, including a node disk space +resource, and a framework for adding custom [resource types](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/resources.md#resource-types). + +Kubernetes supports overcommitment of resources by supporting multiple levels of [Quality of Service](http://issue.k8s.io/168). + +Currently, one unit of CPU means different things on different cloud providers, and on different +machine types within the same cloud providers. For example, on AWS, the capacity of a node +is reported in [ECUs](http://aws.amazon.com/ec2/faqs/), while in GCE it is reported in logical +cores. We plan to revise the definition of the cpu resource to allow for more consistency +across providers and platforms. \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/config-best-practices.md b/_includes/docs/docs/user-guide/config-best-practices.md new file mode 100644 index 0000000000..9f1fe2516e --- /dev/null +++ b/_includes/docs/docs/user-guide/config-best-practices.md @@ -0,0 +1,23 @@ + +This document is meant to highlight and consolidate in one place configuration best practices that are introduced throughout the user-guide and getting-started documentation and examples. This is a living document so if you think of something that is not on this list but might be useful to others, please don't hesitate to file an issue or submit a PR. + +1. When writing configuration, use the latest stable API version (currently v1). +1. Configuration should be stored in version control before being pushed to the cluster. This allows configuration to be quickly rolled back if needed and will aid with cluster re-creation and restoration if the worst were to happen. +1. Use YAML rather than JSON. They can be used interchangeably in almost all scenarios but YAML tends to be more user-friendly for config. +1. Group related objects together in a single file. This is often better than separate files. +1. Use `kubectl create -f ` where possible. This looks for config objects in all `.yaml`, `.yml`, and `.json` files in `` and passes them to create. +1. Create a service before corresponding replication controllers so that the scheduler can spread the pods comprising the service. 
You can also create the replication controller without specifying replicas, create the service, then scale up the replication controller, which may work better in an example using progressive disclosure and may have benefits in real scenarios also, such as ensuring one replica works before creating lots of them) +1. Don't use `hostPort` unless absolutely necessary (e.g., for a node daemon) as it will prevent certain scheduling configurations due to port conflicts. Use the apiserver proxying or port forwarding for debug/admin access, or a service for external service access. If you need to expose a pod's port on the host machine, consider using a [NodePort](/{{page.version}}/docs/user-guide/services/#type--loadbalancer) service before resorting to `hostPort`. If you only need access to the port for debugging purposes, you can also use the [kubectl proxy and apiserver proxy](/{{page.version}}/docs/user-guide/connecting-to-applications-proxy) or [kubectl port-forward](/{{page.version}}/docs/user-guide/connecting-to-applications-port-forward). +1. Don't use `hostNetwork` for the same reasons as `hostPort`. +1. Don't specify default values unnecessarily, to simplify and minimize configs. For example, omit the selector and labels in ReplicationController if you want them to be the same as the labels in its podTemplate, since those fields are populated from the podTemplate labels by default. +1. Instead of attaching one label to a set of pods to represent a service (e.g., `service: myservice`) and another to represent the replication controller managing the pods (e.g., `controller: mycontroller`), attach labels that identify semantic attributes of your application or deployment and select the appropriate subsets in your service and replication controller, such as `{ app: myapp, tier: frontend, deployment: v3 }`. A service can be made to span multiple deployments, such as across rolling updates, by simply omitting release-specific labels from its selector, rather than updating a service's selector to match the replication controller's selector fully. +1. Use kubectl bulk operations (via files and/or labels) for get and delete. See [label selectors](/{{page.version}}/docs/user-guide/labels/#label-selectors) and [using labels effectively](/{{page.version}}/docs/user-guide/managing-deployments/#using-labels-effectively). +1. Use kubectl run and expose to quickly create and expose single container replication controllers. See the [quick start guide](/{{page.version}}/docs/user-guide/quick-start) for an example. +1. Use headless services for easy service discovery when you don't need kube-proxy load balancing. See [headless services](/{{page.version}}/docs/user-guide/services/#headless-services). +1. Use kubectl delete rather than stop. Delete has a superset of the functionality of stop and stop is deprecated. +1. If there is a viable alternative to naked pods (i.e. pods not bound to a controller), go with the alternative. Controllers are almost always preferable to creating pods (except for some `restartPolicy: Never` scenarios). A minimal Job is coming. See [#1624](http://issue.k8s.io/1624). Naked pods will not be rescheduled in the event of node failure. +1. Put a version number or hash as a suffix to the name and in a label on a replication controller to facilitate rolling update, as we do for [--image](/{{page.version}}/docs/user-guide/kubectl/kubectl_rolling-update). This is necessary because rolling-update actually creates a new controller as opposed to modifying the existing controller. 
This does not play well with version agnostic controller names. +1. Put an object description in an annotation to allow better introspection. + + + diff --git a/_includes/docs/docs/user-guide/configuring-containers.md b/_includes/docs/docs/user-guide/configuring-containers.md new file mode 100644 index 0000000000..04c390aedc --- /dev/null +++ b/_includes/docs/docs/user-guide/configuring-containers.md @@ -0,0 +1,162 @@ + +* TOC +{:toc} + +## Configuration in Kubernetes + +In addition to the imperative-style commands, such as `kubectl run` and `kubectl expose`, described [elsewhere](/{{page.version}}/docs/user-guide/quick-start), Kubernetes supports declarative configuration. Often times, configuration files are preferable to imperative commands, since they can be checked into version control and changes to the files can be code reviewed, which is especially important for more complex configurations, producing a more robust, reliable and archival system. + +In the declarative style, all configuration is stored in YAML or JSON configuration files using Kubernetes's API resource schemas as the configuration schemas. `kubectl` can create, update, delete, and get API resources. The `apiVersion` (currently 'v1'?), resource `kind`, and resource `name` are used by `kubectl` to construct the appropriate API path to invoke for the specified operation. + +## Launching a container using a configuration file + +Kubernetes executes containers in [*Pods*](/{{page.version}}/docs/user-guide/pods). A pod containing a simple Hello World container can be specified in YAML as follows: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: hello-world +spec: # specification of the pod's contents + restartPolicy: Never + containers: + - name: hello + image: "ubuntu:14.04" + command: ["/bin/echo","hello'?,'?world"] +``` + +The value of `metadata.name`, `hello-world`, will be the name of the pod resource created, and must be unique within the cluster, whereas `containers[0].name` is just a nickname for the container within that pod. `image` is the name of the Docker image, which Kubernetes expects to be able to pull from a registry, the [Docker Hub](https://registry.hub.docker.com/) by default. + +`restartPolicy: Never` indicates that we just want to run the container once and then terminate the pod. + +The [`command`](/{{page.version}}/docs/user-guide/containers/#containers-and-commands) overrides the Docker container's `Entrypoint`. Command arguments (corresponding to Docker's `Cmd`) may be specified using `args`, as follows: + +```yaml +command: ["/bin/echo"] + args: ["hello","world"] +``` + +This pod can be created using the `create` command: + +```shell +$ kubectl create -f ./hello-world.yaml +pods/hello-world +``` + +`kubectl` prints the resource type and name of the resource created when successful. + +## Validating configuration + +If you're not sure you specified the resource correctly, you can ask `kubectl` to validate it for you: + +```shell +$ kubectl create -f ./hello-world.yaml --validate +``` + +Let's say you specified `entrypoint` instead of `command`. You'd see output as follows: + +```shell +I0709 06:33:05.600829 14160 schema.go:126] unknown field: entrypoint +I0709 06:33:05.600988 14160 schema.go:129] this may be a false alarm, see http://issue.k8s.io/6842 +pods/hello-world +``` + +`kubectl create --validate` currently warns about problems it detects, but creates the resource anyway, unless a required field is absent or a field value is invalid. Unknown API fields are ignored, so be careful. 
This pod was created, but with no `command`, which is an optional field, since the image may specify an `Entrypoint`. +View the [Pod API +object](http://kubernetes.io/v1.1/docs/api-reference/v1/definitions/#_v1_pod) +to see the list of valid fields. + +## Environment variables and variable expansion + +Kubernetes [does not automatically run commands in a shell](https://github.com/kubernetes/kubernetes/wiki/User-FAQ#use-of-environment-variables-on-the-command-line) (not all images contain shells). If you would like to run your command in a shell, such as to expand environment variables (specified using `env`), you could do the following: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: hello-world +spec: # specification of the pod's contents + restartPolicy: Never + containers: + - name: hello + image: "ubuntu:14.04" + env: + - name: MESSAGE + value: "hello world" + command: ["/bin/sh","-c"] + args: ["/bin/echo \"${MESSAGE}\""] +``` + +However, a shell isn't necessary just to expand environment variables. Kubernetes will do it for you if you use [`$(ENVVAR)` syntax](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/expansion): + +```yaml +command: ["/bin/echo"] + args: ["$(MESSAGE)"] +``` + +## Viewing pod status + +You can see the pod you created (actually all of your cluster's pods) using the `get` command. + +If you're quick, it will look as follows: + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +hello-world 0/1 Pending 0 0s +``` + +Initially, a newly created pod is unscheduled -- no node has been selected to run it. Scheduling happens after creation, but is fast, so you normally shouldn't see pods in an unscheduled state unless there's a problem. + +After the pod has been scheduled, the image may need to be pulled to the node on which it was scheduled, if it hadn't been pulled already. After a few seconds, you should see the container running: + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +hello-world 1/1 Running 0 5s +``` + +The `READY` column shows how many containers in the pod are running. + +Almost immediately after it starts running, this command will terminate. `kubectl` shows that the container is no longer running and displays the exit status: + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +hello-world 0/1 ExitCode:0 0 15s +``` + +## Viewing pod output + +You probably want to see the output of the command you ran. As with [`docker logs`](https://docs.docker.com/userguide/usingdocker/), `kubectl logs` will show you the output: + +```shell +$ kubectl logs hello-world +hello world +``` + +## Deleting pods + +When you're done looking at the output, you should delete the pod: + +```shell +$ kubectl delete pod hello-world +pods/hello-world +``` + +As with `create`, `kubectl` prints the resource type and name of the resource deleted when successful. + +You can also use the resource/name format to specify the pod: + +```shell +$ kubectl delete pods/hello-world +pods/hello-world +``` + +Terminated pods aren't currently automatically deleted, so that you can observe their final status, so be sure to clean up your dead pods. + +On the other hand, containers and their logs are eventually deleted automatically in order to free up disk space on the nodes. + +## What's next? 
+ +[Learn about deploying continuously running applications.](/{{page.version}}/docs/user-guide/deploying-applications) diff --git a/_includes/docs/docs/user-guide/connecting-applications.md b/_includes/docs/docs/user-guide/connecting-applications.md new file mode 100644 index 0000000000..304685c82b --- /dev/null +++ b/_includes/docs/docs/user-guide/connecting-applications.md @@ -0,0 +1,384 @@ + +* TOC +{:toc} + +# The Kubernetes model for connecting containers + +Now that you have a continuously running, replicated application you can expose it on a network. Before discussing the Kubernetes approach to networking, it is worthwhile to contrast it with the "normal" way networking works with Docker. + +By default, Docker uses host-private networking, so containers can talk to other containers only if they are on the same machine. In order for Docker containers to communicate across nodes, they must be allocated ports on the machine's own IP address, which are then forwarded or proxied to the containers. This obviously means that containers must either coordinate which ports they use very carefully or else be allocated ports dynamically. + +Coordinating ports across multiple developers is very difficult to do at scale and exposes users to cluster-level issues outside of their control. Kubernetes assumes that pods can communicate with other pods, regardless of which host they land on. We give every pod its own cluster-private-IP address so you do not need to explicitly create links between pods or mapping container ports to host ports. This means that containers within a Pod can all reach each other's ports on localhost, and all pods in a cluster can see each other without NAT. The rest of this document will elaborate on how you can run reliable services on such a networking model. + +This guide uses a simple nginx server to demonstrate proof of concept. The same principles are embodied in a more complete [Jenkins CI application](http://blog.kubernetes.io/2015/07/strong-simple-ssl-for-kubernetes). + +## Exposing pods to the cluster + +We did this in a previous example, but lets do it once again and focus on the networking perspective. Create an nginx pod, and note that it has a container port specification: + +```yaml +$ cat nginxrc.yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: my-nginx +spec: + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 +``` + +This makes it accessible from any node in your cluster. Check the nodes the pod is running on: + +```shell +$ kubectl create -f ./nginxrc.yaml +$ kubectl get pods -l app=nginx -o wide +my-nginx-6isf4 1/1 Running 0 2h e2e-test-beeps-minion-93ly +my-nginx-t26zt 1/1 Running 0 2h e2e-test-beeps-minion-93ly +``` + +Check your pods' IPs: + +```shell +$ kubectl get pods -l app=nginx -o json | grep podIP + "podIP": "10.245.0.15", + "podIP": "10.245.0.14", +``` + +You should be able to ssh into any node in your cluster and curl both IPs. Note that the containers are *not* using port 80 on the node, nor are there any special NAT rules to route traffic to the pod. This means you can run multiple nginx pods on the same node all using the same containerPort and access them from any other pod or node in your cluster using IP. Like Docker, ports can still be published to the host node's interface(s), but the need for this is radically diminished because of the networking model. 
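+
+For example, from a shell on one of the nodes (the pod IP below is one of the addresses listed above and will differ in your cluster):
+
+```shell
+# The pod IP and containerPort are reachable from any node; no host port is involved
+node $ curl http://10.245.0.15:80
+```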
+ +You can read more about [how we achieve this](/{{page.version}}/docs/admin/networking/#how-to-achieve-this) if you're curious. + +## Creating a Service + +So we have pods running nginx in a flat, cluster wide, address space. In theory, you could talk to these pods directly, but what happens when a node dies? The pods die with it, and the replication controller will create new ones, with different IPs. This is the problem a Service solves. + +A Kubernetes Service is an abstraction which defines a logical set of Pods running somewhere in your cluster, that all provide the same functionality. When created, each Service is assigned a unique IP address (also called clusterIP). This address is tied to the lifespan of the Service, and will not change while the Service is alive. Pods can be configured to talk to the Service, and know that communication to the Service will be automatically load-balanced out to some pod that is a member of the Service. + +You can create a Service for your 2 nginx replicas with the following yaml: + +```yaml +$ cat nginxsvc.yaml +apiVersion: v1 +kind: Service +metadata: + name: nginxsvc + labels: + app: nginx +spec: + ports: + - port: 80 + protocol: TCP + selector: + app: nginx +``` + +This specification will create a Service which targets TCP port 80 on any Pod with the `app=nginx` label, and expose it on an abstracted Service port (`targetPort`: is the port the container accepts traffic on, `port`: is the abstracted Service port, which can be any port other pods use to access the Service). View [service API object](http://kubernetes.io/v1.1/docs/api-reference/v1/definitions/#_v1_service) to see the list of supported fields in service definition. +Check your Service: + +```shell +$ kubectl get svc +NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE +kubernetes 10.179.240.1 443/TCP 8d +nginxsvc 10.179.252.126 122.222.183.144 80/TCP,81/TCP,82/TCP run=nginx2 11m +``` + +As mentioned previously, a Service is backed by a group of pods. These pods are exposed through `endpoints`. The Service's selector will be evaluated continuously and the results will be POSTed to an Endpoints object also named `nginxsvc`. When a pod dies, it is automatically removed from the endpoints, and new pods matching the Service's selector will automatically get added to the endpoints. Check the endpoints, and note that the IPs are the same as the pods created in the first step: + +```shell +$ kubectl describe svc nginxsvc +Name: nginxsvc +Namespace: default +Labels: app=nginx +Selector: app=nginx +Type: ClusterIP +IP: 10.0.116.146 +Port: 80/TCP +Endpoints: 10.245.0.14:80,10.245.0.15:80 +Session Affinity: None +No events. + +$ kubectl get ep +NAME ENDPOINTS +nginxsvc 10.245.0.14:80,10.245.0.15:80 +``` + +You should now be able to curl the nginx Service on `10.0.116.146:80` from any node in your cluster. Note that the Service IP is completely virtual, it never hits the wire, if you're curious about how this works you can read more about the [service proxy](/{{page.version}}/docs/user-guide/services/#virtual-ips-and-service-proxies). + +## Accessing the Service + +Kubernetes supports 2 primary modes of finding a Service - environment variables and DNS. The former works out of the box while the latter requires the [kube-dns cluster addon](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/README.md). + +### Environment Variables + +When a Pod is run on a Node, the kubelet adds a set of environment variables for each active Service. This introduces an ordering problem. 
To see why, inspect the environment of your running nginx pods: + +```shell +$ kubectl exec my-nginx-6isf4 -- printenv | grep SERVICE +KUBERNETES_SERVICE_HOST=10.0.0.1 +KUBERNETES_SERVICE_PORT=443 +``` + +Note there's no mention of your Service. This is because you created the replicas before the Service. Another disadvantage of doing this is that the scheduler might put both pods on the same machine, which will take your entire Service down if it dies. We can do this the right way by killing the 2 pods and waiting for the replication controller to recreate them. This time around the Service exists *before* the replicas. This will given you scheduler level Service spreading of your pods (provided all your nodes have equal capacity), as well as the right environment variables: + +```shell +$ kubectl scale rc my-nginx --replicas=0; kubectl scale rc my-nginx --replicas=2; +$ kubectl get pods -l app=nginx -o wide +NAME READY STATUS RESTARTS AGE NODE +my-nginx-5j8ok 1/1 Running 0 2m node1 +my-nginx-90vaf 1/1 Running 0 2m node2 + +$ kubectl exec my-nginx-5j8ok -- printenv | grep SERVICE +KUBERNETES_SERVICE_PORT=443 +NGINXSVC_SERVICE_HOST=10.0.116.146 +KUBERNETES_SERVICE_HOST=10.0.0.1 +NGINXSVC_SERVICE_PORT=80 +``` + +### DNS + +Kubernetes offers a DNS cluster addon Service that uses skydns to automatically assign dns names to other Services. You can check if it's running on your cluster: + +```shell +$ kubectl get services kube-dns --namespace=kube-system +NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE +kube-dns 10.179.240.10 53/UDP,53/TCP k8s-app=kube-dns 8d +``` + +If it isn't running, you can [enable it](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/README.md#how-do-i-configure-it). The rest of this section will assume you have a Service with a long lived IP (nginxsvc), and a dns server that has assigned a name to that IP (the kube-dns cluster addon), so you can talk to the Service from any pod in your cluster using standard methods (e.g. gethostbyname). Let's create another pod to test this: + +```yaml +$ cat curlpod.yaml +apiVersion: v1 +kind: Pod +metadata: + name: curlpod +spec: + containers: + - image: radial/busyboxplus:curl + command: + - sleep + - "3600" + imagePullPolicy: IfNotPresent + name: curlcontainer + restartPolicy: Always +``` + +And perform a lookup of the nginx Service + +```shell +$ kubectl create -f ./curlpod.yaml +default/curlpod +$ kubectl get pods curlpod +NAME READY STATUS RESTARTS AGE +curlpod 1/1 Running 0 18s + +$ kubectl exec curlpod -- nslookup nginxsvc +Server: 10.0.0.10 +Address 1: 10.0.0.10 +Name: nginxsvc +Address 1: 10.0.116.146 +``` + +## Securing the Service + +Till now we have only accessed the nginx server from within the cluster. Before exposing the Service to the internet, you want to make sure the communication channel is secure. 
For this, you will need: + +* Self signed certificates for https (unless you already have an identity certificate) +* An nginx server configured to use the certificates +* A [secret](/{{page.version}}/docs/user-guide/secrets) that makes the certificates accessible to pods + +You can acquire all these from the [nginx https example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/https-nginx/), in short: + +```shell +$ make keys secret KEY=/tmp/nginx.key CERT=/tmp/nginx.crt SECRET=/tmp/secret.json +$ kubectl create -f /tmp/secret.json +secrets/nginxsecret +$ kubectl get secrets +NAME TYPE DATA +default-token-il9rc kubernetes.io/service-account-token 1 +nginxsecret Opaque 2 +``` + +Now modify your nginx replicas to start a https server using the certificate in the secret, and the Service, to expose both ports (80 and 443): + +```yaml +$ cat nginx-app.yaml +apiVersion: v1 +kind: Service +metadata: + name: nginxsvc + labels: + app: nginx +spec: + type: NodePort + ports: + - port: 8080 + targetPort: 80 + protocol: TCP + name: http + - port: 443 + protocol: TCP + name: https + selector: + app: nginx +--- +apiVersion: v1 +kind: ReplicationController +metadata: + name: my-nginx +spec: + replicas: 1 + template: + metadata: + labels: + app: nginx + spec: + volumes: + - name: secret-volume + secret: + secretName: nginxsecret + containers: + - name: nginxhttps + image: bprashanth/nginxhttps:1.0 + ports: + - containerPort: 443 + - containerPort: 80 + volumeMounts: + - mountPath: /etc/nginx/ssl + name: secret-volume +``` + +Noteworthy points about the nginx-app manifest: + +- It contains both rc and service specification in the same file +- The [nginx server](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/https-nginx/default.conf) serves http traffic on port 80 and https traffic on 443, and nginx Service exposes both ports. +- Each container has access to the keys through a volume mounted at /etc/nginx/ssl. This is setup *before* the nginx server is started. + +```shell +$ kubectl delete rc,svc -l app=nginx; kubectl create -f ./nginx-app.yaml +replicationcontrollers/my-nginx +services/nginxsvc +services/nginxsvc +replicationcontrollers/my-nginx +``` + +At this point you can reach the nginx server from any node. + +```shell +$ kubectl get pods -o json | grep -i podip + "podIP": "10.1.0.80", +node $ curl -k https://10.1.0.80 +... +
+Welcome to nginx!
+```
+
+Note how we supplied the `-k` parameter to curl in the last step; this is because we don't know anything about the pods running nginx at certificate generation time,
+so we have to tell curl to ignore the CName mismatch. By creating a Service we linked the CName used in the certificate with the actual DNS name used by pods during Service lookup.
+Let's test this from a pod (the same secret is being reused for simplicity; the pod only needs nginx.crt to access the Service):
+
+```shell
+$ cat curlpod.yaml
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: curlrc
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: curlpod
+    spec:
+      volumes:
+      - name: secret-volume
+        secret:
+          secretName: nginxsecret
+      containers:
+      - name: curlpod
+        command:
+        - sh
+        - -c
+        - while true; do sleep 1; done
+        image: radial/busyboxplus:curl
+        volumeMounts:
+        - mountPath: /etc/nginx/ssl
+          name: secret-volume
+
+$ kubectl create -f ./curlpod.yaml
+$ kubectl get pods
+NAME              READY     STATUS    RESTARTS   AGE
+curlpod           1/1       Running   0          2m
+my-nginx-7006w    1/1       Running   0          24m
+
+$ kubectl exec curlpod -- curl https://nginxsvc --cacert /etc/nginx/ssl/nginx.crt
+...
+Welcome to nginx!
+...
+```
+
+## Exposing the Service
+
+For some parts of your applications you may want to expose a Service onto an external IP address. Kubernetes supports two ways of doing this: NodePorts and LoadBalancers. The Service created in the last section already used `NodePort`, so your nginx https replica is ready to serve traffic on the internet if your node has a public IP.
+
+```shell
+$ kubectl get svc nginxsvc -o json | grep -i nodeport -C 5
+            {
+                "name": "http",
+                "protocol": "TCP",
+                "port": 80,
+                "targetPort": 80,
+                "nodePort": 32188
+            },
+            {
+                "name": "https",
+                "protocol": "TCP",
+                "port": 443,
+                "targetPort": 443,
+                "nodePort": 30645
+            }
+
+$ kubectl get nodes -o json | grep ExternalIP -C 2
+            {
+                "type": "ExternalIP",
+                "address": "104.197.63.17"
+            }
+--
+            },
+            {
+                "type": "ExternalIP",
+                "address": "104.154.89.170"
+            }
+$ curl https://104.197.63.17:30645 -k
+...
+Welcome to nginx!
+```
+
+Let's now recreate the Service to use a cloud load balancer. Just change the `Type` of Service in the nginx-app.yaml from `NodePort` to `LoadBalancer`:
+
+```shell
+$ kubectl delete rc,svc -l app=nginx
+$ kubectl create -f ./nginx-app.yaml
+$ kubectl get svc nginxsvc
+NAME       CLUSTER_IP       EXTERNAL_IP       PORT(S)                SELECTOR     AGE
+nginxsvc   10.179.252.126   162.222.184.144   80/TCP,81/TCP,82/TCP   run=nginx2   13m
+
+$ curl https://162.222.184.144 -k
+...
+Welcome to nginx!
+```
+
+The IP address in the `EXTERNAL_IP` column is the one that is available on the public internet. The `CLUSTER_IP` is only available inside your
+cluster/private cloud network.
+
+## What's next?
+
+[Learn about more Kubernetes features that will help you run containers reliably in production.](/{{page.version}}/docs/user-guide/production-pods)
diff --git a/_includes/docs/docs/user-guide/connecting-to-applications-port-forward.md b/_includes/docs/docs/user-guide/connecting-to-applications-port-forward.md
new file mode 100644
index 0000000000..73e6245099
--- /dev/null
+++ b/_includes/docs/docs/user-guide/connecting-to-applications-port-forward.md
@@ -0,0 +1,44 @@
+
+kubectl port-forward forwards connections from a local port to a port on a pod. Its man page is available [here](/{{page.version}}/docs/user-guide/kubectl/kubectl_port-forward). Compared to [kubectl proxy](/{{page.version}}/docs/user-guide/accessing-the-cluster/#using-kubectl-proxy), `kubectl port-forward` is more generic as it can forward TCP traffic while `kubectl proxy` can only forward HTTP traffic. This guide demonstrates how to use `kubectl port-forward` to connect to a Redis database, which may be useful for database debugging.
+
+## Creating a Redis master
+
+```shell
+$ kubectl create -f examples/redis/redis-master.yaml
+pods/redis-master
+```
+
+Wait until the Redis master pod is Running and Ready,
+
+```shell
+$ kubectl get pods
+NAME           READY     STATUS    RESTARTS   AGE
+redis-master   2/2       Running   0          41s
+```
+
+## Connecting to the Redis master
+
+The Redis master is listening on port 6379. To verify this,
+
+```shell
+$ kubectl get pods redis-master -t='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}'
+6379
+```
+
+then we forward port 6379 on the local workstation to port 6379 of the redis-master pod,
+
+```shell
+$ kubectl port-forward redis-master 6379:6379
+I0710 14:43:38.274550    3655 portforward.go:225] Forwarding from 127.0.0.1:6379 -> 6379
+I0710 14:43:38.274797    3655 portforward.go:225] Forwarding from [::1]:6379 -> 6379
+```
+
+To verify the connection is successful, we run redis-cli on the local workstation,
+
+```shell
+$ redis-cli
+127.0.0.1:6379> ping
+PONG
+```
+
+Now one can debug the database from the local workstation.
\ No newline at end of file
diff --git a/_includes/docs/docs/user-guide/connecting-to-applications-proxy.md b/_includes/docs/docs/user-guide/connecting-to-applications-proxy.md
new file mode 100644
index 0000000000..48c064e5dc
--- /dev/null
+++ b/_includes/docs/docs/user-guide/connecting-to-applications-proxy.md
@@ -0,0 +1,26 @@
+
+You have seen the [basics](/{{page.version}}/docs/user-guide/accessing-the-cluster) about `kubectl proxy` and `apiserver proxy`. This guide shows how to use them together to access a service ([kube-ui](/{{page.version}}/docs/user-guide/ui)) running on the Kubernetes cluster from your workstation.
+
+
+## Getting the apiserver proxy URL of kube-ui
+
+kube-ui is deployed as a cluster add-on.
To find its apiserver proxy URL, + +```shell +$ kubectl cluster-info | grep "KubeUI" +KubeUI is running at https://173.255.119.104/api/v1/proxy/namespaces/kube-system/services/kube-ui +``` + +if this command does not find the URL, try the steps [here](/{{page.version}}/docs/user-guide/ui/#accessing-the-ui). + + +## Connecting to the kube-ui service from your local workstation + +The above proxy URL is an access to the kube-ui service provided by the apiserver. To access it, you still need to authenticate to the apiserver. `kubectl proxy` can handle the authentication. + +```shell +$ kubectl proxy --port=8001 +Starting to serve on localhost:8001 +``` + +Now you can access the kube-ui service on your local workstation at [http://localhost:8001/api/v1/proxy/namespaces/kube-system/services/kube-ui](http://localhost:8001/api/v1/proxy/namespaces/kube-system/services/kube-ui) \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/container-environment.md b/_includes/docs/docs/user-guide/container-environment.md new file mode 100644 index 0000000000..699331a226 --- /dev/null +++ b/_includes/docs/docs/user-guide/container-environment.md @@ -0,0 +1,78 @@ + +This document describes the environment for Kubelet managed containers on a Kubernetes node (kNode).  In contrast to the Kubernetes cluster API, which provides an API for creating and managing containers, the Kubernetes container environment provides the container access to information about what else is going on in the cluster. + +This cluster information makes it possible to build applications that are *cluster aware*. +Additionally, the Kubernetes container environment defines a series of hooks that are surfaced to optional hook handlers defined as part of individual containers.  Container hooks are somewhat analogous to operating system signals in a traditional process model.   However these hooks are designed to make it easier to build reliable, scalable cloud applications in the Kubernetes cluster.  Containers that participate in this cluster lifecycle become *cluster native*. + +Another important part of the container environment is the file system that is available to the container. In Kubernetes, the filesystem is a combination of an [image](/{{page.version}}/docs/user-guide/images) and one or more [volumes](/{{page.version}}/docs/user-guide/volumes). + +The following sections describe both the cluster information provided to containers, as well as the hooks and life-cycle that allows containers to interact with the management system. + +* TOC +{:toc} + +## Cluster Information + +There are two types of information that are available within the container environment.  There is information about the container itself, and there is information about other objects in the system. + +### Container Information + +Currently, the Pod name for the pod in which the container is running is set as the hostname of the container, and is accessible through all calls to access the hostname within the container (e.g. the hostname command, or the [gethostname][1] function call in libc), but this is planned to change in the future and should not be used. + +The Pod name and namespace are also available as environment variables via the [downward API](/{{page.version}}/docs/user-guide/downward-api). Additionally, user-defined environment variables from the pod definition, are also available to the container, as are any environment variables specified statically in the Docker image. 
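+
+As a rough sketch of how both kinds of variables can be requested in a pod manifest (the names `MY_POD_NAME`, `MY_POD_NAMESPACE` and `GREETING` here are arbitrary illustrative choices, not anything Kubernetes requires):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: env-demo
+spec:
+  containers:
+  - name: env-demo
+    image: busybox
+    command: ["sh", "-c", "env && sleep 3600"]
+    env:
+    # Filled in by the downward API when the pod is created.
+    - name: MY_POD_NAME
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.name
+    - name: MY_POD_NAMESPACE
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.namespace
+    # A user-defined environment variable, passed through as-is.
+    - name: GREETING
+      value: hello
+```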
+ +In the future, we anticipate expanding this information with richer information about the container.  Examples include available memory, number of restarts, and in general any state that you could get from the call to GET /pods on the API server. + +### Cluster Information + +Currently the list of all services that are running at the time when the container was created via the Kubernetes Cluster API are available to the container as environment variables.  The set of environment variables matches the syntax of Docker links. + +For a service named **foo** that maps to a container port named **bar**, the following variables are defined: + +```shell +FOO_SERVICE_HOST= +FOO_SERVICE_PORT= +``` + +Services have dedicated IP address, and are also surfaced to the container via DNS (If [DNS addon](http://releases.k8s.io/{{page.githubbranch}}/cluster/addons/dns/) is enabled).  Of course DNS is still not an enumerable protocol, so we will continue to provide environment variables so that containers can do discovery. + +## Container Hooks + +Container hooks provide information to the container about events in its management lifecycle.  For example, immediately after a container is started, it receives a *PostStart* hook.  These hooks are broadcast *into* the container with information about the life-cycle of the container.  They are different from the events provided by Docker and other systems which are *output* from the container.  Output events provide a log of what has already happened.  Input hooks provide real-time notification about things that are happening, but no historical log. + +### Hook Details + +There are currently two container hooks that are surfaced to containers: + +*PostStart* + +This hook is sent immediately after a container is created.  It notifies the container that it has been created.  No parameters are passed to the handler. + +*PreStop* + +This hook is called immediately before a container is terminated. No parameters are passed to the handler. This event handler is blocking, and must complete before the call to delete the container is sent to the Docker daemon. The SIGTERM notification sent by Docker is also still sent. A more complete description of termination behavior can be found in [Termination of Pods](/{{page.version}}/docs/user-guide/pods/#termination-of-pods). + +### Hook Handler Execution + +When a management hook occurs, the management system calls into any registered hook handlers in the container for that hook.  These hook handler calls are synchronous in the context of the pod containing the container. Typically we expect that users will make their hook handlers as lightweight as possible, but there are cases where long running commands make sense (e.g. saving state prior to container stop). + +### Hook delivery guarantees + +Hook delivery is intended to be "at least once", which means that a hook may be called multiple times for any given event (e.g. "start" or "stop") and it is up to the hook implementer to be able to handle this +correctly. + +We expect double delivery to be rare, but in some cases if the Kubelet restarts in the middle of sending a hook, the hook may be resent after the Kubelet comes back up. + +Likewise, we only make a single delivery attempt. If (for example) an http hook receiver is down, and unable to take traffic, we do not make any attempts to resend. + +Currently, there are (hopefully rare) scenarios where PostStart hooks may not be delivered. 
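+
+The handler types are described in the next section; as a concrete sketch of where hooks live in a pod spec (the commands shown are illustrative choices, not anything Kubernetes requires), a container with both hooks might look like:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: lifecycle-demo
+spec:
+  containers:
+  - name: nginx
+    image: nginx
+    lifecycle:
+      postStart:
+        exec:
+          # Runs inside the container right after it is created.
+          command: ["/bin/sh", "-c", "echo started > /tmp/started"]
+      preStop:
+        exec:
+          # Runs, and blocks, before the container is terminated.
+          command: ["/usr/sbin/nginx", "-s", "quit"]
+```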
+ +### Hook Handler Implementations + +Hook handlers are the way that hooks are surfaced to containers.  Containers can select the type of hook handler they would like to implement.  Kubernetes currently supports two different hook handler types: + + * Exec - Executes a specific command (e.g. pre-stop.sh) inside the cgroups and namespaces of the container.  Resources consumed by the command are counted against the container. + + * HTTP - Executes an HTTP request against a specific endpoint on the container. + +[1]: http://man7.org/linux/man-pages/man2/gethostname.2.html \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/containers.md b/_includes/docs/docs/user-guide/containers.md new file mode 100644 index 0000000000..fb0da340c4 --- /dev/null +++ b/_includes/docs/docs/user-guide/containers.md @@ -0,0 +1,91 @@ + +* TOC +{:toc} + +## Containers and commands + +So far the Pods we've seen have all used the `image` field to indicate what process Kubernetes +should run in a container. In this case, Kubernetes runs the image's default command. If we want +to run a particular command or override the image's defaults, there are two additional fields that +we can use: + +1. `Command`: Controls the actual command run by the image +2. `Args`: Controls the arguments passed to the command + +### How docker handles command and arguments + +Docker images have metadata associated with them that is used to store information about the image. +The image author may use this to define defaults for the command and arguments to run a container +when the user does not supply values. Docker calls the fields for commands and arguments +`Entrypoint` and `Cmd` respectively. The full details for this feature are too complicated to +describe here, mostly due to the fact that the Docker API allows users to specify both of these +fields as either a string array or a string and there are subtle differences in how those cases are +handled. We encourage the curious to check out Docker's documentation for this feature. + +Kubernetes allows you to override both the image's default command (docker `Entrypoint`) and args +(docker `Cmd`) with the `Command` and `Args` fields of `Container`. The rules are: + +1. If you do not supply a `Command` or `Args` for a container, the defaults defined by the image + will be used +2. If you supply a `Command` but no `Args` for a container, only the supplied `Command` will be + used; the image's default arguments are ignored +3. If you supply only `Args`, the image's default command will be used with the arguments you + supply +4. If you supply a `Command` **and** `Args`, the image's defaults will be ignored and the values + you supply will be used + +Here are examples for these rules in table format + +| Image `Entrypoint` | Image `Cmd` | Container `Command` | Container `Args` | Command Run | +|--------------------|------------------|---------------------|--------------------|------------------| +| `[/ep-1]` | `[foo bar]` | <not set> | <not set> | `[ep-1 foo bar]` | +| `[/ep-1]` | `[foo bar]` | `[/ep-2]` | <not set> | `[ep-2]` | +| `[/ep-1]` | `[foo bar]` | <not set> | `[zoo boo]` | `[ep-1 zoo boo]` | +| `[/ep-1]` | `[foo bar]` | `[/ep-2]` | `[zoo boo]` | `[ep-2 zoo boo]` | + + +## Capabilities + +By default, Docker containers are "unprivileged" and cannot, for example, run a Docker daemon inside a Docker container. 
We can have fine grain control over the capabilities using cap-add and cap-drop.More details [here](https://docs.docker.com/reference/run/#runtime-privilege-linux-capabilities-and-lxc-configuration). + +The relationship between Docker's capabilities and [Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7) + +| Docker's capabilities | Linux capabilities | +| ---- | ---- | +| SETPCAP | CAP_SETPCAP | +| SYS_MODULE | CAP_SYS_MODULE | +| SYS_RAWIO | CAP_SYS_RAWIO | +| SYS_PACCT | CAP_SYS_PACCT | +| SYS_ADMIN | CAP_SYS_ADMIN | +| SYS_NICE | CAP_SYS_NICE | +| SYS_RESOURCE | CAP_SYS_RESOURCE | +| SYS_TIME | CAP_SYS_TIME | +| SYS_TTY_CONFIG | CAP_SYS_TTY_CONFIG | +| MKNOD | CAP_MKNOD | +| AUDIT_WRITE | CAP_AUDIT_WRITE | +| AUDIT_CONTROL | CAP_AUDIT_CONTROL | +| MAC_OVERRIDE | CAP_MAC_OVERRIDE | +| MAC_ADMIN | CAP_MAC_ADMIN | +| NET_ADMIN | CAP_NET_ADMIN | +| SYSLOG | CAP_SYSLOG | +| CHOWN | CAP_CHOWN | +| NET_RAW | CAP_NET_RAW | +| DAC_OVERRIDE | CAP_DAC_OVERRIDE | +| FOWNER | CAP_FOWNER | +| DAC_READ_SEARCH | CAP_DAC_READ_SEARCH | +| FSETID | CAP_FSETID | +| KILL | CAP_KILL | +| SETGID | CAP_SETGID | +| SETUID | CAP_SETUID | +| LINUX_IMMUTABLE | CAP_LINUX_IMMUTABLE | +| NET_BIND_SERVICE | CAP_NET_BIND_SERVICE | +| NET_BROADCAST | CAP_NET_BROADCAST | +| IPC_LOCK | CAP_IPC_LOCK | +| IPC_OWNER | CAP_IPC_OWNER | +| SYS_CHROOT | CAP_SYS_CHROOT | +| SYS_PTRACE | CAP_SYS_PTRACE | +| SYS_BOOT | CAP_SYS_BOOT | +| LEASE | CAP_LEASE | +| SETFCAP | CAP_SETFCAP | +| WAKE_ALARM | CAP_WAKE_ALARM | +| BLOCK_SUSPEND | CAP_BLOCK_SUSPEND | \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/debugging-services.md b/_includes/docs/docs/user-guide/debugging-services.md new file mode 100644 index 0000000000..bd59aad1c7 --- /dev/null +++ b/_includes/docs/docs/user-guide/debugging-services.md @@ -0,0 +1,501 @@ + +An issue that comes up rather frequently for new installations of Kubernetes is +that `Services` are not working properly. You've run all your `Pod`s and +`ReplicationController`s, but you get no response when you try to access them. +This document will hopefully help you to figure out what's going wrong. + +* TOC +{:toc} + + +## Conventions + +Throughout this doc you will see various commands that you can run. Some +commands need to be run within `Pod`, others on a Kubernetes `Node`, and others +can run anywhere you have `kubectl` and credentials for the cluster. To make it +clear what is expected, this document will use the following conventions. + +If the command "COMMAND" is expected to run in a `Pod` and produce "OUTPUT": + +```shell +u@pod$ COMMAND +OUTPUT +``` + +If the command "COMMAND" is expected to run on a `Node` and produce "OUTPUT": + +```shell +u@node$ COMMAND +OUTPUT +``` + +If the command is "kubectl ARGS": + +```shell +$ kubectl ARGS +OUTPUT +``` + +## Running commands in a Pod + +For many steps here you will want to see what a `Pod` running in the cluster +sees. Kubernetes does not directly support interactive `Pod`s (yet), but you can +approximate it: + +```shell +$ cat < +``` + +or + +```shell +$ kubectl exec -ti busybox-sleep sh +/ # +``` + +## Setup + +For the purposes of this walk-through, let's run some `Pod`s. Since you're +probably debugging your own `Service` you can substitute your own details, or you +can follow along and get a second data point. 
+ +```shell +$ kubectl run hostnames --image=gcr.io/google_containers/serve_hostname \ + --labels=app=hostnames \ + --port=9376 \ + --replicas=3 +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +hostnames hostnames gcr.io/google_containers/serve_hostname app=hostnames 3 +``` + +Note that this is the same as if you had started the `ReplicationController` with +the following YAML: + +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: hostnames +spec: + selector: + app: hostnames + replicas: 3 + template: + metadata: + labels: + app: hostnames + spec: + containers: + - name: hostnames + image: gcr.io/google_containers/serve_hostname + ports: + - containerPort: 9376 + protocol: TCP +``` + +Confirm your `Pod`s are running: + +```shell +$ kubectl get pods -l app=hostnames +NAME READY STATUS RESTARTS AGE +hostnames-0uton 1/1 Running 0 12s +hostnames-bvc05 1/1 Running 0 12s +hostnames-yp2kp 1/1 Running 0 12s +``` + +## Does the Service exist? + +The astute reader will have noticed that we did not actually create a `Service` +yet - that is intentional. This is a step that sometimes gets forgotten, and +is the first thing to check. + +So what would happen if I tried to access a non-existent `Service`? Assuming you +have another `Pod` that consumes this `Service` by name you would get something +like: + +```shell +u@pod$ wget -qO- hostnames +wget: bad address 'hostname' +``` + +or: + +```shell +u@pod$ echo $HOSTNAMES_SERVICE_HOST +``` + +So the first thing to check is whether that `Service` actually exists: + +```shell +$ kubectl get svc hostnames +Error from server: service "hostnames" not found +``` + +So we have a culprit, let's create the `Service`. As before, this is for the +walk-through - you can use your own `Service`'s details here. + +```shell +$ kubectl expose rc hostnames --port=80 --target-port=9376 +service "hostnames" exposed +``` + +And read it back, just to be sure: + +```shell +$ kubectl get svc hostnames +NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE +hostnames 10.0.0.1 80/TCP run=hostnames 1h +``` + +As before, this is the same as if you had started the `Service` with YAML: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: hostnames +spec: + selector: + app: hostnames + ports: + - name: default + protocol: TCP + port: 80 + targetPort: 9376 +``` + +Now you can confirm that the `Service` exists. + +## Does the Service work by DNS? + +From a `Pod` in the same `Namespace`: + +```shell +u@pod$ nslookup hostnames +Server: 10.0.0.10 +Address: 10.0.0.10#53 + +Name: hostnames +Address: 10.0.1.175 +``` + +If this fails, perhaps your `Pod` and `Service` are in different +`Namespace`s, try a namespace-qualified name: + +```shell +u@pod$ nslookup hostnames.default +Server: 10.0.0.10 +Address: 10.0.0.10#53 + +Name: hostnames.default +Address: 10.0.1.175 +``` + +If this works, you'll need to ensure that `Pod`s and `Service`s run in the same +`Namespace`. If this still fails, try a fully-qualified name: + +```shell +u@pod$ nslookup hostnames.default.svc.cluster.local +Server: 10.0.0.10 +Address: 10.0.0.10#53 + +Name: hostnames.default.svc.cluster.local +Address: 10.0.1.175 +``` + +Note the suffix here: "default.svc.cluster.local". The "default" is the +`Namespace` we're operating in. The "svc" denotes that this is a `Service`. +The "cluster.local" is your cluster domain. 
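+
+These suffixes come from the search path in the `Pod`'s resolver configuration, which the `kubelet` writes for you. If you want to see where short names get expanded, `/etc/resolv.conf` inside a `Pod` typically looks something like this (the exact values depend on your DNS `Service` IP and cluster domain):
+
+```shell
+u@pod$ cat /etc/resolv.conf
+nameserver 10.0.0.10
+search default.svc.cluster.local svc.cluster.local cluster.local
+options ndots:5
+```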
+ +You can also try this from a `Node` in the cluster (note: 10.0.0.10 is my DNS +`Service`): + +```shell +u@node$ nslookup hostnames.default.svc.cluster.local 10.0.0.10 +Server: 10.0.0.10 +Address: 10.0.0.10#53 + +Name: hostnames.default.svc.cluster.local +Address: 10.0.1.175 +``` + +If you are able to do a fully-qualified name lookup but not a relative one, you +need to check that your `kubelet` is running with the right flags. +The `--cluster-dns` flag needs to point to your DNS `Service`'s IP and the +`--cluster-domain` flag needs to be your cluster's domain - we assumed +"cluster.local" in this document, but yours might be different, in which case +you should change that in all of the commands above. + +### Does any Service exist in DNS? + +If the above still fails - DNS lookups are not working for your `Service` - we +can take a step back and see what else is not working. The Kubernetes master +`Service` should always work: + +```shell +u@pod$ nslookup kubernetes.default +Server: 10.0.0.10 +Address 1: 10.0.0.10 + +Name: kubernetes +Address 1: 10.0.0.1 +``` + +If this fails, you might need to go to the kube-proxy section of this doc, or +even go back to the top of this document and start over, but instead of +debugging your own `Service`, debug DNS. + +## Does the Service work by IP? + +The next thing to test is whether your `Service` works at all. From a +`Node` in your cluster, access the `Service`'s IP (from `kubectl get` above). + +```shell +u@node$ curl 10.0.1.175:80 +hostnames-0uton + +u@node$ curl 10.0.1.175:80 +hostnames-yp2kp + +u@node$ curl 10.0.1.175:80 +hostnames-bvc05 +``` + +If your `Service` is working, you should get correct responses. If not, there +are a number of things that could be going wrong. Read on. + +## Is the Service correct? + +It might sound silly, but you should really double and triple check that your +`Service` is correct and matches your `Pods`. Read back your `Service` and +verify it: + +```shell +$ kubectl get service hostnames -o json +{ + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "hostnames", + "namespace": "default", + "selfLink": "/api/v1/namespaces/default/services/hostnames", + "uid": "428c8b6c-24bc-11e5-936d-42010af0a9bc", + "resourceVersion": "347189", + "creationTimestamp": "2015-07-07T15:24:29Z", + "labels": { + "app": "hostnames" + } + }, + "spec": { + "ports": [ + { + "name": "default", + "protocol": "TCP", + "port": 80, + "targetPort": 9376, + "nodePort": 0 + } + ], + "selector": { + "app": "hostnames" + }, + "clusterIP": "10.0.1.175", + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } +} +``` + +Is the port you are trying to access in `spec.ports[]`? Is the `targetPort` +correct for your `Pod`s? If you meant it to be a numeric port, is it a number +(9376) or a string "9376"? If you meant it to be a named port, do your `Pod`s +expose a port with the same name? Is the port's `protocol` the same as the +`Pod`'s? + +## Does the Service have any Endpoints? + +If you got this far, we assume that you have confirmed that your `Service` +exists and resolves by DNS. Now let's check that the `Pod`s you ran are +actually being selected by the `Service`. + +Earlier we saw that the `Pod`s were running. 
We can re-check that: + +```shell +$ kubectl get pods -l app=hostnames +NAME READY STATUS RESTARTS AGE +hostnames-0uton 1/1 Running 0 1h +hostnames-bvc05 1/1 Running 0 1h +hostnames-yp2kp 1/1 Running 0 1h +``` + +The "AGE" column says that these `Pod`s are about an hour old, which implies that +they are running fine and not crashing. + +The `-l app=hostnames` argument is a label selector - just like our `Service` +has. Inside the Kubernetes system is a control loop which evaluates the +selector of every `Service` and save the results into an `Endpoints` object. + +```shell +$ kubectl get endpoints hostnames +NAME ENDPOINTS +hostnames 10.244.0.5:9376,10.244.0.6:9376,10.244.0.7:9376 +``` + +This confirms that the control loop has found the correct `Pod`s for your +`Service`. If the `hostnames` row is blank, you should check that the +`spec.selector` field of your `Service` actually selects for `metadata.labels` +values on your `Pod`s. + +## Are the Pods working? + +At this point, we know that your `Service` exists and has selected your `Pod`s. +Let's check that the `Pod`s are actually working - we can bypass the `Service` +mechanism and go straight to the `Pod`s. + +```shell +u@pod$ wget -qO- 10.244.0.5:9376 +hostnames-0uton + +pod $ wget -qO- 10.244.0.6:9376 +hostnames-bvc05 + +u@pod$ wget -qO- 10.244.0.7:9376 +hostnames-yp2kp +``` + +We expect each `Pod` in the `Endpoints` list to return its own hostname. If +this is not what happens (or whatever the correct behavior is for your own +`Pod`s), you should investigate what's happening there. You might find +`kubectl logs` to be useful or `kubectl exec` directly to your `Pod`s and check +service from there. + +## Is the kube-proxy working? + +If you get here, your `Service` is running, has `Endpoints`, and your `Pod`s +are actually serving. At this point, the whole `Service` proxy mechanism is +suspect. Let's confirm it, piece by piece. + +### Is kube-proxy running? + +Confirm that `kube-proxy` is running on your `Node`s. You should get something +like the below: + +```shell +u@node$ ps auxw | grep kube-proxy +root 4194 0.4 0.1 101864 17696 ? Sl Jul04 25:43 /usr/local/bin/kube-proxy --master=https://kubernetes-master --kubeconfig=/var/lib/kube-proxy/kubeconfig --v=2 +``` + +Next, confirm that it is not failing something obvious, like contacting the +master. To do this, you'll have to look at the logs. Accessing the logs +depends on your `Node` OS. On some OSes it is a file, such as +/var/log/kube-proxy.log, while other OSes use `journalctl` to access logs. 
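+
+On systemd-based nodes, for example, something along these lines usually works (this assumes `kube-proxy` runs as a systemd unit with that name, which varies by installation):
+
+```shell
+u@node$ journalctl -u kube-proxy
+```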
You +should see something like: + +```shell +I0707 17:34:53.945651 30031 server.go:88] Running in resource-only container "/kube-proxy" +I0707 17:34:53.945921 30031 proxier.go:121] Setting proxy IP to 10.240.115.247 and initializing iptables +I0707 17:34:54.053023 30031 roundrobin.go:262] LoadBalancerRR: Setting endpoints for default/kubernetes: to [10.240.169.188:443] +I0707 17:34:54.053175 30031 roundrobin.go:262] LoadBalancerRR: Setting endpoints for default/hostnames:default to [10.244.0.5:9376 10.244.0.6:9376 10.244.0.7:9376] +I0707 17:34:54.053284 30031 roundrobin.go:262] LoadBalancerRR: Setting endpoints for default/kube-dns:dns to [10.244.3.3:53] +I0707 17:34:54.053310 30031 roundrobin.go:262] LoadBalancerRR: Setting endpoints for default/kube-dns:dns-tcp to [10.244.3.3:53] +I0707 17:34:54.054780 30031 proxier.go:306] Adding new service "default/kubernetes:" at 10.0.0.1:443/TCP +I0707 17:34:54.054903 30031 proxier.go:247] Proxying for service "default/kubernetes:" on TCP port 40074 +I0707 17:34:54.079181 30031 proxier.go:306] Adding new service "default/hostnames:default" at 10.0.1.175:80/TCP +I0707 17:34:54.079273 30031 proxier.go:247] Proxying for service "default/hostnames:default" on TCP port 48577 +I0707 17:34:54.113665 30031 proxier.go:306] Adding new service "default/kube-dns:dns" at 10.0.0.10:53/UDP +I0707 17:34:54.113776 30031 proxier.go:247] Proxying for service "default/kube-dns:dns" on UDP port 34149 +I0707 17:34:54.120224 30031 proxier.go:306] Adding new service "default/kube-dns:dns-tcp" at 10.0.0.10:53/TCP +I0707 17:34:54.120297 30031 proxier.go:247] Proxying for service "default/kube-dns:dns-tcp" on TCP port 53476 +I0707 17:34:54.902313 30031 proxysocket.go:130] Accepted TCP connection from 10.244.3.3:42670 to 10.244.3.1:40074 +I0707 17:34:54.903107 30031 proxysocket.go:130] Accepted TCP connection from 10.244.3.3:42671 to 10.244.3.1:40074 +I0707 17:35:46.015868 30031 proxysocket.go:246] New UDP connection from 10.244.3.2:57493 +I0707 17:35:46.017061 30031 proxysocket.go:246] New UDP connection from 10.244.3.2:55471 +``` + +If you see error messages about not being able to contact the master, you +should double-check your `Node` configuration and installation steps. + +### Is kube-proxy writing iptables rules? + +One of the main responsibilities of `kube-proxy` is to write the `iptables` +rules which implement `Service`s. Let's check that those rules are getting +written. + +```shell +u@node$ iptables-save | grep hostnames +-A KUBE-PORTALS-CONTAINER -d 10.0.1.175/32 -p tcp -m comment --comment "default/hostnames:default" -m tcp --dport 80 -j REDIRECT --to-ports 48577 +-A KUBE-PORTALS-HOST -d 10.0.1.175/32 -p tcp -m comment --comment "default/hostnames:default" -m tcp --dport 80 -j DNAT --to-destination 10.240.115.247:48577 +``` + +There should be 2 rules for each port on your `Service` (just one in this +example) - a "KUBE-PORTALS-CONTAINER" and a "KUBE-PORTALS-HOST". If you do +not see these, try restarting `kube-proxy` with the `-V` flag set to 4, and +then look at the logs again. + +### Is kube-proxy proxying? + +Assuming you do see the above rules, try again to access your `Service` by IP: + +```shell +u@node$ curl 10.0.1.175:80 +hostnames-0uton +``` + +If this fails, we can try accessing the proxy directly. Look back at the +`iptables-save` output above, and extract the port number that `kube-proxy` is +using for your `Service`. In the above examples it is "48577". 
Now connect to +that: + +```shell +u@node$ curl localhost:48577 +hostnames-yp2kp +``` + +If this still fails, look at the `kube-proxy` logs for specific lines like: + +```shell +Setting endpoints for default/hostnames:default to [10.244.0.5:9376 10.244.0.6:9376 10.244.0.7:9376] +``` + +If you don't see those, try restarting `kube-proxy` with the `-V` flag set to 4, and +then look at the logs again. + +## Seek help + +If you get this far, something very strange is happening. Your `Service` is +running, has `Endpoints`, and your `Pod`s are actually serving. You have DNS +working, `iptables` rules installed, and `kube-proxy` does not seem to be +misbehaving. And yet your `Service` is not working. You should probably let +us know, so we can help investigate! + +Contact us on +[Slack](/{{page.version}}/docs/troubleshooting/#slack) or +[email](https://groups.google.com/forum/#!forum/google-containers) or +[GitHub](https://github.com/kubernetes/kubernetes). + +## More information + +Visit [troubleshooting document](/{{page.version}}/docs/troubleshooting/) for more information. \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/deploying-applications.md b/_includes/docs/docs/user-guide/deploying-applications.md new file mode 100644 index 0000000000..eef0dc12cc --- /dev/null +++ b/_includes/docs/docs/user-guide/deploying-applications.md @@ -0,0 +1,112 @@ + +You previously read about how to quickly deploy a simple replicated application using [`kubectl run`](/{{page.version}}/docs/user-guide/quick-start) and how to configure and launch single-run containers using pods ([Configuring containers](/{{page.version}}/docs/user-guide/configuring-containers)). Here you'll use the configuration-based approach to deploy a continuously running, replicated application. + +* TOC +{:toc} + +## Launching a set of replicas using a configuration file + +Kubernetes creates and manages sets of replicated containers (actually, replicated [Pods](/{{page.version}}/docs/user-guide/pods)) using [*Replication Controllers*](/{{page.version}}/docs/user-guide/replication-controller). + +A replication controller simply ensures that a specified number of pod "replicas" are running at any one time. If there are too many, it will kill some. If there are too few, it will start more. It's analogous to Google Compute Engine's [Instance Group Manager](https://cloud.google.com/compute/docs/instance-groups/manager/) or AWS's [Auto-scaling Group](http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroup) (with no scaling policies). + +The replication controller created to run nginx by `kubectl run` in the [Quick start](/{{page.version}}/docs/user-guide/quick-start) could be specified using YAML as follows: + +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: my-nginx +spec: + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 +``` + +Some differences compared to specifying just a pod are that the `kind` is `ReplicationController`, the number of `replicas` desired is specified, and the pod specification is under the `template` field. The names of the pods don't need to be specified explicitly because they are generated from the name of the replication controller. +View the [replication controller API +object](http://kubernetes.io/v1.1/docs/api-reference/v1/definitions/#_v1_replicationcontroller) +to view the list of supported fields. 
+ +This replication controller can be created using `create`, just as with pods: + +```shell +$ kubectl create -f ./nginx-rc.yaml +replicationcontrollers/my-nginx +``` + +Unlike in the case where you directly create pods, a replication controller replaces pods that are deleted or terminated for any reason, such as in the case of node failure. For this reason, we recommend that you use a replication controller for a continuously running application even if your application requires only a single pod, in which case you can omit `replicas` and it will default to a single replica. + +## Viewing replication controller status + +You can view the replication controller you created using `get`: + +```shell +$ kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +my-nginx nginx nginx app=nginx 2 +``` + +This tells you that your controller will ensure that you have two nginx replicas. + +You can see those replicas using `get`, just as with pods you created directly: + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +my-nginx-065jq 1/1 Running 0 51s +my-nginx-buaiq 1/1 Running 0 51s +``` + +## Deleting replication controllers + +When you want to kill your application, delete your replication controller, as in the [Quick start](/{{page.version}}/docs/user-guide/quick-start): + +```shell +$ kubectl delete rc my-nginx +replicationcontrollers/my-nginx +``` + +By default, this will also cause the pods managed by the replication controller to be deleted. If there were a large number of pods, this may take a while to complete. If you want to leave the pods running, specify `--cascade=false`. + +If you try to delete the pods before deleting the replication controller, it will just replace them, as it is supposed to do. + +## Labels + +Kubernetes uses user-defined key-value attributes called [*labels*](/{{page.version}}/docs/user-guide/labels) to categorize and identify sets of resources, such as pods and replication controllers. The example above specified a single label in the pod template, with key `app` and value `nginx`. All pods created carry that label, which can be viewed using `-L`: + +```shell +$ kubectl get pods -L app +NAME READY STATUS RESTARTS AGE APP +my-nginx-afv12 0/1 Running 0 3s nginx +my-nginx-lg99z 0/1 Running 0 3s nginx +``` + +The labels from the pod template are copied to the replication controller's labels by default, as well -- all resources in Kubernetes support labels: + +```shell +$ kubectl get rc my-nginx -L app +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS APP +my-nginx nginx nginx app=nginx 2 nginx +``` + +More importantly, the pod template's labels are used to create a [`selector`](/{{page.version}}/docs/user-guide/labels/#label-selectors) that will match pods carrying those labels. You can see this field by requesting it using the [Go template output format of `kubectl get`](/{{page.version}}/docs/user-guide/kubectl/kubectl_get): + +```shell +$ kubectl get rc my-nginx -o template --template="{{.spec.selector}}" +map[app:nginx] +``` + +You could also specify the `selector` explicitly, such as if you wanted to specify labels in the pod template that you didn't want to select on, but you should ensure that the selector will match the labels of the pods created from the pod template, and that it won't match pods created by other replication controllers. The most straightforward way to ensure the latter is to create a unique label value for the replication controller, and to specify it in both the pod template's labels and in the selector. 
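+
+As a sketch of that pattern (the `deployment: v1` label is just an illustrative choice of unique key/value, not something Kubernetes requires):
+
+```yaml
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: my-nginx-v1
+spec:
+  replicas: 2
+  # The selector must match the pod template's labels; the unique
+  # "deployment: v1" pair keeps this controller from selecting pods
+  # created by other controllers for the same app.
+  selector:
+    app: nginx
+    deployment: v1
+  template:
+    metadata:
+      labels:
+        app: nginx
+        deployment: v1
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+        ports:
+        - containerPort: 80
+```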
+ +## What's next? + +[Learn about exposing applications to users and clients, and connecting tiers of your application together.](/{{page.version}}/docs/user-guide/connecting-applications) \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/deployments.md b/_includes/docs/docs/user-guide/deployments.md new file mode 100644 index 0000000000..cfea8d0435 --- /dev/null +++ b/_includes/docs/docs/user-guide/deployments.md @@ -0,0 +1,349 @@ + +* TOC +{:toc} + +## What is a _Deployment_? + +A _Deployment_ provides declarative update for Pods and ReplicationControllers. +Users describe the desired state in deployment object and deployment +controller changes the actual state to that at a controlled rate. +Users can define deployments to create new resources, or replace existing ones +by new ones. + +A typical use case is: + +* Create a deployment to bring up a replication controller and pods. +* Later, update that deployment to recreate the pods (for ex: to use a new image). + +## Enabling Deployments on kubernetes cluster + +Deployments is part of the [`extensions` API Group](/{{page.version}}/docs/api/#api-groups) and is not enabled by default. +Set `--runtime-config=extensions/v1beta1/deployments=true` on API server to +enable it. +This can be achieved by exporting `ENABLE_DEPLOYMENTS=true` before running +`kube-up.sh` script on GCE. + +Note that Deployment objects effectively have [API version +`v1alpha1`](/{{page.version}}/docs/api/)#api-versioning). +Alpha objects may change or even be discontinued in future software releases. +However, due to to a known issue, they will appear as API version `v1beta1` if +enabled. + +## Creating a Deployment + +Here is an example Deployment. It creates a replication controller to +bring up 3 nginx pods. + + + +```yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + replicas: 3 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 +``` + +[Download example](/{{page.version}}/docs/user-guide/nginx-deployment.yaml) + + +Run the example by downloading the example file and then running this command: + +```shell +$ kubectl create -f docs/user-guide/nginx-deployment.yaml +deployment "nginx-deployment" created +``` + +Running a get immediately will give: + +```shell +$ kubectl get deployments +NAME UPDATEDREPLICAS AGE +nginx-deployment 0/3 8s +``` + +This indicates that deployment is trying to update 3 replicas. It has not +updated any one of those yet. + +Running a get again after a minute, will give: + +```shell +$ kubectl get deployments +NAME UPDATEDREPLICAS AGE +nginx-deployment 3/3 1m +``` + +This indicates that deployent has created all the 3 replicas. +Running ```kubectl get rc``` +and ```kubectl get pods``` +will show the replication controller (RC) and pods created. + +```shell +$ kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE +REPLICAS AGE +deploymentrc-1975012602 nginx nginx:1.7.9 deployment.kubernetes.io/podTemplateHash=1975012602,app=nginx 3 2m +``` + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +deploymentrc-1975012602-4f2tb 1/1 Running 0 1m +deploymentrc-1975012602-j975u 1/1 Running 0 1m +deploymentrc-1975012602-uashb 1/1 Running 0 1m +``` + +The created RC will ensure that there are 3 nginx pods at all time. + +## Updating a Deployment + +Lets say, now we want to update the nginx pods to start using nginx:1.9.1 image +instead of nginx:1.7.9. 
+For this, we update our deployment to be as follows: + + + +```yaml +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + replicas: 3 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.9.1 + ports: + - containerPort: 80 +``` + +[Download example](/{{page.version}}/docs/user-guide/new-nginx-deployment.yaml) + + + +```shell +$ kubectl apply -f docs/user-guide/new-nginx-deployment.yaml +deployment "nginx-deployment" configured +``` + +Running a get immediately will still give: + +```shell +$ kubectl get deployments +NAME UPDATEDREPLICAS AGE +nginx-deployment 3/3 8s +``` + +This indicates that deployment status has not been updated yet (it is still +showing old status). +Running a get again after a minute, will give: + +```shell +$ kubectl get deployments +NAME UPDATEDREPLICAS AGE +nginx-deployment 1/3 1m +``` + +This indicates that deployment has updated one of the three pods that it needs +to update. +Eventually, it will get around to updating all the pods. + +```shell +$ kubectl get deployments +NAME UPDATEDREPLICAS AGE +nginx-deployment 3/3 3m +``` + +We can run `kubectl get rc` +to see that deployment updated the pods by creating a new RC +which it scaled up to 3 and scaled down the old RC to 0. + +```shell +kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE +deploymentrc-1562004724 nginx nginx:1.9.1 deployment.kubernetes.io/podTemplateHash=1562004724,app=nginx 3 5m +deploymentrc-1975012602 nginx nginx:1.7.9 deployment.kubernetes.io/podTemplateHash=1975012602,app=nginx 0 7m +``` + +Running get pods, will only show the new pods. + +```shell +kubectl get pods +NAME READY STATUS RESTARTS AGE +deploymentrc-1562004724-0tgk5 1/1 Running 0 9m +deploymentrc-1562004724-1rkfl 1/1 Running 0 8m +deploymentrc-1562004724-6v702 1/1 Running 0 8m +``` + +Next time we want to update pods, we can just update the deployment again. + +Deployment ensures that not all pods are down while they are being updated. By +default, it ensures that minimum of 1 less than the desired number of pods are +up. For example, if you look at the above deployment closely, you will see that +it first created a new pod, then deleted some old pods and created new ones. It +does not kill old pods until a sufficient number of new pods have come up. 
+ +```shell +$ kubectl describe deployments +Name: nginx-deployment +Namespace: default +CreationTimestamp: Thu, 22 Oct 2015 17:58:49 -0700 +Labels: app=nginx-deployment +Selector: app=nginx +Replicas: 3 updated / 3 total +StrategyType: RollingUpdate +RollingUpdateStrategy: 1 max unavailable, 1 max surge, 0 min ready seconds +OldReplicationControllers: deploymentrc-1562004724 (3/3 replicas created) +NewReplicationController: +Events: + FirstSeen LastSeen Count From SubobjectPath Reason Message + '��'��'��'��'��'��'��'��'�� '��'��'��'��'��'��'��'�� '��'��'��'��'�� '��'��'��'�� '��'��'��'��'��'��'��'��'��'��'��'��'�� '��'��'��'��'��'�� '��'��'��'��'��'��'�� + 10m 10m 1 {deployment-controller } ScalingRC Scaled up rc deploymentrc-1975012602 to 3 + 2m 2m 1 {deployment-controller } ScalingRC Scaled up rc deploymentrc-1562004724 to 1 + 2m 2m 1 {deployment-controller } ScalingRC Scaled down rc deploymentrc-1975012602 to 1 + 1m 1m 1 {deployment-controller } ScalingRC Scaled up rc deploymentrc-1562004724 to 3 + 1m 1m 1 {deployment-controller } ScalingRC Scaled down rc deploymentrc-1975012602 to 0 +``` + +Here we see that when we first created the deployment, it created an RC and scaled it up to 3 replicas directly. +When we updated the deployment, it created a new RC and scaled it up to 1 and then scaled down the old RC by 1, so that at least 2 pods were available at all times. +It then scaled up the new RC to 3 and when those pods were ready, it scaled down the old RC to 0. + +### Multiple Updates + +Each time a new deployment object is observed, a replication controller is +created to bring up the desired pods if there is no existing RC doing so. +Existing RCs controlling pods whose labels match `.spec.selector` but the +template does not match `.spec.template` are scaled down. +Eventually, the new RC will be scaled to `.spec.replicas` and all old RCs will +be scaled to 0. +If the user updates the deployment while an existing deployment was in progress, +deployment will create a new RC as per the update and start scaling that up and +will roll the RC that it was scaling up before in its list of old RCs and will +start scaling it down. +For example: If user creates a deployment to create 5 replicas of nginx:1.7.9. +But then updates the deployment to create 5 replicas of nging:1.9.1, when only 3 +replicas of nginx:1.7.9 had been created, then deployment will immediately start +killing the 3 nginx:1.7.9 pods that it had created and will start creating +nginx:1.9.1 pods. It will not wait for 5 replicas of nginx:1.7.9 to be created +before changing course. + +## Writing a Deployment Spec + +As with all other Kubernetes configs, a Deployment needs `apiVersion`, `kind`, and +`metadata` fields. For general information about working with config files, +see [here](/{{page.version}}/docs/user-guide/deploying-applications), [here](/{{page.version}}/docs/user-guide/configuring-containers), and [here](/{{page.version}}/docs/user-guide/working-with-resources). + +A Deployment also needs a [`.spec` section](/{{page.version}}/docs/devel/api-conventions/#spec-and-status). + +### Pod Template + +The `.spec.template` is the only required field of the `.spec`. + +The `.spec.template` is a [pod template](/{{page.version}}/docs/user-guide/replication-controller/#pod-template). It has exactly +the same schema as a [pod](/{{page.version}}/docs/user-guide/pods), except it is nested and does not have an +`apiVersion` or `kind`. 
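+
+Putting this together, a minimal sketch that relies on the defaulting behaviour described in the next sections for `replicas` and `selector` (an assumption worth verifying against your API version) could look like:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: minimal-deployment
+spec:
+  # .spec.template is the only required field of .spec.
+  template:
+    metadata:
+      labels:
+        app: minimal
+    spec:
+      containers:
+      - name: app
+        image: nginx
+```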
+ +### Replicas + +`.spec.replicas` is an optional field that specifies the number of desired pods. Defaults +to 1. + +### Selector + +`.spec.selector` is an optional field that specifies label selectors for pods +targeted by this deployment. Deployment kills some of these pods, if their +template is different than `.spec.template` or if the total number of such pods +exceeds `.spec.replicas`. It will bring up new pods with `.spec.template` if +number of pods are less than the desired number. + +### Unique Label Key + +`.spec.uniqueLabelKey` is an optional field specifying key of the selector that +is added to existing RCs (and label key that is added to its pods) to prevent +the existing RCs to select new pods (and old pods being selected by new RC). +Users can set this to an empty string to indicate that the system should +not add any selector and label. If unspecified, system uses +"deployment.kubernetes.io/podTemplateHash". +Value of this key is hash of `.spec.template`. +No label is added if this is set to empty string. + +### Strategy + +`.spec.strategy` specifies the strategy to replace old pods by new ones. +`.spec.strategy.type` can be "Recreate" or "RollingUpdate". "RollingUpdate" is +the default value. + +#### Recreate Deployment + +All existing pods are killed before new ones are created when +`.spec.strategy.type==Recreate`. +Note: This is not implemented yet. + +#### Rolling Update Deployment + +Deployment updates pods in a [rolling update][update-demo/] fashion +when `.spec.strategy.type==RollingUpdate`. +Users can specify `maxUnavailable`, `maxSurge` and `minReadySeconds` to control +the rolling update process. + +##### Max Unavailable + +`.spec.strategy.rollingUpdate.maxUnavailable` is an optional field that specifies the +maximum number of pods that can be unavailable during the update process. +Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: +10%). +Absolute number is calculated from percentage by rounding up. +This can not be 0 if `.spec.strategy.rollingUpdate.maxSurge` is 0. +By default, a fixed value of 1 is used. +Example: when this is set to 30%, the old RC can be scaled down to +70% of desired pods immediately when the rolling update starts. Once new pods are +ready, old RC can be scaled down further, followed by scaling up the new RC, +ensuring that the total number of pods available at all times during the +update is at least 70% of desired pods. + +##### Max Surge + +`.spec.strategy.rollingUpdate.maxSurge` is an optional field that specifies the +maximum number of pods that can be created above the desired number of pods. +Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: +10%). +This can not be 0 if MaxUnavailable is 0. +Absolute number is calculated from percentage by rounding up. +By default, a value of 1 is used. +Example: when this is set to 30%, the new RC can be scaled up immediately when +the rolling update starts, such that the total number of old and new pods do not exceed +130% of desired pods. Once old pods have been killed, +new RC can be scaled up further, ensuring that total number of pods running +at any time during the update is atmost 130% of desired pods. + +##### Min Ready Seconds + +`.spec.strategy.rollingUpdate.minReadySeconds` is an optional field that specifies the +minimum number of seconds for which a newly created pod should be ready +without any of its container crashing, for it to be considered available. +Defaults to 0 (pod will be considered available as soon as it is ready). 
+Note: This is not implemented yet. + +## Alternative to Deployments + +### kubectl rolling update + +[Kubectl rolling update](/{{page.version}}/docs/user-guide/kubectl/kubectl_rolling-update) also updates pods and replication controllers in a similar fashion. +But deployments is declarative and is server side. \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/docker-cli-to-kubectl.md b/_includes/docs/docs/user-guide/docker-cli-to-kubectl.md new file mode 100644 index 0000000000..d8ab07fc2d --- /dev/null +++ b/_includes/docs/docs/user-guide/docker-cli-to-kubectl.md @@ -0,0 +1,264 @@ + +In this doc, we introduce the Kubernetes command line for interacting with the api to docker-cli users. The tool, kubectl, is designed to be familiar to docker-cli users but there are a few necessary differences. Each section of this doc highlights a docker subcommand explains the kubectl equivalent. + +* TOC +{:toc} + +#### docker run + +How do I run an nginx container and expose it to the world? Checkout [kubectl run](/{{page.version}}/docs/user-guide/kubectl/kubectl_run). + +With docker: + +```shell +$ docker run -d --restart=always -e DOMAIN=cluster --name nginx-app -p 80:80 nginx +a9ec34d9878748d2f33dc20cb25c714ff21da8d40558b45bfaec9955859075d0 +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a9ec34d98787 nginx "nginx -g 'daemon of 2 seconds ago Up 2 seconds 0.0.0.0:80->80/tcp, 443/tcp nginx-app +``` + +With kubectl: + +```shell +# start the pod running nginx +$ kubectl run --image=nginx nginx-app --port=80 --env="DOMAIN=cluster" +replicationcontroller "nginx-app" created +# expose a port through with a service +$ kubectl expose rc nginx-app --port=80 --name=nginx-http +``` + +With kubectl, we create a [replication controller](/{{page.version}}/docs/user-guide/replication-controller) which will make sure that N pods are running nginx (where N is the number of replicas stated in the spec, which defaults to 1). We also create a [service](/{{page.version}}/docs/user-guide/services) with a selector that matches the replication controller's selector. See the [Quick start](/{{page.version}}/docs/user-guide/quick-start) for more information. + +By default images are run in the background, similar to `docker run -d ...`, if you want to run things in the foreground, use: + +```shell +kubectl run [-i] [--tty] --attach --image= +``` + +Unlike `docker run ...`, if `--attach` is specified, we attach to `stdin`, `stdout` and `stderr`, there is no ability to control which streams are attached (`docker -a ...`). + +Because we start a replication controller for your container, it will be restarted if you terminate the attached process (e.g. `ctrl-c`), this is different than `docker run -it`. +To destroy the replication controller (and it's pods) you need to run `kubectl delete rc ` + +#### docker ps + +How do I list what is currently running? Checkout [kubectl get](/{{page.version}}/docs/user-guide/kubectl/kubectl_get). + +With docker: + +```shell +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a9ec34d98787 nginx "nginx -g 'daemon of About an hour ago Up About an hour 0.0.0.0:80->80/tcp, 443/tcp nginx-app +``` + +With kubectl: + +```shell +$ kubectl get po +NAME READY STATUS RESTARTS AGE +nginx-app-5jyvm 1/1 Running 0 1h +``` + +#### docker attach + +How do I attach to a process that is already running in a container? 
Checkout [kubectl attach](/{{page.version}}/docs/user-guide/kubectl/kubectl_attach) + +With docker: + +```shell +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a9ec34d98787 nginx "nginx -g 'daemon of 8 minutes ago Up 8 minutes 0.0.0.0:80->80/tcp, 443/tcp nginx-app +$ docker attach -it a9ec34d98787 +... +``` + +With kubectl: + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +nginx-app-5jyvm 1/1 Running 0 10m +$ kubectl attach -it nginx-app-5jyvm +... +``` + +#### docker exec + +How do I execute a command in a container? Checkout [kubectl exec](/{{page.version}}/docs/user-guide/kubectl/kubectl_exec). + +With docker: + +```shell +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a9ec34d98787 nginx "nginx -g 'daemon of 8 minutes ago Up 8 minutes 0.0.0.0:80->80/tcp, 443/tcp nginx-app +$ docker exec a9ec34d98787 cat /etc/hostname +a9ec34d98787 +``` + +With kubectl: + +```shell +$ kubectl get po +NAME READY STATUS RESTARTS AGE +nginx-app-5jyvm 1/1 Running 0 10m +$ kubectl exec nginx-app-5jyvm -- cat /etc/hostname +nginx-app-5jyvm +``` + +What about interactive commands? + + +With docker: + +```shell +$ docker exec -ti a9ec34d98787 /bin/sh +# exit +``` + +With kubectl: + +```shell +$ kubectl exec -ti nginx-app-5jyvm -- /bin/sh +# exit +``` + +For more information see [Getting into containers](/{{page.version}}/docs/user-guide/getting-into-containers). + +#### docker logs + +How do I follow stdout/stderr of a running process? Checkout [kubectl logs](/{{page.version}}/docs/user-guide/kubectl/kubectl_logs). + + +With docker: + +```shell +$ docker logs -f a9e +192.168.9.1 - - [14/Jul/2015:01:04:02 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.35.0" "-" +192.168.9.1 - - [14/Jul/2015:01:04:03 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.35.0" "-" +``` + +With kubectl: + +```shell +$ kubectl logs -f nginx-app-zibvs +10.240.63.110 - - [14/Jul/2015:01:09:01 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.26.0" "-" +10.240.63.110 - - [14/Jul/2015:01:09:02 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.26.0" "-" +``` + +Now's a good time to mention slight difference between pods and containers; by default pods will not terminate if their processes exit. Instead it will restart the process. This is similar to the docker run option `--restart=always` with one major difference. In docker, the output for each invocation of the process is concatenated but for Kubernetes, each invocation is separate. To see the output from a previous run in Kubernetes, do this: + +```shell +$ kubectl logs --previous nginx-app-zibvs +10.240.63.110 - - [14/Jul/2015:01:09:01 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.26.0" "-" +10.240.63.110 - - [14/Jul/2015:01:09:02 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.26.0" "-" +``` + +See [Logging](/{{page.version}}/docs/user-guide/logging) for more information. + +#### docker stop and docker rm + +How do I stop and delete a running process? Checkout [kubectl delete](/{{page.version}}/docs/user-guide/kubectl/kubectl_delete). 
+ +With docker + +```shell +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a9ec34d98787 nginx "nginx -g 'daemon of 22 hours ago Up 22 hours 0.0.0.0:80->80/tcp, 443/tcp nginx-app +$ docker stop a9ec34d98787 +a9ec34d98787 +$ docker rm a9ec34d98787 +a9ec34d98787 +``` + +With kubectl: + +```shell +$ kubectl get rc nginx-app +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS +nginx-app nginx-app nginx run=nginx-app 1 +$ kubectl get po +NAME READY STATUS RESTARTS AGE +nginx-app-aualv 1/1 Running 0 16s +$ kubectl delete rc nginx-app +NAME READY STATUS RESTARTS AGE +nginx-app-aualv 1/1 Running 0 16s +$ kubectl get po +NAME READY STATUS RESTARTS AGE +``` + +Notice that we don't delete the pod directly. With kubectl we want to delete the replication controller that owns the pod. If we delete the pod directly, the replication controller will recreate the pod. + +#### docker login + +There is no direct analog of `docker login` in kubectl. If you are interested in using Kubernetes with a private registry, see [Using a Private Registry](/{{page.version}}/docs/user-guide/images/#using-a-private-registry). + +#### docker version + +How do I get the version of my client and server? Checkout [kubectl version](/{{page.version}}/docs/user-guide/kubectl/kubectl_version). + +With docker: + +```shell +$ docker version +Client version: 1.7.0 +Client API version: 1.19 +Go version (client): go1.4.2 +Git commit (client): 0baf609 +OS/Arch (client): linux/amd64 +Server version: 1.7.0 +Server API version: 1.19 +Go version (server): go1.4.2 +Git commit (server): 0baf609 +OS/Arch (server): linux/amd64 +``` + +With kubectl: + +```shell +$ kubectl version +Client Version: version.Info{Major:"0", Minor:"20.1", GitVersion:"v0.20.1", GitCommit:"", GitTreeState:"not a git tree"} +Server Version: version.Info{Major:"0", Minor:"21+", GitVersion:"v0.21.1-411-g32699e873ae1ca-dirty", GitCommit:"32699e873ae1caa01812e41de7eab28df4358ee4", GitTreeState:"dirty"} +``` + +#### docker info + +How do I get miscellaneous info about my environment and configuration? Checkout [kubectl cluster-info](/{{page.version}}/docs/user-guide/kubectl/kubectl_cluster-info). 
+ +With docker: + +```shell +$ docker info +Containers: 40 +Images: 168 +Storage Driver: aufs + Root Dir: /usr/local/google/docker/aufs + Backing Filesystem: extfs + Dirs: 248 + Dirperm1 Supported: false +Execution Driver: native-0.2 +Logging Driver: json-file +Kernel Version: 3.13.0-53-generic +Operating System: Ubuntu 14.04.2 LTS +CPUs: 12 +Total Memory: 31.32 GiB +Name: k8s-is-fun.mtv.corp.google.com +ID: ADUV:GCYR:B3VJ:HMPO:LNPQ:KD5S:YKFQ:76VN:IANZ:7TFV:ZBF4:BYJO +WARNING: No swap limit support +``` + +With kubectl: + +```shell +$ kubectl cluster-info +Kubernetes master is running at https://108.59.85.141 +KubeDNS is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/services/kube-dns +KubeUI is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/services/kube-ui +Grafana is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/services/monitoring-grafana +Heapster is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster +InfluxDB is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb +``` \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/downward-api.md b/_includes/docs/docs/user-guide/downward-api.md new file mode 100644 index 0000000000..b04c849d5d --- /dev/null +++ b/_includes/docs/docs/user-guide/downward-api.md @@ -0,0 +1,152 @@ + +It is sometimes useful for a container to have information about itself, but we +want to be careful not to over-couple containers to Kubernetes. The downward +API allows containers to consume information about themselves or the system and +expose that information how they want it, without necessarily coupling to the +Kubernetes client or REST API. + +An example of this is a "legacy" app that is already written assuming +that a particular environment variable will hold a unique identifier. While it +is often possible to "wrap" such applications, this is tedious and error prone, +and violates the goal of low coupling. Instead, the user should be able to use +the Pod's name, for example, and inject it into this well-known variable. + +## Capabilities + +The following information is available to a `Pod` through the downward API: + +* The pod's name +* The pod's namespace +* The pod's IP + +More information will be exposed through this same API over time. + +## Exposing pod information into a container + +Containers consume information from the downward API using environment +variables or using a volume plugin. + +### Environment variables + +Most environment variables in the Kubernetes API use the `value` field to carry +simple values. However, the alternate `valueFrom` field allows you to specify +a `fieldRef` to select fields from the pod's definition. The `fieldRef` field +is a structure that has an `apiVersion` field and a `fieldPath` field. The +`fieldPath` field is an expression designating a field of the pod. The +`apiVersion` field is the version of the API schema that the `fieldPath` is +written in terms of. If the `apiVersion` field is not specified it is +defaulted to the API version of the enclosing object. + +The `fieldRef` is evaluated and the resulting value is used as the value for +the environment variable. This allows users to publish their pod's name in any +environment variable they want. 
+
+## Example
+
+This is an example of a pod that consumes its name and namespace via the
+downward API:
+
+
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dapi-test-pod
+spec:
+  containers:
+    - name: test-container
+      image: gcr.io/google_containers/busybox
+      command: [ "/bin/sh", "-c", "env" ]
+      env:
+        - name: MY_POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: MY_POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: MY_POD_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+  restartPolicy: Never
+```
+
+[Download example](/{{page.version}}/docs/user-guide/downward-api/dapi-pod.yaml)
+
+
+
+### Downward API volume
+
+Using a similar syntax, it's possible to expose pod information to containers through plain text files.
+Downward API data is dumped into a mounted volume. This is achieved using a `downwardAPI`
+volume type; the individual items represent the files to be created, and `fieldPath` references the field to be exposed.
+
+The downward API volume can store more complex data such as [`metadata.labels`](/{{page.version}}/docs/user-guide/labels) and [`metadata.annotations`](/{{page.version}}/docs/user-guide/annotations). Currently, key/value pair fields are saved using the `key="value"` format:
+
+```conf
+key1="value1"
+key2="value2"
+```
+
+In the future, it will be possible to specify an output format option.
+
+Downward API volumes can expose:
+
+* The pod's name
+* The pod's namespace
+* The pod's labels
+* The pod's annotations
+
+The downward API volume refreshes its data in step with the kubelet refresh loop. Once labels can be modified on the fly without respawning the pod, containers will be able to detect changes through mechanisms such as [inotify](https://en.wikipedia.org/wiki/Inotify).
+
+In the future, it will be possible to select a specific annotation or label.
+ +## Example + +This is an example of a pod that consumes its labels and annotations via the downward API volume, labels and annotations are dumped in `/etc/podlabels` and in `/etc/annotations`, respectively: + + + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: kubernetes-downwardapi-volume-example + labels: + zone: us-est-coast + cluster: test-cluster1 + rack: rack-22 + annotations: + build: two + builder: john-doe +spec: + containers: + - name: client-container + image: gcr.io/google_containers/busybox + command: ["sh", "-c", "while true; do if [[ -e /etc/labels ]]; then cat /etc/labels; fi; if [[ -e /etc/annotations ]]; then cat /etc/annotations; fi; sleep 5; done"] + volumeMounts: + - name: podinfo + mountPath: /etc + readOnly: false + volumes: + - name: podinfo + downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels + - path: "annotations" + fieldRef: + fieldPath: metadata.annotations +``` + +[Download example](/{{page.version}}/docs/user-guide/downward-api/volume/dapi-volume.yaml) + + +Some more thorough examples: + + * [environment variables](/{{page.version}}/docs/user-guide/environment-guide/) + * [downward API](/{{page.version}}/docs/user-guide/downward-api/) \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/downward-api/dapi-pod.yaml b/_includes/docs/docs/user-guide/downward-api/dapi-pod.yaml new file mode 100644 index 0000000000..7d688aa0e9 --- /dev/null +++ b/_includes/docs/docs/user-guide/downward-api/dapi-pod.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dapi-test-pod +spec: + containers: + - name: test-container + image: gcr.io/google_containers/busybox + command: [ "/bin/sh", "-c", "env" ] + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + restartPolicy: Never diff --git a/_includes/docs/docs/user-guide/downward-api/index.md b/_includes/docs/docs/user-guide/downward-api/index.md new file mode 100644 index 0000000000..d35c692039 --- /dev/null +++ b/_includes/docs/docs/user-guide/downward-api/index.md @@ -0,0 +1,33 @@ + +Following this example, you will create a pod with a container that consumes the pod's name and +namespace using the [downward API](/{{page.version}}/docs/user-guide/downward-api/). + +## Step Zero: Prerequisites + +This example assumes you have a Kubernetes cluster installed and running, and that you have +installed the `kubectl` command line tool somewhere in your path. Please see the [getting +started](/{{page.version}}/docs/getting-started-guides/) for installation instructions for your platform. + +## Step One: Create the pod + +Containers consume the downward API using environment variables. The downward API allows +containers to be injected with the name and namespace of the pod the container is in. + +Use the [`dapi-pod.yaml`](/{{page.version}}/docs/user-guide/downward-api/dapi-pod.yaml) file to create a Pod with a container that consumes the +downward API. + +```shell +$ kubectl create -f docs/user-guide/downward-api/dapi-pod.yaml +``` + +### Examine the logs + +This pod runs the `env` command in a container that consumes the downward API. 
You can grep +through the pod logs to see that the pod was injected with the correct values: + +```shell +$ kubectl logs dapi-test-pod | grep POD_ +2015-04-30T20:22:18.568024817Z MY_POD_NAME=dapi-test-pod +2015-04-30T20:22:18.568087688Z MY_POD_NAMESPACE=default +2015-04-30T20:22:18.568092435Z MY_POD_IP=10.0.1.6 +``` \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/downward-api/volume/dapi-volume.yaml b/_includes/docs/docs/user-guide/downward-api/volume/dapi-volume.yaml new file mode 100644 index 0000000000..be926498d1 --- /dev/null +++ b/_includes/docs/docs/user-guide/downward-api/volume/dapi-volume.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kubernetes-downwardapi-volume-example + labels: + zone: us-est-coast + cluster: test-cluster1 + rack: rack-22 + annotations: + build: two + builder: john-doe +spec: + containers: + - name: client-container + image: gcr.io/google_containers/busybox + command: ["sh", "-c", "while true; do if [[ -e /etc/labels ]]; then cat /etc/labels; fi; if [[ -e /etc/annotations ]]; then cat /etc/annotations; fi; sleep 5; done"] + volumeMounts: + - name: podinfo + mountPath: /etc + readOnly: false + volumes: + - name: podinfo + downwardAPI: + items: + - path: "labels" + fieldRef: + fieldPath: metadata.labels + - path: "annotations" + fieldRef: + fieldPath: metadata.annotations diff --git a/_includes/docs/docs/user-guide/downward-api/volume/index.md b/_includes/docs/docs/user-guide/downward-api/volume/index.md new file mode 100644 index 0000000000..698f1188b9 --- /dev/null +++ b/_includes/docs/docs/user-guide/downward-api/volume/index.md @@ -0,0 +1,65 @@ + +Following this example, you will create a pod with a downward API volume. +A downward API volume is a k8s volume plugin with the ability to save some pod information in a plain text file. The pod information can be for example some [metadata](/{{page.version}}/docs/devel/api-conventions/#metadata). + +Supported metadata fields: + +1. `metadata.annotations` +2. `metadata.namespace` +3. `metadata.name` +4. `metadata.labels` + +### Step Zero: Prerequisites + +This example assumes you have a Kubernetes cluster installed and running, and the `kubectl` +command line tool somewhere in your path. Please see the [gettingstarted](/{{page.version}}/docs/getting-started-guides/) for installation instructions for your platform. + +### Step One: Create the pod + +Use the `docs/user-guide/downward-api/dapi-volume.yaml` file to create a Pod with a  downward API volume which stores pod labels and pod annotations to `/etc/labels` and  `/etc/annotations` respectively. + +```shell +$ kubectl create -f docs/user-guide/downward-api/volume/dapi-volume.yaml +``` + +### Step Two: Examine pod/container output + +The pod displays (every 5 seconds) the content of the dump files which can be executed via the usual `kubectl log` command + +```shell +$ kubectl logs kubernetes-downwardapi-volume-example +cluster="test-cluster1" +rack="rack-22" +zone="us-est-coast" +build="two" +builder="john-doe" +kubernetes.io/config.seen="2015-08-24T13:47:23.432459138Z" +kubernetes.io/config.source="api" +``` + +### Internals + +In pod's `/etc` directory one may find the file created by the plugin (system files elided): + +```shell +$ kubectl exec kubernetes-downwardapi-volume-example -i -t -- sh +/ # ls -laR /etc +/etc: +total 32 +drwxrwxrwt 3 0 0 180 Aug 24 13:03 . +drwxr-xr-x 1 0 0 4096 Aug 24 13:05 .. 
+drwx------ 2 0 0 80 Aug 24 13:03 ..2015_08_24_13_03_44259413923 +lrwxrwxrwx 1 0 0 30 Aug 24 13:03 ..downwardapi -> ..2015_08_24_13_03_44259413923 +lrwxrwxrwx 1 0 0 25 Aug 24 13:03 annotations -> ..downwardapi/annotations +lrwxrwxrwx 1 0 0 20 Aug 24 13:03 labels -> ..downwardapi/labels + +/etc/..2015_08_24_13_03_44259413923: +total 8 +drwx------ 2 0 0 80 Aug 24 13:03 . +drwxrwxrwt 3 0 0 180 Aug 24 13:03 .. +-rw-r--r-- 1 0 0 115 Aug 24 13:03 annotations +-rw-r--r-- 1 0 0 53 Aug 24 13:03 labels +/ # +``` + +The file `labels` is stored in a temporary directory (`..2015_08_24_13_03_44259413923` in the example above) which is symlinked to by `..downwardapi`. Symlinks for annotations and labels in `/etc` point to files containing the actual metadata through the `..downwardapi` indirection.  This structure allows for dynamic atomic refresh of the metadata: updates are written to a new temporary directory, and the `..downwardapi` symlink is updated atomically using `rename(2)`. \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/environment-guide/backend-rc.yaml b/_includes/docs/docs/user-guide/environment-guide/backend-rc.yaml new file mode 100644 index 0000000000..6c57b95dac --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/backend-rc.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: ReplicationController +metadata: + name: backend-rc + labels: + type: backend-type +spec: + replicas: 3 + template: + metadata: + labels: + type: backend-type + spec: + containers: + - name: backend-container + image: gcr.io/google-samples/env-backend:1.1 + imagePullPolicy: Always + ports: + - containerPort: 5000 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace diff --git a/_includes/docs/docs/user-guide/environment-guide/backend-srv.yaml b/_includes/docs/docs/user-guide/environment-guide/backend-srv.yaml new file mode 100644 index 0000000000..7083b37bf8 --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/backend-srv.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: backend-srv + labels: + type: backend-type +spec: + ports: + - port: 5000 + protocol: TCP + selector: + type: backend-type diff --git a/_includes/docs/docs/user-guide/environment-guide/containers/backend/Dockerfile b/_includes/docs/docs/user-guide/environment-guide/containers/backend/Dockerfile new file mode 100644 index 0000000000..3fa58ff7ab --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/containers/backend/Dockerfile @@ -0,0 +1,2 @@ +FROM golang:onbuild +EXPOSE 8080 diff --git a/_includes/docs/docs/user-guide/environment-guide/containers/backend/backend.go b/_includes/docs/docs/user-guide/environment-guide/containers/backend/backend.go new file mode 100644 index 0000000000..b4edf75ff5 --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/containers/backend/backend.go @@ -0,0 +1,37 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "log" + "net/http" + "os" +) + +func printInfo(resp http.ResponseWriter, req *http.Request) { + name := os.Getenv("POD_NAME") + namespace := os.Getenv("POD_NAMESPACE") + fmt.Fprintf(resp, "Backend Container\n") + fmt.Fprintf(resp, "Backend Pod Name: %v\n", name) + fmt.Fprintf(resp, "Backend Namespace: %v\n", namespace) +} + +func main() { + http.HandleFunc("/", printInfo) + log.Fatal(http.ListenAndServe(":5000", nil)) +} diff --git a/_includes/docs/docs/user-guide/environment-guide/containers/index.md b/_includes/docs/docs/user-guide/environment-guide/containers/index.md new file mode 100644 index 0000000000..d022ccddcf --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/containers/index.md @@ -0,0 +1,25 @@ + + +## Building + +For each container, the build steps are the same. The examples below +are for the `show` container. Replace `show` with `backend` for the +backend container. + +## Google Container Registry ([GCR](https://cloud.google.com/tools/container-registry/)) + + docker build -t gcr.io//show . + gcloud docker push gcr.io//show + +## Docker Hub + + docker build -t /show . + docker push /show + +## Change Pod Definitions + +Edit both `show-rc.yaml` and `backend-rc.yaml` and replace the +specified `image:` with the one that you built. + + + diff --git a/_includes/docs/docs/user-guide/environment-guide/containers/show/Dockerfile b/_includes/docs/docs/user-guide/environment-guide/containers/show/Dockerfile new file mode 100644 index 0000000000..3fa58ff7ab --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/containers/show/Dockerfile @@ -0,0 +1,2 @@ +FROM golang:onbuild +EXPOSE 8080 diff --git a/_includes/docs/docs/user-guide/environment-guide/containers/show/show.go b/_includes/docs/docs/user-guide/environment-guide/containers/show/show.go new file mode 100644 index 0000000000..9a2cfc639d --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/containers/show/show.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "io" + "log" + "net/http" + "os" + "sort" + "strings" +) + +func getKubeEnv() (map[string]string, error) { + environS := os.Environ() + environ := make(map[string]string) + for _, val := range environS { + split := strings.Split(val, "=") + if len(split) != 2 { + return environ, fmt.Errorf("Some weird env vars") + } + environ[split[0]] = split[1] + } + for key := range environ { + if !(strings.HasSuffix(key, "_SERVICE_HOST") || + strings.HasSuffix(key, "_SERVICE_PORT")) { + delete(environ, key) + } + } + return environ, nil +} + +func printInfo(resp http.ResponseWriter, req *http.Request) { + kubeVars, err := getKubeEnv() + if err != nil { + http.Error(resp, err.Error(), http.StatusInternalServerError) + return + } + + backendHost := os.Getenv("BACKEND_SRV_SERVICE_HOST") + backendPort := os.Getenv("BACKEND_SRV_SERVICE_PORT") + backendRsp, backendErr := http.Get(fmt.Sprintf( + "http://%v:%v/", + backendHost, + backendPort)) + if backendErr == nil { + defer backendRsp.Body.Close() + } + + name := os.Getenv("POD_NAME") + namespace := os.Getenv("POD_NAMESPACE") + fmt.Fprintf(resp, "Pod Name: %v \n", name) + fmt.Fprintf(resp, "Pod Namespace: %v \n", namespace) + + envvar := os.Getenv("USER_VAR") + fmt.Fprintf(resp, "USER_VAR: %v \n", envvar) + + fmt.Fprintf(resp, "\nKubernetes environment variables\n") + var keys []string + for key := range kubeVars { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + fmt.Fprintf(resp, "%v = %v \n", key, kubeVars[key]) + } + + fmt.Fprintf(resp, "\nFound backend ip: %v port: %v\n", backendHost, backendPort) + if backendErr == nil { + fmt.Fprintf(resp, "Response from backend\n") + io.Copy(resp, backendRsp.Body) + } else { + fmt.Fprintf(resp, "Error from backend: %v", backendErr.Error()) + } +} + +func main() { + http.HandleFunc("/", printInfo) + log.Fatal(http.ListenAndServe(":8080", nil)) +} diff --git a/_includes/docs/docs/user-guide/environment-guide/index.md b/_includes/docs/docs/user-guide/environment-guide/index.md new file mode 100644 index 0000000000..795c527e76 --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/index.md @@ -0,0 +1,91 @@ + +This example demonstrates running pods, replication controllers, and +services. It shows two types of pods: frontend and backend, with +services on top of both. Accessing the frontend pod will return +environment information about itself, and a backend pod that it has +accessed through the service. The goal is to illuminate the +environment metadata available to running containers inside the +Kubernetes cluster. The documentation for the Kubernetes environment +is [here](/{{page.version}}/docs/user-guide/container-environment). + +![Diagram](/images/docs/diagram.png) + +## Prerequisites + +This example assumes that you have a Kubernetes cluster installed and +running, and that you have installed the `kubectl` command line tool +somewhere in your path. Please see the [getting +started](/{{page.version}}/docs/getting-started-guides/) for installation instructions +for your platform. 
+ +## Optional: Build your own containers + +The code for the containers is under +[containers/](/{{page.version}}/docs/user-guide/environment-guide/containers/) + +## Get everything running + +```shell +kubectl create -f ./backend-rc.yaml +kubectl create -f ./backend-srv.yaml +kubectl create -f ./show-rc.yaml +kubectl create -f ./show-srv.yaml +``` + +## Query the service + +Use `kubectl describe service show-srv` to determine the public IP of +your service. + +> Note: If your platform does not support external load balancers, +> you'll need to open the proper port and direct traffic to the +> internal IP shown for the frontend service with the above command + +Run `curl :80` to query the service. You should get +something like this back: + +```shell +Pod Name: show-rc-xxu6i +Pod Namespace: default +USER_VAR: important information + +Kubernetes environment variables +BACKEND_SRV_SERVICE_HOST = 10.147.252.185 +BACKEND_SRV_SERVICE_PORT = 5000 +KUBERNETES_RO_SERVICE_HOST = 10.147.240.1 +KUBERNETES_RO_SERVICE_PORT = 80 +KUBERNETES_SERVICE_HOST = 10.147.240.2 +KUBERNETES_SERVICE_PORT = 443 +KUBE_DNS_SERVICE_HOST = 10.147.240.10 +KUBE_DNS_SERVICE_PORT = 53 + +Found backend ip: 10.147.252.185 port: 5000 +Response from backend +Backend Container +Backend Pod Name: backend-rc-6qiya +Backend Namespace: default +``` + +First the frontend pod's information is printed. The pod name and +[namespace](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/namespaces.md) are retrieved from the +[Downward API](/{{page.version}}/docs/user-guide/downward-api). Next, `USER_VAR` is the name of +an environment variable set in the [pod +definition](/{{page.version}}/docs/user-guide/environment-guide/show-rc.yaml). Then, the dynamic Kubernetes environment +variables are scanned and printed. These are used to find the backend +service, named `backend-srv`. Finally, the frontend pod queries the +backend service and prints the information returned. Again the backend +pod returns its own pod name and namespace. + +Try running the `curl` command a few times, and notice what +changes. Ex: `watch -n 1 curl -s ` Firstly, the frontend service +is directing your request to different frontend pods each time. The +frontend pods are always contacting the backend through the backend +service. This results in a different backend pod servicing each +request as well. 
+ +## Cleanup + +```shell +kubectl delete rc,service -l type=show-type +kubectl delete rc,service -l type=backend-type +``` \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/environment-guide/show-rc.yaml b/_includes/docs/docs/user-guide/environment-guide/show-rc.yaml new file mode 100644 index 0000000000..4de94c06ca --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/show-rc.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: v1 +kind: ReplicationController +metadata: + name: show-rc + labels: + type: show-type +spec: + replicas: 3 + template: + metadata: + labels: + type: show-type + spec: + containers: + - name: show-container + image: gcr.io/google-samples/env-show:1.1 + imagePullPolicy: Always + ports: + - containerPort: 8080 + protocol: TCP + env: + - name: USER_VAR + value: important information + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace diff --git a/_includes/docs/docs/user-guide/environment-guide/show-srv.yaml b/_includes/docs/docs/user-guide/environment-guide/show-srv.yaml new file mode 100644 index 0000000000..25a2d7473e --- /dev/null +++ b/_includes/docs/docs/user-guide/environment-guide/show-srv.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: show-srv + labels: + type: show-type +spec: + type: LoadBalancer + ports: + - port: 80 + protocol: TCP + targetPort: 8080 + selector: + type: show-type diff --git a/_includes/docs/docs/user-guide/getting-into-containers.md b/_includes/docs/docs/user-guide/getting-into-containers.md new file mode 100644 index 0000000000..417ff5a069 --- /dev/null +++ b/_includes/docs/docs/user-guide/getting-into-containers.md @@ -0,0 +1,68 @@ + +Developers can use `kubectl exec` to run commands in a container. This guide demonstrates two use cases. + +## Using kubectl exec to check the environment variables of a container + +Kubernetes exposes [services](/{{page.version}}/docs/user-guide/services/#environment-variables) through environment variables. It is convenient to check these environment variables using `kubectl exec`. + +We first create a pod and a service, + +```shell +$ kubectl create -f examples/guestbook/redis-master-controller.yaml +$ kubectl create -f examples/guestbook/redis-master-service.yaml +``` +wait until the pod is Running and Ready, + +```shell +$ kubectl get pod +NAME READY REASON RESTARTS AGE +redis-master-ft9ex 1/1 Running 0 12s +``` + +then we can check the environment variables of the pod, + +```shell +$ kubectl exec redis-master-ft9ex env +... +REDIS_MASTER_SERVICE_PORT=6379 +REDIS_MASTER_SERVICE_HOST=10.0.0.219 +... +``` + +We can use these environment variables in applications to find the service. + + +## Using kubectl exec to check the mounted volumes + +It is convenient to use `kubectl exec` to check if the volumes are mounted as expected. +We first create a Pod with a volume mounted at /data/redis, + +```shell +kubectl create -f docs/user-guide/walkthrough/pod-redis.yaml +``` + +wait until the pod is Running and Ready, + +```shell +$ kubectl get pods +NAME READY REASON RESTARTS AGE +storage 1/1 Running 0 1m +``` + +we then use `kubectl exec` to verify that the volume is mounted at /data/redis, + +```shell +$ kubectl exec storage ls /data +redis +``` + +## Using kubectl exec to open a bash terminal in a pod + +After all, open a terminal in a pod is the most direct way to introspect the pod. 
Assuming the pod/storage is still running, run
+
+```shell
+$ kubectl exec -ti storage -- bash
+root@storage:/data#
+```
+
+This gets you a terminal.
\ No newline at end of file
diff --git a/_includes/docs/docs/user-guide/horizontal-pod-autoscaler.md b/_includes/docs/docs/user-guide/horizontal-pod-autoscaler.md
new file mode 100644
index 0000000000..ac2ffc2895
--- /dev/null
+++ b/_includes/docs/docs/user-guide/horizontal-pod-autoscaler.md
@@ -0,0 +1,76 @@
+
+This document describes the current state of the Horizontal Pod Autoscaler in Kubernetes.
+
+## What is Horizontal Pod Autoscaler?
+
+Horizontal pod autoscaling allows the number of pods in a replication controller or deployment
+to scale automatically based on observed CPU utilization.
+It is a [beta](/{{page.version}}/docs/api/#api-versioning) feature in Kubernetes 1.1.
+
+The autoscaler is implemented as a Kubernetes API resource and a controller.
+The resource describes the behavior of the controller.
+The controller periodically adjusts the number of replicas in a replication controller or deployment
+to match the observed average CPU utilization to the target specified by the user.
+
+
+## How does Horizontal Pod Autoscaler work?
+
+![Horizontal Pod Autoscaler diagram](/images/docs/horizontal-pod-autoscaler.svg)
+
+The autoscaler is implemented as a control loop.
+It periodically queries CPU utilization for the pods it targets.
+(The period of the autoscaler is controlled by the `--horizontal-pod-autoscaler-sync-period` flag of the controller manager;
+the default value is 30 seconds.)
+Then, it compares the arithmetic mean of the pods' CPU utilization with the target and adjusts the number of replicas if needed.
+
+CPU utilization is the recent CPU usage of a pod divided by the sum of CPU requested by the pod's containers.
+Please note that if some of the pod's containers do not have a CPU request set,
+CPU utilization for the pod will not be defined and the autoscaler will not take any action.
+Further details of the autoscaling algorithm are given [here](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/horizontal-pod-autoscaler.md#autoscaling-algorithm).
+
+The autoscaler uses Heapster to collect CPU utilization.
+Therefore, Heapster monitoring must be deployed in your cluster for autoscaling to work.
+
+The autoscaler accesses the corresponding replication controller or deployment through the scale sub-resource.
+Scale is an interface that allows you to dynamically set the number of replicas and to learn their current state.
+More details on the scale sub-resource can be found [here](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/horizontal-pod-autoscaler.md#scale-subresource).
+
+
+## API Object
+
+The horizontal pod autoscaler is a top-level resource in the Kubernetes REST API (currently in [beta](/{{page.version}}/docs/api/#api-versioning)).
+More details about the API object can be found at
+[HorizontalPodAutoscaler Object](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object).
+
+## Support for horizontal pod autoscaler in kubectl
+
+The horizontal pod autoscaler, like every API resource, is supported in a standard way by `kubectl`.
+We can create a new autoscaler using the `kubectl create` command.
+We can list autoscalers with `kubectl get hpa` and get a detailed description with `kubectl describe hpa`.
+Finally, we can delete an autoscaler using `kubectl delete hpa`.
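+
+As a sketch of what such a resource looks like (the names and bounds here are illustrative rather than taken from a running cluster; the schema shown is the `extensions/v1beta1` beta API described above), a manifest for an autoscaler targeting a replication controller named `foo` might be written as follows and created with `kubectl create -f`:
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: foo                      # illustrative name
+  namespace: default
+spec:
+  scaleRef:                      # the resource whose replica count is managed
+    kind: ReplicationController
+    name: foo                    # illustrative replication controller name
+    namespace: default
+  minReplicas: 2
+  maxReplicas: 5
+  cpuUtilization:
+    targetPercentage: 80         # target average CPU utilization across the pods
+```
+
+Once created this way, the autoscaler shows up in `kubectl get hpa` and `kubectl describe hpa` like any other resource.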
+ +In addition, there is a special `kubectl autoscale` command that allows for easy creation of horizontal pod autoscaler. +For instance, executing `kubectl autoscale rc foo --min=2 --max=5 --cpu-percent=80` +will create an autoscaler for replication controller *foo*, with target CPU utilization set to `80%` +and the number of replicas between 2 and 5. +The detailed documentation of `kubectl autoscale` can be found [here](/{{page.version}}/docs/user-guide/kubectl/kubectl_autoscale). + + +## Autoscaling during rolling update + +Currently in Kubernetes, it is possible to perform a rolling update by managing replication controllers directly, +or by using the deployment object, which manages the underlying replication controllers for you. +Horizontal pod autoscaler only supports the latter approach: the horizontal pod autoscaler is bound to the deployment object, +it sets the size for the deployment object, and the deployment is responsible for setting sizes of underlying replication controllers. + +Horizontal pod autoscaler does not work with rolling update using direct manipulation of replication controllers, +i.e. you cannot bind a horizontal pod autoscaler to a replication controller and do rolling update (e.g. using `kubectl rolling-update`). +The reason this doesn't work is that when rolling update creates a new replication controller, +the horizontal pod autoscaler will not be bound to the new replication controller. + + +## Further reading + +* Design documentation: [Horizontal Pod Autoscaling](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/horizontal-pod-autoscaler.md). +* Manual of autoscale command in kubectl: [kubectl autoscale](/{{page.version}}/docs/user-guide/kubectl/kubectl_autoscale). +* Usage example of [Horizontal Pod Autoscaler](/{{page.version}}/docs/user-guide/horizontal-pod-autoscaling/). 
\ No newline at end of file diff --git a/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml b/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml new file mode 100644 index 0000000000..1a2067714d --- /dev/null +++ b/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml @@ -0,0 +1,14 @@ +apiVersion: extensions/v1beta1 +kind: HorizontalPodAutoscaler +metadata: + name: php-apache + namespace: default +spec: + scaleRef: + kind: ReplicationController + name: php-apache + namespace: default + minReplicas: 1 + maxReplicas: 10 + cpuUtilization: + targetPercentage: 50 diff --git a/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/image/Dockerfile b/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/image/Dockerfile new file mode 100644 index 0000000000..56f2d6252e --- /dev/null +++ b/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/image/Dockerfile @@ -0,0 +1,5 @@ +FROM php:5-apache + +ADD index.php /var/www/html/index.php + +RUN chmod a+rx index.php diff --git a/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/image/index.php b/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/image/index.php new file mode 100755 index 0000000000..8f0b1c0cf3 --- /dev/null +++ b/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/image/index.php @@ -0,0 +1,7 @@ + diff --git a/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/index.md b/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/index.md new file mode 100644 index 0000000000..0c34a8cd9c --- /dev/null +++ b/_includes/docs/docs/user-guide/horizontal-pod-autoscaling/index.md @@ -0,0 +1,182 @@ + +Horizontal pod autoscaling is a [beta](/{{page.version}}/docs/api/#api-versioning) feature in Kubernetes 1.1. +It allows the number of pods in a replication controller or deployment to scale automatically based on observed CPU usage. +In the future also other metrics will be supported. + +In this document we explain how this feature works by walking you through an example of enabling horizontal pod autoscaling with the php-apache server. + +## Prerequisites + +This example requires a running Kubernetes cluster and kubectl in the version at least 1.1. +[Heapster](https://github.com/kubernetes/heapster) monitoring needs to be deployed in the cluster +as horizontal pod autoscaler uses it to collect metrics +(if you followed [getting started on GCE guide](/{{page.version}}/docs/getting-started-guides/gce), +heapster monitoring will be turned-on by default). + + +## Step One: Run & expose php-apache server + +To demonstrate horizontal pod autoscaler we will use a custom docker image based on php-apache server. +The image can be found [here](https://releases.k8s.io/{{page.githubbranch}}/docs/user-guide/horizontal-pod-autoscaling/image). +It defines [index.php](/{{page.version}}/docs/user-guide/horizontal-pod-autoscaling/image/index.php) page which performs some CPU intensive computations. + +First, we will start a replication controller running the image and expose it as an external service: + + + +```shell +$ kubectl run php-apache --image=gcr.io/google_containers/hpa-example --requests=cpu=200m +replicationcontroller "php-apache" created + +$ kubectl expose rc php-apache --port=80 --type=LoadBalancer +service "php-apache" exposed +``` + +Now, we will wait some time and verify that both the replication controller and the service were correctly created and are running. 
We will also determine the IP address of the service: + +```shell +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +php-apache-wa3t1 1/1 Running 0 12m + +$ kubectl describe services php-apache | grep "LoadBalancer Ingress" +LoadBalancer Ingress: 146.148.24.244 +``` + +We may now check that php-apache server works correctly by calling `curl` with the service's IP: + +```shell +$ curl http://146.148.24.244 +OK! +``` + +Please notice that when exposing the service we assumed that our cluster runs on a provider which supports load balancers (e.g.: on GCE). +If load balancers are not supported (e.g.: on Vagrant), we can expose php-apache service as ``ClusterIP`` and connect to it using the proxy on the master: + +```shell +$ kubectl expose rc php-apache --port=80 --type=ClusterIP +service "php-apache" exposed + +$ kubectl cluster-info | grep master +Kubernetes master is running at https://146.148.6.215 + +$ curl -k -u : https://146.148.6.215/api/v1/proxy/namespaces/default/services/php-apache/ +OK! +``` + +## Step Two: Create horizontal pod autoscaler + +Now that the server is running, we will create a horizontal pod autoscaler for it. +To create it, we will use the [hpa-php-apache.yaml](/{{page.version}}/docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml) file, which looks like this: + +```yaml +apiVersion: extensions/v1beta1 +kind: HorizontalPodAutoscaler +metadata: + name: php-apache + namespace: default +spec: + scaleRef: + kind: ReplicationController + name: php-apache + namespace: default + minReplicas: 1 + maxReplicas: 10 + cpuUtilization: + targetPercentage: 50 +``` + +This defines a horizontal pod autoscaler that maintains between 1 and 10 replicas of the Pods +controlled by the php-apache replication controller we created in the first step of these instructions. +Roughly speaking, the horizontal autoscaler will increase and decrease the number of replicas +(via the replication controller) so as to maintain an average CPU utilization across all Pods of 50% +(since each pod requests 200 milli-cores by [kubectl run](#kubectl-run), this means average CPU utilization of 100 milli-cores). +See [here](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/horizontal-pod-autoscaler.md#autoscaling-algorithm) for more details on the algorithm. + +We will create the autoscaler by executing the following command: + +```shell +$ kubectl create -f docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml +horizontalpodautoscaler "php-apache" created +``` + +Alternatively, we can create the autoscaler using [kubectl autoscale](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/user-guide/kubectl/kubectl_autoscale.md). +The following command will create the equivalent autoscaler as defined in the [hpa-php-apache.yaml](/{{page.version}}/docs/user-guide/horizontal-pod-autoscaling/hpa-php-apache.yaml) file: + +```shell +$ kubectl autoscale rc php-apache --cpu-percent=50 --min=1 --max=10 +replicationcontroller "php-apache" autoscaled +``` + +We may check the current status of autoscaler by running: + +```shell +$ kubectl get hpa +NAME REFERENCE TARGET CURRENT MINPODS MAXPODS AGE +php-apache ReplicationController/default/php-apache/ 50% 0% 1 10 27s +``` + +Please note that the current CPU consumption is 0% as we are not sending any requests to the server +(the ``CURRENT`` column shows the average across all the pods controlled by the corresponding replication controller). 
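+
+As a rough worked example of the arithmetic (following the autoscaling algorithm in the design doc linked earlier, so treat the exact formula as an approximation): the controller aims for roughly ceil(sum of the pods' current CPU utilizations / target) replicas. With a 50% target and our single idle pod at 0%, nothing changes beyond the 1-replica minimum; if that one pod were instead running at 300% of its 200 milli-core request (600 milli-cores), the desired count would be ceil(300 / 50) = 6 replicas.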
+ +## Step Three: Increase load + +Now, we will see how the autoscaler reacts on the increased load of the server. +We will start an infinite loop of queries to our server (please run it in a different terminal): + +```shell +$ while true; do curl http://146.148.6.244; done +``` + +We may examine, how CPU load was increased (the results should be visible after about 3-4 minutes) by executing: + +```shell +$ kubectl get hpa +NAME REFERENCE TARGET CURRENT MINPODS MAXPODS AGE +php-apache ReplicationController/default/php-apache/ 50% 305% 1 10 4m +``` + +In the case presented here, it bumped CPU consumption to 305% of the request. +As a result, the replication controller was resized to 7 replicas: + +```shell +$ kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE +php-apache php-apache gcr.io/google_containers/hpa-example run=php-apache 7 18m +``` + +Now, we may increase the load even more by running yet another infinite loop of queries (in yet another terminal): + +```shell +$ while true; do curl http://146.148.6.244; done +``` + +In the case presented here, it increased the number of serving pods to 10: + +```shell +$ kubectl get hpa +NAME REFERENCE TARGET CURRENT MINPODS MAXPODS AGE +php-apache ReplicationController/default/php-apache/ 50% 65% 1 10 14m + +$ kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE +php-apache php-apache gcr.io/google_containers/hpa-example run=php-apache 10 24m +``` + +## Step Four: Stop load + +We will finish our example by stopping the user load. + +We will terminate both infinite ``while`` loops sending requests to the server and verify the result state: + +```shell +$ kubectl get hpa +NAME REFERENCE TARGET CURRENT MINPODS MAXPODS AGE +php-apache ReplicationController/default/php-apache/ 50% 0% 1 10 21m + +$ kubectl get rc +CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE +php-apache php-apache gcr.io/google_containers/hpa-example run=php-apache 1 31m +``` + +As we see, in the presented case CPU utilization dropped to 0, and the number of replicas dropped to 1. \ No newline at end of file diff --git a/_includes/docs/docs/user-guide/identifiers.md b/_includes/docs/docs/user-guide/identifiers.md new file mode 100644 index 0000000000..8f30ee659c --- /dev/null +++ b/_includes/docs/docs/user-guide/identifiers.md @@ -0,0 +1,12 @@ + +All objects in the Kubernetes REST API are unambiguously identified by a Name and a UID. + +For non-unique user-provided attributes, Kubernetes provides [labels](/{{page.version}}/docs/user-guide/labels) and [annotations](/{{page.version}}/docs/user-guide/annotations). + +## Names + +Names are generally client-provided. Only one object of a given kind can have a given name at a time (i.e., they are spatially unique). But if you delete an object, you can make a new object with the same name. Names are the used to refer to an object in a resource URL, such as `/api/v1/pods/some-name`. By convention, the names of Kubernetes resources should be up to maximum length of 253 characters and consist of lower case alphanumeric characters, `-`, and `.`, but certain resources have more specific restrictions. See the [identifiers design doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/identifiers.md) for the precise syntax rules for names. + +## UIDs + +UID are generated by Kubernetes. Every object created over the whole lifetime of a Kubernetes cluster has a distinct UID (i.e., they are spatially and temporally unique). 
\ No newline at end of file diff --git a/_includes/docs/docs/user-guide/images.md b/_includes/docs/docs/user-guide/images.md new file mode 100644 index 0000000000..ac6fb48724 --- /dev/null +++ b/_includes/docs/docs/user-guide/images.md @@ -0,0 +1,235 @@ + +Each container in a pod has its own image. Currently, the only type of image supported is a [Docker Image](https://docs.docker.com/userguide/dockerimages/). + +You create your Docker image and push it to a registry before referring to it in a Kubernetes pod. + +The `image` property of a container supports the same syntax as the `docker` command does, including private registries and tags. + +* TOC +{:toc} + + +## Updating Images + +The default pull policy is `IfNotPresent` which causes the Kubelet to not +pull an image if it already exists. If you would like to always force a pull +you must set a pull image policy of `Always` or specify a `:latest` tag on +your image. + +## Using a Private Registry + +Private registries may require keys to read images from them. +Credentials can be provided in several ways: + + - Using Google Container Registry + - Per-cluster + - automatically configured on Google Compute Engine or Google Container Engine + - all pods can read the project's private registry + - Configuring Nodes to Authenticate to a Private Registry + - all pods can read any configured private registries + - requires node configuration by cluster administrator + - Pre-pulling Images + - all pods can use any images cached on a node + - requires root access to all nodes to setup + - Specifying ImagePullSecrets on a Pod + - only pods which provide own keys can access the private registry +Each option is described in more detail below. + + +### Using Google Container Registry + +Kubernetes has native support for the [Google Container +Registry (GCR)](https://cloud.google.com/tools/container-registry/), when running on Google Compute +Engine (GCE). If you are running your cluster on GCE or Google Container Engine (GKE), simply +use the full image name (e.g. gcr.io/my_project/image:tag). + +All pods in a cluster will have read access to images in this registry. + +The kubelet will authenticate to GCR using the instance's +Google service account. The service account on the instance +will have a `https://www.googleapis.com/auth/devstorage.read_only`, +so it can pull from the project's GCR, but not push. + +### Configuring Nodes to Authenticate to a Private Repository + +**Note:** if you are running on Google Container Engine (GKE), there will already be a `.dockercfg` on each node +with credentials for Google Container Registry. You cannot use this approach. + +**Note:** this approach is suitable if you can control node configuration. It +will not work reliably on GCE, and any other cloud provider that does automatic +node replacement. + +Docker stores keys for private registries in the `$HOME/.dockercfg` file. If you put this +in the `$HOME` of `root` on a kubelet, then docker will use it. + +Here are the recommended steps to configuring your nodes to use a private registry. In this +example, run these on your desktop/laptop: + + 1. run `docker login [server]` for each set of credentials you want to use. + 1. view `$HOME/.dockercfg` in an editor to ensure it contains just the credentials you want to use. + 1. get a list of your nodes + - for example: `nodes=$(kubectl get nodes -o template --template='{{range.items}}{{.metadata.name}} {{end}}')` + 1. copy your local `.dockercfg` to the home directory of root on each node. 
+ - for example: `for n in $nodes; do scp ~/.dockercfg root@$n:/root/.dockercfg; done` + +Verify by creating a pod that uses a private image, e.g.: + +```yaml +$ cat < /tmp/private-image-test-1.yaml +apiVersion: v1 +kind: Pod +metadata: + name: private-image-test-1 +spec: + containers: + - name: uses-private-image + image: $PRIVATE_IMAGE_NAME + imagePullPolicy: Always + command: [ "echo", "SUCCESS" ] +EOF +$ kubectl create -f /tmp/private-image-test-1.yaml +pods/private-image-test-1 +$ +``` + +If everything is working, then, after a few moments, you should see: + +```shell +$ kubectl logs private-image-test-1 +SUCCESS +``` + +If it failed, then you will see: + +```shell +$ kubectl describe pods/private-image-test-1 | grep "Failed" + Fri, 26 Jun 2015 15:36:13 -0700 Fri, 26 Jun 2015 15:39:13 -0700 19 {kubelet node-i2hq} spec.containers{uses-private-image} failed Failed to pull image "user/privaterepo:v1": Error: image user/privaterepo:v1 not found +``` + +You must ensure all nodes in the cluster have the same `.dockercfg`. Otherwise, pods will run on +some nodes and fail to run on others. For example, if you use node autoscaling, then each instance +template needs to include the `.dockercfg` or mount a drive that contains it. + +All pods will have read access to images in any private registry once private +registry keys are added to the `.dockercfg`. + +**This was tested with a private docker repository as of 26 June with Kubernetes version v0.19.3. +It should also work for a private registry such as quay.io, but that has not been tested.** + +### Pre-pulling Images + +**Note:** if you are running on Google Container Engine (GKE), there will already be a `.dockercfg` on each node +with credentials for Google Container Registry. You cannot use this approach. + +**Note:** this approach is suitable if you can control node configuration. It +will not work reliably on GCE, and any other cloud provider that does automatic +node replacement. + +Be default, the kubelet will try to pull each image from the specified registry. +However, if the `imagePullPolicy` property of the container is set to `IfNotPresent` or `Never`, +then a local image is used (preferentially or exclusively, respectively). + +If you want to rely on pre-pulled images as a substitute for registry authentication, +you must ensure all nodes in the cluster have the same pre-pulled images. + +This can be used to preload certain images for speed or as an alternative to authenticating to a private registry. + +All pods will have read access to any pre-pulled images. + +### Specifying ImagePullSecrets on a Pod + +**Note:** This approach is currently the recommended approach for GKE, GCE, and any cloud-providers +where node creation is automated. + +Kubernetes supports specifying registry keys on a pod. + +First, create a `.dockercfg`, such as running `docker login `. +Then put the resulting `.dockercfg` file into a [secret resource](/{{page.version}}/docs/user-guide/secrets). For example: + +```shell +$ docker login +Username: janedoe +Password: '�?'�?'�?'�?'�?'�?'�?'�?'�?'�?'�? +Email: jdoe@example.com +WARNING: login credentials saved in /Users/jdoe/.dockercfg. +Login Succeeded + +$ echo $(cat ~/.dockercfg) +{ "https://index.docker.io/v1/": { "auth": "ZmFrZXBhc3N3b3JkMTIK", "email": "jdoe@example.com" } } + +$ cat ~/.dockercfg | base64 +eyAiaHR0cHM6Ly9pbmRleC5kb2NrZXIuaW8vdjEvIjogeyAiYXV0aCI6ICJabUZyWlhCaGMzTjNiM0prTVRJSyIsICJlbWFpbCI6ICJqZG9lQGV4YW1wbGUuY29tIiB9IH0K + +$ cat > /tmp/image-pull-secret.yaml <