Merge remote-tracking branch 'origin/master'

commit b42679610c
@@ -0,0 +1,20 @@
source "https://rubygems.org"

gem "jekyll", "3.2.1"
gem "jekyll-sass-converter", "1.3.0"
gem "minima", "1.1.0"
gem "kramdown", "1.11.1"
gem "liquid", "3.0.6"
gem "rouge", "1.11.1"
gem "jemoji", "0.7.0"
gem "jekyll-mentions", "1.2.0"
gem "jekyll-redirect-from", "0.11.0"
gem "jekyll-sitemap", "0.10.0"
gem "jekyll-feed", "0.5.1"
gem "jekyll-gist", "1.4.0"
gem "jekyll-paginate", "1.1.0"
gem "jekyll-coffeescript", "1.0.1"
gem "jekyll-seo-tag", "2.0.0"
gem "jekyll-github-metadata", "2.0.2"
gem "listen", "3.0.6"
gem "activesupport", "4.2.7"
@@ -0,0 +1,119 @@
GEM
  remote: https://rubygems.org/
  specs:
    activesupport (4.2.7)
      i18n (~> 0.7)
      json (~> 1.7, >= 1.7.7)
      minitest (~> 5.1)
      thread_safe (~> 0.3, >= 0.3.4)
      tzinfo (~> 1.1)
    addressable (2.4.0)
    coffee-script (2.4.1)
      coffee-script-source
      execjs
    coffee-script-source (1.10.0)
    colorator (1.1.0)
    execjs (2.7.0)
    faraday (0.9.2)
      multipart-post (>= 1.2, < 3)
    ffi (1.9.14)
    forwardable-extended (2.6.0)
    gemoji (2.1.0)
    html-pipeline (2.4.2)
      activesupport (>= 2)
      nokogiri (>= 1.4)
    i18n (0.7.0)
    jekyll (3.2.1)
      colorator (~> 1.0)
      jekyll-sass-converter (~> 1.0)
      jekyll-watch (~> 1.1)
      kramdown (~> 1.3)
      liquid (~> 3.0)
      mercenary (~> 0.3.3)
      pathutil (~> 0.9)
      rouge (~> 1.7)
      safe_yaml (~> 1.0)
    jekyll-coffeescript (1.0.1)
      coffee-script (~> 2.2)
    jekyll-feed (0.5.1)
    jekyll-gist (1.4.0)
      octokit (~> 4.2)
    jekyll-github-metadata (2.0.2)
      jekyll (~> 3.1)
      octokit (~> 4.0)
    jekyll-mentions (1.2.0)
      activesupport (~> 4.0)
      html-pipeline (~> 2.3)
      jekyll (~> 3.0)
    jekyll-paginate (1.1.0)
    jekyll-redirect-from (0.11.0)
      jekyll (>= 2.0)
    jekyll-sass-converter (1.3.0)
      sass (~> 3.2)
    jekyll-seo-tag (2.0.0)
      jekyll (~> 3.1)
    jekyll-sitemap (0.10.0)
    jekyll-watch (1.5.0)
      listen (~> 3.0, < 3.1)
    jemoji (0.7.0)
      activesupport (~> 4.0)
      gemoji (~> 2.0)
      html-pipeline (~> 2.2)
      jekyll (>= 3.0)
    json (1.8.3)
    kramdown (1.11.1)
    liquid (3.0.6)
    listen (3.0.6)
      rb-fsevent (>= 0.9.3)
      rb-inotify (>= 0.9.7)
    mercenary (0.3.6)
    mini_portile2 (2.1.0)
    minima (1.1.0)
    minitest (5.9.0)
    multipart-post (2.0.0)
    nokogiri (1.6.8)
      mini_portile2 (~> 2.1.0)
      pkg-config (~> 1.1.7)
    octokit (4.3.0)
      sawyer (~> 0.7.0, >= 0.5.3)
    pathutil (0.14.0)
      forwardable-extended (~> 2.6)
    pkg-config (1.1.7)
    rb-fsevent (0.9.7)
    rb-inotify (0.9.7)
      ffi (>= 0.5.0)
    rouge (1.11.1)
    safe_yaml (1.0.4)
    sass (3.4.22)
    sawyer (0.7.0)
      addressable (>= 2.3.5, < 2.5)
      faraday (~> 0.8, < 0.10)
    thread_safe (0.3.5)
    tzinfo (1.2.2)
      thread_safe (~> 0.1)

PLATFORMS
  ruby

DEPENDENCIES
  activesupport (= 4.2.7)
  jekyll (= 3.2.1)
  jekyll-coffeescript (= 1.0.1)
  jekyll-feed (= 0.5.1)
  jekyll-gist (= 1.4.0)
  jekyll-github-metadata (= 2.0.2)
  jekyll-mentions (= 1.2.0)
  jekyll-paginate (= 1.1.0)
  jekyll-redirect-from (= 0.11.0)
  jekyll-sass-converter (= 1.3.0)
  jekyll-seo-tag (= 2.0.0)
  jekyll-sitemap (= 0.10.0)
  jemoji (= 0.7.0)
  kramdown (= 1.11.1)
  liquid (= 3.0.6)
  listen (= 3.0.6)
  minima (= 1.1.0)
  rouge (= 1.11.1)

BUNDLED WITH
   1.11.2
@@ -0,0 +1,15 @@
.PHONY: all build build-preview help serve

help: ## Show this help.
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

all: build ## Build site with production settings and put deliverables in _site.

build: ## Build site with production settings and put deliverables in _site.
	jekyll build

build-preview: ## Build site with drafts and future posts enabled.
	jekyll build --drafts --future

serve: ## Boot the development server.
	jekyll serve
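
With those targets in place, a typical local workflow might look like the following (a sketch; it assumes the Jekyll gems above are already installed, for example via Bundler):

```shell
make help           # list the documented targets
make serve          # boot the Jekyll development server
make build-preview  # build the site with drafts and future posts enabled
make build          # production build into _site
```
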
@@ -161,6 +161,8 @@ toc:
    path: /docs/getting-started-guides/azure/
  - title: Running Kubernetes on CenturyLink Cloud
    path: /docs/getting-started-guides/clc/
  - title: Running Kubernetes on IBM SoftLayer
    path: https://github.com/patrocinio/kubernetes-softlayer
  - title: Running Kubernetes on Custom Solutions
    section:
    - title: Creating a Custom Cluster from Scratch
@@ -195,8 +197,6 @@ toc:
    path: /docs/getting-started-guides/openstack-heat/
  - title: CoreOS on Multinode Cluster
    path: /docs/getting-started-guides/coreos/coreos_multinode_cluster/
  - title: Fedora With Calico Networking
    path: /docs/getting-started-guides/fedora/fedora-calico/
  - title: rkt
    section:
    - title: Running Kubernetes with rkt
@@ -188,8 +188,15 @@ some OAuth2 providers, notably Azure Active Directory, Salesforce, and Google.
The protocol's main extension of OAuth2 is an additional field returned with
the access token called an [ID Token](https://openid.net/specs/openid-connect-core-1_0.html#IDToken).
This token is a JSON Web Token (JWT) with well known fields, such as a user's
email, signed by the server. When used as a bearer token, the API server can
verify ID token's signature and determine the end users identity.
email, signed by the server.

To identify the user, the authenticator uses the `id_token` (not the `access_token`)
from the OAuth2 [token response](https://openid.net/specs/openid-connect-core-1_0.html#TokenResponse)
as a bearer token.

```
Authentication: Bearer (id_token)
```
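
For illustration, a hypothetical client-side sketch of presenting such a token (the token value is a placeholder obtained from the provider out of band):

```shell
ID_TOKEN="eyJhbGciOiJSUzI1NiIs..."   # placeholder JWT from the provider's token response
kubectl --server=https://<apiserver> --token="${ID_TOKEN}" get nodes
```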

To enable the plugin, pass the following required flags:
@@ -223,12 +230,12 @@ Kubernetes does not provide an OpenID Connect Identity Provider.
You can use an existing public OpenID Connect Identity Provider (such as Google, or [others](http://connect2id.com/products/nimbus-oauth-openid-connect-sdk/openid-connect-providers)).
Or, you can run your own Identity Provider, such as CoreOS [dex](https://github.com/coreos/dex), [Keycloak](https://github.com/keycloak/keycloak) or CloudFoundry [UAA](https://github.com/cloudfoundry/uaa).

The provider needs to support [OpenID connect discovery]https://openid.net/specs/openid-connect-discovery-1_0.html); not all do.
The provider needs to support [OpenID connect discovery](https://openid.net/specs/openid-connect-discovery-1_0.html); not all do.
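
A quick way to check is to fetch the provider's standard discovery document; for example, using Google (mentioned above) as the issuer:

```shell
curl https://accounts.google.com/.well-known/openid-configuration
```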

Setup instructions for specific systems:

- [UAA]: http://apigee.com/about/blog/engineering/kubernetes-authentication-enterprise
- [Dex]: https://speakerdeck.com/ericchiang/kubernetes-access-control-with-dex
- [UAA](http://apigee.com/about/blog/engineering/kubernetes-authentication-enterprise)
- [Dex](https://speakerdeck.com/ericchiang/kubernetes-access-control-with-dex)

### Webhook Token Authentication
@@ -111,11 +111,11 @@ to the corresponding `gcloud` commands.
Examples:

```shell
gcloud container clusters create mytestcluster --zone=us-central1-b --enable-autoscaling=true --min-nodes=3 --max-nodes=10 --num-nodes=5
gcloud container clusters create mytestcluster --zone=us-central1-b --enable-autoscaling --min-nodes=3 --max-nodes=10 --num-nodes=5
```

```shell
gcloud container clusters update mytestcluster --enable-autoscaling=true --min-nodes=1 --max-nodes=15
gcloud container clusters update mytestcluster --enable-autoscaling --min-nodes=1 --max-nodes=15
```

**Cluster autoscaler expects that nodes have not been manually modified (e.g. by adding labels via kubectl) as those properties would not be propagated to the new nodes within the same instance group.**
@@ -65,7 +65,7 @@ Each of these options are overridable by `export`ing the values before running t

The first step in the process is to initialize the master node.

Clone the `kube-deploy` repo, and run [master.sh](master.sh) on the master machine _with root_:
Clone the `kube-deploy` repo, and run `master.sh` on the master machine _with root_:

```shell
$ git clone https://github.com/kubernetes/kube-deploy
@@ -82,7 +82,7 @@ Lastly, it launches `kubelet` in the main docker daemon, and the `kubelet` in tu

Once your master is up and running you can add one or more workers on different machines.

Clone the `kube-deploy` repo, and run [worker.sh](worker.sh) on the worker machine _with root_:
Clone the `kube-deploy` repo, and run `worker.sh` on the worker machine _with root_:

```shell
$ git clone https://github.com/kubernetes/kube-deploy
@@ -1,313 +0,0 @@
---
assignees:
- caesarxuchao

---

This guide will walk you through the process of getting a Kubernetes Fedora cluster running on Digital Ocean with networking powered by Calico networking.
It will cover the installation and configuration of the following systemd processes on the following hosts:

Kubernetes Master:

- `kube-apiserver`
- `kube-controller-manager`
- `kube-scheduler`
- `etcd`
- `docker`
- `calico-node`

Kubernetes Node:

- `kubelet`
- `kube-proxy`
- `docker`
- `calico-node`

For this demo, we will be setting up one Master and one Node with the following information:

| Hostname    | IP            |
|-------------|---------------|
| kube-master | 10.134.251.56 |
| kube-node-1 | 10.134.251.55 |

This guide is scalable to multiple nodes provided you [configure interface-cbr0 with its own subnet on each Node](#configure-the-virtual-interface---cbr0)
and [add an entry to /etc/hosts for each host](#setup-communication-between-hosts).

Ensure you substitute the IP Addresses and Hostnames used in this guide with ones in your own setup.

* TOC
{:toc}

## Prerequisites

You need two or more Fedora 22 droplets on Digital Ocean with [Private Networking](https://www.digitalocean.com/community/tutorials/how-to-set-up-and-use-digitalocean-private-networking) enabled.

## Setup Communication Between Hosts

Digital Ocean private networking configures a private network on eth1 for each host. To simplify communication between the hosts, we will add an entry to /etc/hosts
so that all hosts in the cluster can hostname-resolve one another to this interface. **It is important that the hostname resolves to this interface instead of eth0, as
all Kubernetes and Calico services will be running on it.**

```shell
echo "10.134.251.56 kube-master" >> /etc/hosts
echo "10.134.251.55 kube-node-1" >> /etc/hosts
```

> Make sure that communication works between kube-master and each kube-node by using a utility such as ping.
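
For example (a quick sanity check; run the mirror-image command from the node):

```shell
ping -c 3 kube-node-1   # from kube-master
ping -c 3 kube-master   # from kube-node-1
```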

## Setup Master

### Install etcd

* Both Calico and Kubernetes use etcd as their datastore. We will run etcd on Master and point all Kubernetes and Calico services at it.

```shell
yum -y install etcd
```

* Edit `/etc/etcd/etcd.conf`

```conf
ETCD_LISTEN_CLIENT_URLS="http://kube-master:4001"

ETCD_ADVERTISE_CLIENT_URLS="http://kube-master:4001"
```

### Install Kubernetes

* Run the following command on Master to install the latest Kubernetes (as well as docker):

```shell
yum -y install kubernetes
```

* Edit `/etc/kubernetes/config`

```conf
# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://kube-master:8080"
```

* Edit `/etc/kubernetes/apiserver`

```conf
# The address on the local server to listen to.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

KUBE_ETCD_SERVERS="--etcd-servers=http://kube-master:4001"

# Remove ServiceAccount from this line to run without API Tokens
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ResourceQuota"
```

* Create /var/run/kubernetes on master:

```shell
mkdir /var/run/kubernetes
chown kube:kube /var/run/kubernetes
chmod 750 /var/run/kubernetes
```

* Start the appropriate services on master:

```shell
for SERVICE in etcd kube-apiserver kube-controller-manager kube-scheduler; do
    systemctl restart $SERVICE
    systemctl enable $SERVICE
    systemctl status $SERVICE
done
```
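
As an optional sanity check (a sketch assuming the ports configured above), confirm that etcd and the apiserver are answering before moving on:

```shell
curl http://kube-master:4001/version                      # etcd on its client URL
curl http://kube-master:8080/version                      # kube-apiserver version endpoint
kubectl -s http://kube-master:8080 get componentstatuses  # scheduler, controller-manager, etcd health
```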

### Install Calico

Next, we'll launch Calico on Master to allow communication between Pods and any services running on the Master.

* Install calicoctl, the calico configuration tool.

```shell
wget https://github.com/Metaswitch/calico-docker/releases/download/v0.5.5/calicoctl
chmod +x ./calicoctl
sudo mv ./calicoctl /usr/bin
```

* Create `/etc/systemd/system/calico-node.service`

```conf
[Unit]
Description=calicoctl node
Requires=docker.service
After=docker.service

[Service]
User=root
Environment="ETCD_AUTHORITY=kube-master:4001"
PermissionsStartOnly=true
ExecStartPre=/usr/bin/calicoctl checksystem --fix
ExecStart=/usr/bin/calicoctl node --ip=10.134.251.56 --detach=false

[Install]
WantedBy=multi-user.target
```

> Be sure to substitute `--ip=10.134.251.56` with your Master's eth1 IP Address.
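
If you are unsure of that address, one way to look it up on the Master is:

```shell
ip -4 addr show eth1
```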

* Start Calico

```shell
systemctl enable calico-node.service
systemctl start calico-node.service
```

> Starting calico for the first time may take a few minutes as the calico-node docker image is downloaded.
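
To watch for the service to come up (the exact container name may vary), something like the following works:

```shell
systemctl status calico-node.service
docker ps | grep calico    # the calico-node container should appear once the image has been pulled
```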

## Setup Node

### Configure the Virtual Interface - cbr0

By default, docker will create and run on a virtual interface called `docker0`. This interface is automatically assigned the address range 172.17.42.1/16.
In order to set our own address range, we will create a new virtual interface called `cbr0` and then start docker on it.

* Add a virtual interface by creating `/etc/sysconfig/network-scripts/ifcfg-cbr0`:

```conf
DEVICE=cbr0
TYPE=Bridge
IPADDR=192.168.1.1
NETMASK=255.255.255.0
ONBOOT=yes
BOOTPROTO=static
```

> **Note for Multi-Node Clusters:** Each node should be assigned an IP address on a unique subnet. In this example, node-1 is using 192.168.1.1/24,
so node-2 should be assigned another pool on the 192.168.x.0/24 subnet, e.g. 192.168.2.1/24.
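
A sketch of what node-2's interface file might look like under that scheme (only the address differs):

```shell
cat > /etc/sysconfig/network-scripts/ifcfg-cbr0 <<'EOF'
DEVICE=cbr0
TYPE=Bridge
IPADDR=192.168.2.1
NETMASK=255.255.255.0
ONBOOT=yes
BOOTPROTO=static
EOF
```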

* Ensure that your system has bridge-utils installed. Then, restart the networking daemon to activate the new interface
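
If bridge-utils is not already present, a minimal install step first (assuming yum, as elsewhere in this guide):

```shell
yum -y install bridge-utils
```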

```shell
systemctl restart network.service
```

### Install Docker

* Install Docker

```shell
yum -y install docker
```

* Configure docker to run on `cbr0` by editing `/etc/sysconfig/docker-network`:

```conf
DOCKER_NETWORK_OPTIONS="--bridge=cbr0 --iptables=false --ip-masq=false"
```

* Start docker

```shell
systemctl start docker
```

### Install Calico

* Install calicoctl, the calico configuration tool.

```shell
wget https://github.com/Metaswitch/calico-docker/releases/download/v0.5.5/calicoctl
chmod +x ./calicoctl
sudo mv ./calicoctl /usr/bin
```

* Create `/etc/systemd/system/calico-node.service`

```conf
[Unit]
Description=calicoctl node
Requires=docker.service
After=docker.service

[Service]
User=root
Environment="ETCD_AUTHORITY=kube-master:4001"
PermissionsStartOnly=true
ExecStartPre=/usr/bin/calicoctl checksystem --fix
ExecStart=/usr/bin/calicoctl node --ip=10.134.251.55 --detach=false --kubernetes

[Install]
WantedBy=multi-user.target
```

> Note: You must replace the IP address with your node's eth1 IP Address!

* Start Calico

```shell
systemctl enable calico-node.service
systemctl start calico-node.service
```

* Configure the IP Address Pool

Most Kubernetes application deployments will require communication between Pods and the kube-apiserver on Master. On a standard Digital
Ocean Private Network, requests sent from Pods to the kube-apiserver will not be returned as the networking fabric will drop response packets
destined for any 192.168.0.0/16 address. To resolve this, you can have calicoctl add a masquerade rule to all outgoing traffic on the node:

```shell
ETCD_AUTHORITY=kube-master:4001 calicoctl pool add 192.168.0.0/16 --nat-outgoing
```

### Install Kubernetes

* First, install Kubernetes.

```shell
yum -y install kubernetes
```

* Edit `/etc/kubernetes/config`

```conf
# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://kube-master:8080"
```

* Edit `/etc/kubernetes/kubelet`

We'll pass in an extra parameter - `--network-plugin=calico` to tell the Kubelet to use the Calico networking plugin. Additionally, we'll add two
environment variables that will be used by the Calico networking plugin.

```shell
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=0.0.0.0"

# You may leave this blank to use the actual hostname
# KUBELET_HOSTNAME="--hostname-override=127.0.0.1"

# location of the api-server
KUBELET_API_SERVER="--api-servers=http://kube-master:8080"

# Add your own!
KUBELET_ARGS="--network-plugin=calico"

# The following are variables which the kubelet will pass to the calico-networking plugin
ETCD_AUTHORITY="kube-master:4001"
KUBE_API_ROOT="http://kube-master:8080/api/v1"
```

* Start Kubernetes on the node.

```shell
for SERVICE in kube-proxy kubelet; do
    systemctl restart $SERVICE
    systemctl enable $SERVICE
    systemctl status $SERVICE
done
```

## Check Running Cluster

The cluster should be running! Check that your nodes are reporting as such:

```shell
kubectl get nodes
NAME          LABELS                                STATUS
kube-node-1   kubernetes.io/hostname=kube-node-1    Ready
```
@@ -49,6 +49,7 @@ few commands, and have active community support.
- [Azure](/docs/getting-started-guides/coreos/azure/) (Weave-based, contributed by WeaveWorks employees)
- [Azure](/docs/getting-started-guides/azure/) (Flannel-based, contributed by Microsoft employee)
- [CenturyLink Cloud](/docs/getting-started-guides/clc)
- [IBM SoftLayer](https://github.com/patrocinio/kubernetes-softlayer)

### Custom Solutions
@@ -83,7 +83,7 @@ First configure the cluster information in cluster/ubuntu/config-default.sh, fol
```shell
export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"

export role="ai i i"
export roles="ai i i"

export NUM_NODES=${NUM_NODES:-3}
@@ -95,7 +95,7 @@ export FLANNEL_NET=172.16.0.0/16
The first variable `nodes` defines all your cluster nodes, master node comes first and
separated with blank space like `<user_1@ip_1> <user_2@ip_2> <user_3@ip_3> `

Then the `role` variable defines the role of above machine in the same order, "ai" stands for machine
Then the `roles` variable defines the roles of above machine in the same order, "ai" stands for machine
acts as both master and node, "a" stands for master, "i" stands for node.

The `NUM_NODES` variable defines the total number of nodes.
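
Putting the three variables together, the example configuration above reads as follows:

```shell
export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"
export roles="ai i i"            # machine 1 is both master and node; machines 2 and 3 are nodes only
export NUM_NODES=${NUM_NODES:-3} # three machines in total
```
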
@@ -120,13 +120,14 @@ all running pods. Example:
alpha/target.custom-metrics.podautoscaler.kubernetes.io: '{"items":[{"name":"qps", "value": "10"}]}'
```

In this case if there are 4 pods running and each of them reports qps metric to be equal to 15 HPA will start 2 additional pods so there will be 6 pods in total. If there are multiple metrics passed in the annotation or CPU is configured as well then HPA will use the biggest
number of replicas that comes from the calculations.
In this case, if there are four pods running and each pod reports a QPS metric of 15 or higher, horizontal pod autoscaling will start two additional pods (for a total of six pods running).
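
A rough sketch of the arithmetic behind that example, assuming the same proportional rule used for CPU-based scaling:

```
desiredReplicas = ceil(currentReplicas * currentMetricValue / targetMetricValue)
                = ceil(4 * 15 / 10)
                = 6
```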

If you specify multiple metrics in your annotation or if you set a target CPU utilization, horizontal pod autoscaling will scale according to the metric that requires the highest number of replicas.

If you do not specify a target for CPU utilization, Kubernetes defaults to an 80% utilization threshold for horizontal pod autoscaling.

If you want to ensure that horizontal pod autoscaling calculates the number of required replicas based only on custom metrics, you should set the CPU utilization target to a very large value (such as 100000%). As this level of CPU utilization isn't possible, horizontal pod autoscaling will calculate based only on the custom metrics (and min/max limits).

At this moment even if target CPU utilization is not specified a default of 80% will be used.
To calculate number of desired replicas based only on custom metrics CPU utilization
target should be set to a very large value (e.g. 100000%). Then CPU-related logic
will want only 1 replica, leaving the decision about higher replica count to cusom metrics (and min/max limits).

## Further reading
@@ -44,13 +44,7 @@ It can be configured to give services externally-reachable urls, load balance tr

Before you start using the Ingress resource, there are a few things you should understand. The Ingress is a beta resource, not available in any Kubernetes release prior to 1.1. You need an Ingress controller to satisfy an Ingress, simply creating the resource will have no effect.

On GCE/GKE there should be a [L7 cluster addon](https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/README.md), deployed into the `kube-system` namespace:

```shell
$ kubectl get pods --namespace=kube-system -l k8s-app=glbc
NAME                            READY     STATUS    RESTARTS   AGE
l7-lb-controller-v0.6.0-chnan   2/2       Running   0          1d
```
GCE/GKE deploys an ingress controller on the master. You can deploy any number of custom ingress controllers in a pod. You must annotate each ingress with the appropriate class, as indicated [here](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx#running-multiple-ingress-controllers) and [here](https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/BETA_LIMITATIONS.md#disabling-glbc).
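
For instance, with the nginx controller from that repo, the class annotation could be attached to a (hypothetical) ingress named `my-ingress` like so:

```shell
kubectl annotate ingress my-ingress kubernetes.io/ingress.class="nginx"
```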

Make sure you review the [beta limitations](https://github.com/kubernetes/contrib/tree/master/ingress/controllers/gce/BETA_LIMITATIONS.md) of this controller. In environments other than GCE/GKE, you need to [deploy a controller](https://github.com/kubernetes/contrib/tree/master/ingress/controllers) as a pod.
@@ -85,7 +85,7 @@ $ kubectl describe pods <rc-name>           # Lists pods created by <rc-name
$ kubectl get services --sort-by=.metadata.name

# List pods Sorted by Restart Count
$ kubectl get pods --sort-by=.status.containerStatuses[0].restartCount
$ kubectl get pods --sort-by='.status.containerStatuses[0].restartCount'

# Get the version label of all pods with label app=cassandra
$ kubectl get pods --selector=app=cassandra rc -o 'jsonpath={.items[*].metadata.labels.version}'
@@ -95,12 +95,13 @@ $ kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="External

# List Names of Pods that belong to Particular RC
# "jq" command useful for transformations that are too complex for jsonpath
$ sel=$(./kubectl get rc <rc-name> --output=json | jq -j '.spec.selector | to_entries | .[] | "\(.key)=\(.value),"')
$ sel=$(kubectl get rc <rc-name> --output=json | jq -j '.spec.selector | to_entries | .[] | "\(.key)=\(.value),"')
$ sel=${sel%?} # Remove trailing comma
$ pods=$(kubectl get pods --selector=$sel --output=jsonpath={.items..metadata.name})`
$ pods=$(kubectl get pods --selector=$sel --output=jsonpath={.items..metadata.name})
$ echo $pods

# Check which nodes are ready
$ kubectl get nodes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'| tr ';' "\n" | grep "Ready=True"
```

## Modifying and Deleting Resources
@@ -122,6 +123,6 @@ $ kubectl run -i --tty busybox --image=busybox -- sh # Run pod as interacti
$ kubectl attach <podname> -i                            # Attach to Running Container
$ kubectl port-forward <podname> <local-and-remote-port> # Forward port of Pod to your local machine
$ kubectl port-forward <servicename> <port>              # Forward port to service
$ kubectl exec <pod-name> -- ls /                        # Run command in existing pod (1 container case)
$ kubectl exec <pod-name> -c <container-name> -- ls /    # Run command in existing pod (multi-container case)
```
@@ -25,7 +25,7 @@ If this fails with an "invalid command" error, you're likely using an older vers

Also, note that label keys must be in the form of DNS labels (as described in the [identifiers doc](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/docs/design/identifiers.md)), meaning that they are not allowed to contain any upper-case letters.
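
For example (hypothetical node name), a lower-case key like the following is accepted, whereas per the rule above a key such as `DiskType` would be rejected:

```shell
kubectl label nodes worker-node-1 disktype=ssd
```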

You can verify that it worked by re-running `kubectl get nodes` and checking that the node now has a label.
You can verify that it worked by re-running `kubectl get nodes --show-labels` and checking that the node now has a label.

### Step Two: Add a nodeSelector field to your pod configuration
@@ -460,7 +460,7 @@ within AWS Certificate Manager.
    "metadata": {
        "name": "my-service",
        "annotations": {
            "service.beta.kubernetes.io/aws-load-balancer-backend-protocol=": "(https|http|ssl|tcp)"
            "service.beta.kubernetes.io/aws-load-balancer-backend-protocol": "(https|http|ssl|tcp)"
        }
    },
```
@@ -10,7 +10,7 @@ assignees:
On-disk files in a container are ephemeral, which presents some problems for
non-trivial applications when running in containers. First, when a container
crashes, kubelet will restart it, but the files will be lost - the
container starts with a clean slate. Second, when running containers together
container starts with a clean state. Second, when running containers together
in a `Pod` it is often necessary to share files between those containers. The
Kubernetes `Volume` abstraction solves both of these problems.
@@ -0,0 +1,6 @@
[build]
  command = "make build"
  publish = "_site"

[context.deploy-preview]
  command = "make build-preview"
package.json
@@ -1,14 +0,0 @@
{
  "name": "githubpagessites",
  "description": "Version 1.1 of the docs for Kubernetes",
  "version": "1.1",
  "private": true,
  "license": "Apache Version 2.0",
  "author": "The Kubernetes Authors",
  "engines": {
    "node": "~4.2"
  },
  "dependencies": {
    "express": "^4.13.4"
  }
}