symbol errors in quotation mark
Signed-off-by: tim-zju <21651152@zju.edu.cn>
reviewable/pr2036/r2
commit
30f1d03b8d
|
@ -111,6 +111,8 @@ toc:
|
|||
- docs/getting-started-guides/clc.md
|
||||
- title: Running Kubernetes on IBM SoftLayer
|
||||
path: https://github.com/patrocinio/kubernetes-softlayer
|
||||
- title: Running Kubernetes on Multiple Clouds with Stackpoint.io
|
||||
path: /docs/getting-started-guides/stackpoint/
|
||||
- title: Running Kubernetes on Custom Solutions
|
||||
section:
|
||||
- docs/getting-started-guides/scratch.md
|
||||
|
|
|
@ -27,6 +27,13 @@ toc:
|
|||
- docs/api-reference/batch/v1/operations.html
|
||||
- docs/api-reference/batch/v1/definitions.html
|
||||
|
||||
- title: Apps API
|
||||
section:
|
||||
- title: Apps API Operations
|
||||
path: /docs/api-reference/apps/v1beta1/operations/
|
||||
- title: Apps API Definitions
|
||||
path: /docs/api-reference/apps/v1beta1/definitions/
|
||||
|
||||
- title: Extensions API
|
||||
section:
|
||||
- docs/api-reference/extensions/v1beta1/operations.html
|
||||
|
|
|
@ -18,9 +18,17 @@
|
|||
<div id="cellophane" onclick="kub.toggleMenu()"></div>
|
||||
<header>
|
||||
<a href="/" class="logo"></a>
|
||||
|
||||
<div class="nav-buttons" data-auto-burger="primary">
|
||||
<a href="/docs/" class="button" id="viewDocs" data-auto-burger-exclude>View Documentation</a>
|
||||
<a href="/docs/hellonode/" class="button" id="tryKubernetes" data-auto-burger-exclude>Try Kubernetes</a>
|
||||
<ul class="global-nav">
|
||||
<li><a href="/docs/">Documentation</a></li>
|
||||
<li><a href="http://blog.kubernetes.io/">Blog</a></li>
|
||||
<li><a href="/partners/">Partners</a></li>
|
||||
<li><a href="/community/">Community</a></li>
|
||||
<li><a href="/case-studies/">Case Studies</a></li>
|
||||
</ul>
|
||||
<!-- <a href="/docs/" class="button" id="viewDocs" data-auto-burger-exclude>View Documentation</a> -->
|
||||
<a href="/docs/tutorials/kubernetes-basics/" class="button" id="tryKubernetes" data-auto-burger-exclude>Try Kubernetes</a>
|
||||
<button id="hamburger" onclick="kub.toggleMenu()" data-auto-burger-exclude><div></div></button>
|
||||
</div>
|
||||
|
||||
|
|
|
@ -234,6 +234,40 @@ header
|
|||
color: $blue
|
||||
text-decoration: none
|
||||
|
||||
// Global Nav - 12/9/2016 Update
|
||||
|
||||
ul.global-nav
|
||||
display: none
|
||||
|
||||
li
|
||||
display: inline-block
|
||||
margin-right: 14px
|
||||
|
||||
a
|
||||
color: #fff
|
||||
font-weight: 400
|
||||
padding: 0
|
||||
position: relative
|
||||
|
||||
&.active:after
|
||||
position: absolute
|
||||
width: 100%
|
||||
height: 2px
|
||||
content: ''
|
||||
bottom: -4px
|
||||
left: 0
|
||||
background: #fff
|
||||
|
||||
|
||||
.flip-nav ul.global-nav li a,
|
||||
.open-nav ul.global-nav li a,
|
||||
color: #333
|
||||
|
||||
.flip-nav ul.global-nav li a.active:after,
|
||||
.open-nav ul.global-nav li a.active:after,
|
||||
|
||||
background: $blue
|
||||
|
||||
// FLIP NAV
|
||||
.flip-nav
|
||||
header
|
||||
|
@ -301,6 +335,26 @@ header
|
|||
padding-left: 0
|
||||
padding-right: 0
|
||||
margin-bottom: 0
|
||||
position: relative
|
||||
|
||||
&.bot-bar:after
|
||||
display: block
|
||||
margin-bottom: -20px
|
||||
height: 8px
|
||||
width: 100%
|
||||
background-color: transparentize(white, 0.9)
|
||||
content: ''
|
||||
|
||||
&.no-sub
|
||||
|
||||
h5
|
||||
display: none
|
||||
|
||||
h1
|
||||
margin-bottom: 20px
|
||||
|
||||
#home #hero:after
|
||||
display: none
|
||||
|
||||
// VENDOR STRIP
|
||||
#vendorStrip
|
||||
|
@ -482,6 +536,19 @@ section
|
|||
margin: 0 auto
|
||||
height: 44px
|
||||
line-height: 44px
|
||||
position: relative
|
||||
|
||||
&:before
|
||||
position: absolute
|
||||
width: 15px
|
||||
height: 15px
|
||||
content: ''
|
||||
right: 8px
|
||||
top: 7px
|
||||
background-image: url(/images/search-icon.svg)
|
||||
background-repeat: no-repeat
|
||||
background-size: 100% 100%
|
||||
z-index: 1
|
||||
|
||||
#search
|
||||
width: 100%
|
||||
|
@ -490,6 +557,10 @@ section
|
|||
line-height: 30px
|
||||
font-size: 16px
|
||||
vertical-align: top
|
||||
background: #fff
|
||||
border: none
|
||||
border-radius: 4px
|
||||
position: relative
|
||||
|
||||
|
||||
#encyclopedia
|
||||
|
@ -758,7 +829,7 @@ dd
|
|||
background-color: $light-grey
|
||||
color: $dark-grey
|
||||
font-family: $mono-font
|
||||
vertical-align: bottom
|
||||
vertical-align: baseline
|
||||
font-size: 14px
|
||||
font-weight: bold
|
||||
padding: 2px 4px
|
||||
|
|
|
@ -3,6 +3,15 @@ $vendor-strip-height: 44px
|
|||
$video-section-height: 550px
|
||||
|
||||
@media screen and (min-width: 1025px)
|
||||
#hamburger
|
||||
display: none
|
||||
|
||||
ul.global-nav
|
||||
display: inline-block
|
||||
|
||||
#docs #vendorStrip #searchBox:before
|
||||
top: 15px
|
||||
|
||||
#vendorStrip
|
||||
height: $vendor-strip-height
|
||||
line-height: $vendor-strip-height
|
||||
|
@ -40,7 +49,7 @@ $video-section-height: 550px
|
|||
|
||||
#searchBox
|
||||
float: right
|
||||
width: 30%
|
||||
width: 320px
|
||||
|
||||
#search
|
||||
vertical-align: middle
|
||||
|
@ -65,7 +74,7 @@ $video-section-height: 550px
|
|||
|
||||
|
||||
#encyclopedia
|
||||
padding: 50px 50px 20px 20px
|
||||
padding: 50px 50px 100px 100px
|
||||
clear: both
|
||||
|
||||
#docsToc
|
||||
|
@ -88,6 +97,11 @@ $video-section-height: 550px
|
|||
section, header, footer
|
||||
main
|
||||
max-width: $main-max-width
|
||||
|
||||
header, #vendorStrip, #encyclopedia, #hero h1, #hero h5, #docs #hero h1, #docs #hero h5,
|
||||
#community #hero h1, .gridPage #hero h1, #community #hero h5, .gridPage #hero h5
|
||||
padding-left: 100px
|
||||
padding-right: 100px
|
||||
|
||||
#home
|
||||
section, header, footer
|
||||
|
@ -276,7 +290,7 @@ $video-section-height: 550px
|
|||
text-align: left
|
||||
|
||||
h1
|
||||
padding: 20px
|
||||
padding: 20px 100px
|
||||
|
||||
#tryKubernetes
|
||||
width: auto
|
||||
|
|
|
@ -148,7 +148,7 @@ By default the Kubernetes APIserver serves HTTP on 2 ports:
|
|||
- default IP is first non-localhost network interface, change with `--bind-address` flag.
|
||||
- request handled by authentication and authorization modules.
|
||||
- request handled by admission control module(s).
|
||||
- authentication and authoriation modules run.
|
||||
- authentication and authorization modules run.
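As a quick, hedged illustration of the two ports (the exact addresses, ports, and credentials depend on how your cluster was brought up), you can probe them from the master:

```shell
# Localhost port: plain HTTP, no authentication (only reachable from the master itself).
curl http://localhost:8080/version

# Secure port: HTTPS, requests pass through the authentication and authorization modules.
# Replace <master-ip> and $TOKEN with values from your own cluster.
curl -k https://<master-ip>:443/version --header "Authorization: Bearer $TOKEN"
```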
|
||||
|
||||
When the cluster is created by `kube-up.sh`, on Google Compute Engine (GCE),
|
||||
and on several other cloud providers, the API server serves on port 443. On
|
||||
|
|
|
@ -23,7 +23,7 @@ answer the following questions:
|
|||
- to where was it going?
|
||||
|
||||
NOTE: Currently, Kubernetes provides only basic audit capabilities, there is still a lot
|
||||
of work going on to provide fully featured auditing capabilities (see https://github.com/kubernetes/features/issues/22).
|
||||
of work going on to provide fully featured auditing capabilities (see [this issue](https://github.com/kubernetes/features/issues/22)).
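For reference, a minimal sketch of turning on this basic audit log on the API server (flag names as of roughly this release; check `kube-apiserver --help` for your version):

```shell
# Basic audit logging flags for kube-apiserver (in addition to your normal flags):
#   --audit-log-path       where audit entries are written (setting it enables auditing)
#   --audit-log-maxage     days to retain old audit log files
#   --audit-log-maxbackup  number of rotated files to keep
#   --audit-log-maxsize    maximum size in MB before rotation
kube-apiserver --audit-log-path=/var/log/kube-apiserver-audit.log \
  --audit-log-maxage=30 --audit-log-maxbackup=10 --audit-log-maxsize=100
```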
|
||||
|
||||
Kubernetes audit is part of [kube-apiserver](/docs/admin/kube-apiserver) logging all requests
|
||||
coming to the server. Each audit log contains two entries:
|
||||
|
|
|
@ -31,7 +31,7 @@ to talk to the Kubernetes API.
|
|||
API requests are tied to either a normal user or a service account, or are treated
|
||||
as anonymous requests. This means every process inside or outside the cluster, from
|
||||
a human user typing `kubectl` on a workstation, to `kubelets` on nodes, to members
|
||||
of the control plane, must authenticate when making requests to the the API server,
|
||||
of the control plane, must authenticate when making requests to the API server,
|
||||
or be treated as an anonymous user.
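For example (a sketch; the token, server address, and CA file are placeholders to replace with values from your own cluster), an authenticated request made directly against the API carries credentials, while the same request without them is treated as anonymous:

```shell
# Authenticated request: the bearer token identifies a normal user or a service account.
TOKEN="<token for a user or service account>"
curl --cacert /path/to/ca.crt https://<apiserver>:443/api/v1/namespaces \
  --header "Authorization: Bearer $TOKEN"

# The same request without credentials is treated as the anonymous user
# (system:anonymous in group system:unauthenticated), if anonymous access is enabled.
curl --cacert /path/to/ca.crt https://<apiserver>:443/api/v1/namespaces
```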
|
||||
|
||||
## Authentication strategies
|
||||
|
|
|
@ -299,9 +299,8 @@ subjects:
|
|||
name: jane
|
||||
roleRef:
|
||||
kind: Role
|
||||
namespace: default
|
||||
name: pod-reader
|
||||
apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
```
|
||||
|
||||
`RoleBindings` may also refer to a `ClusterRole`. However, a `RoleBinding` that
|
||||
|
@ -326,7 +325,7 @@ subjects:
|
|||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: secret-reader
|
||||
apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
```
|
||||
|
||||
Finally a `ClusterRoleBinding` may be used to grant permissions in all
|
||||
|
@ -338,14 +337,14 @@ namespaces. The following `ClusterRoleBinding` allows any user in the group
|
|||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
metadata:
|
||||
name: read-secrets
|
||||
name: read-secrets-global
|
||||
subjects:
|
||||
- kind: Group # May be "User", "Group" or "ServiceAccount"
|
||||
name: manager
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: secret-reader
|
||||
apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
name: secret-reader
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
```
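A hedged sketch of how such bindings are typically created and inspected (assuming the RBAC API group is enabled on your API server and the manifests above are saved to local files; the file name here is just an example):

```shell
# Create the binding from a saved manifest.
kubectl create -f read-secrets-global-clusterrolebinding.yaml

# List bindings to confirm they exist.
kubectl get rolebindings --namespace=default
kubectl get clusterrolebindings
```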
|
||||
|
||||
### Referring to Resources
|
||||
|
|
|
@ -99,7 +99,7 @@ Some possible patterns for communicating with pods in a DaemonSet are:
|
|||
- **Push**: Pods in the Daemon Set are configured to send updates to another service, such
|
||||
as a stats database. They do not have clients.
|
||||
- **NodeIP and Known Port**: Pods in the Daemon Set use a `hostPort`, so that the pods are reachable
|
||||
via the node IPs. Clients knows the the list of nodes ips somehow, and know the port by convention.
|
||||
  via the node IPs. Clients know the list of node IPs somehow, and know the port by convention.
|
||||
- **DNS**: Create a [headless service](/docs/user-guide/services/#headless-services) with the same pod selector,
|
||||
and then discover DaemonSets using the `endpoints` resource or retrieve multiple A records from
|
||||
DNS.
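As a hedged illustration of the **DNS** pattern above (the service name, label, and port are made-up examples; adjust the selector to match your DaemonSet's pod labels):

```shell
# Create a headless service whose selector matches the DaemonSet's pods,
# then read the per-pod endpoints it collects.
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Service
metadata:
  name: my-daemon
spec:
  clusterIP: None        # headless: no virtual IP, DNS returns the pod IPs directly
  selector:
    app: my-daemon       # must match the labels on the DaemonSet's pod template
  ports:
  - port: 9100           # example port exposed by each daemon pod
EOF

kubectl get endpoints my-daemon
```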
|
||||
|
|
|
@ -70,7 +70,7 @@ is no longer supported.
|
|||
|
||||
When enabled, pods are assigned a DNS A record in the form of `pod-ip-address.my-namespace.pod.cluster.local`.
|
||||
|
||||
For example, a pod with ip `1.2.3.4` in the namespace `default` with a dns name of `cluster.local` would have an entry: `1-2-3-4.default.pod.cluster.local`.
|
||||
For example, a pod with ip `1.2.3.4` in the namespace `default` with a DNS name of `cluster.local` would have an entry: `1-2-3-4.default.pod.cluster.local`.
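A quick way to check such a record (a sketch reusing the busybox-style debugging pod shown later on this page; the IP-derived name is just the example above):

```shell
# Resolve the IP-based pod record from inside a pod in the cluster.
kubectl exec -ti busybox -- nslookup 1-2-3-4.default.pod.cluster.local
```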
|
||||
|
||||
#### A Records and hostname based on Pod's hostname and subdomain fields
|
||||
|
||||
|
@ -94,13 +94,42 @@ Example:
|
|||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: default-subdomain
|
||||
spec:
|
||||
selector:
|
||||
name: busybox
|
||||
ports:
|
||||
- name: foo # Actually, no port is needed.
|
||||
port: 1234
|
||||
targetPort: 1234
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: busybox
|
||||
namespace: default
|
||||
name: busybox1
|
||||
labels:
|
||||
name: busybox
|
||||
spec:
|
||||
hostname: busybox-1
|
||||
subdomain: default
|
||||
subdomain: default-subdomain
|
||||
containers:
|
||||
- image: busybox
|
||||
command:
|
||||
- sleep
|
||||
- "3600"
|
||||
name: busybox
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: busybox2
|
||||
labels:
|
||||
name: busybox
|
||||
spec:
|
||||
hostname: busybox-2
|
||||
subdomain: default-subdomain
|
||||
containers:
|
||||
- image: busybox
|
||||
command:
|
||||
|
@ -110,9 +139,9 @@ spec:
|
|||
```
|
||||
|
||||
If there exists a headless service in the same namespace as the pod and with the same name as the subdomain, the cluster's KubeDNS Server also returns an A record for the Pod's fully qualified hostname.
|
||||
Given a Pod with the hostname set to "foo" and the subdomain set to "bar", and a headless Service named "bar" in the same namespace, the pod will see it's own FQDN as "foo.bar.my-namespace.svc.cluster.local". DNS serves an A record at that name, pointing to the Pod's IP.
|
||||
Given a Pod with the hostname set to "busybox-1" and the subdomain set to "default-subdomain", and a headless Service named "default-subdomain" in the same namespace, the pod will see its own FQDN as "busybox-1.default-subdomain.my-namespace.svc.cluster.local". DNS serves an A record at that name, pointing to the Pod's IP. Both pods "busybox1" and "busybox2" can have their own distinct A records.
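To see this in action (a sketch based on the example manifests above, assuming they were created in the `default` namespace), resolve the pods' FQDNs from one of the busybox pods:

```shell
# Both hostname-based records should resolve to the corresponding pod IPs.
kubectl exec -ti busybox1 -- nslookup busybox-1.default-subdomain.default.svc.cluster.local
kubectl exec -ti busybox1 -- nslookup busybox-2.default-subdomain.default.svc.cluster.local
```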
|
||||
|
||||
With v1.2, the Endpoints object also has a new annotation `endpoints.beta.kubernetes.io/hostnames-map`. Its value is the json representation of map[string(IP)][endpoints.HostRecord], for example: '{"10.245.1.6":{HostName: "my-webserver"}}'.
|
||||
As of Kubernetes v1.2, the Endpoints object also has the annotation `endpoints.beta.kubernetes.io/hostnames-map`. Its value is the json representation of map[string(IP)][endpoints.HostRecord], for example: '{"10.245.1.6":{HostName: "my-webserver"}}'.
|
||||
If the Endpoints are for a headless service, an A record is created with the format `<hostname>.<service name>.<pod namespace>.svc.<cluster domain>`.
|
||||
For the example json, if endpoints are for a headless service named "bar", and one of the endpoints has IP "10.245.1.6", an A record is created with the name "my-webserver.bar.my-namespace.svc.cluster.local" and the A record lookup would return "10.245.1.6".
|
||||
This endpoints annotation generally does not need to be specified by end-users, but can be used by the internal service controller to deliver the aforementioned feature.
|
||||
|
@ -171,7 +200,7 @@ busybox 1/1 Running 0 <some-time>
|
|||
Once that pod is running, you can exec nslookup in that environment:
|
||||
|
||||
```
|
||||
kubectl exec busybox -- nslookup kubernetes.default
|
||||
kubectl exec -ti busybox -- nslookup kubernetes.default
|
||||
```
|
||||
|
||||
You should see something like:
|
||||
|
@ -194,10 +223,10 @@ If the nslookup command fails, check the following:
|
|||
Take a look inside the resolv.conf file. (See "Inheriting DNS from the node" and "Known issues" below for more information)
|
||||
|
||||
```
|
||||
cat /etc/resolv.conf
|
||||
kubectl exec busybox cat /etc/resolv.conf
|
||||
```
|
||||
|
||||
Verify that the search path and name server are set up like the following (note that seach path may vary for different cloud providers):
|
||||
Verify that the search path and name server are set up like the following (note that search path may vary for different cloud providers):
|
||||
|
||||
```
|
||||
search default.svc.cluster.local svc.cluster.local cluster.local google.internal c.gce_project_id.internal
|
||||
|
@ -210,7 +239,7 @@ options ndots:5
|
|||
Errors such as the following indicate a problem with the kube-dns add-on or associated Services:
|
||||
|
||||
```
|
||||
$ kubectl exec busybox -- nslookup kubernetes.default
|
||||
$ kubectl exec -ti busybox -- nslookup kubernetes.default
|
||||
Server: 10.0.0.10
|
||||
Address 1: 10.0.0.10
|
||||
|
||||
|
@ -220,7 +249,7 @@ nslookup: can't resolve 'kubernetes.default'
|
|||
or
|
||||
|
||||
```
|
||||
$ kubectl exec busybox -- nslookup kubernetes.default
|
||||
$ kubectl exec -ti busybox -- nslookup kubernetes.default
|
||||
Server: 10.0.0.10
|
||||
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local
|
||||
|
||||
|
@ -244,7 +273,7 @@ kube-dns-v19-ezo1y 3/3 Running 0
|
|||
...
|
||||
```
|
||||
|
||||
If you see that no pod is running or that the pod has failed/completed, the dns add-on may not be deployed by default in your current environment and you will have to deploy it manually.
|
||||
If you see that no pod is running or that the pod has failed/completed, the DNS add-on may not be deployed by default in your current environment and you will have to deploy it manually.
|
||||
|
||||
#### Check for Errors in the DNS pod
|
||||
|
||||
|
@ -258,7 +287,7 @@ kubectl logs --namespace=kube-system $(kubectl get pods --namespace=kube-system
|
|||
|
||||
Check for any suspicious log entries. The letters W, E, and F at the beginning of a line represent Warning, Error, and Failure respectively. Please search for entries that have these as the logging level and use [kubernetes issues](https://github.com/kubernetes/kubernetes/issues) to report unexpected errors.
|
||||
|
||||
#### Is dns service up?
|
||||
#### Is DNS service up?
|
||||
|
||||
Verify that the DNS service is up by using the `kubectl get service` command.
|
||||
|
||||
|
@ -277,7 +306,7 @@ kube-dns 10.0.0.10 <none> 53/UDP,53/TCP 1h
|
|||
|
||||
If you have created the service, or if it should have been created by default but does not appear, see this [debugging services page](http://kubernetes.io/docs/user-guide/debugging-services/) for more information.
|
||||
|
||||
#### Are dns endpoints exposed?
|
||||
#### Are DNS endpoints exposed?
|
||||
|
||||
You can verify that dns endpoints are exposed by using the `kubectl get endpoints` command.
|
||||
|
||||
|
@ -348,7 +377,7 @@ some of those settings will be lost. As a partial workaround, the node can run
|
|||
`dnsmasq` which will provide more `nameserver` entries, but not more `search`
|
||||
entries. You can also use kubelet's `--resolv-conf` flag.
|
||||
|
||||
If you are using Alpine version 3.3 or earlier as your base image, dns may not
|
||||
If you are using Alpine version 3.3 or earlier as your base image, DNS may not
|
||||
work properly owing to a known issue with Alpine. Check [here](https://github.com/kubernetes/kubernetes/issues/30215)
|
||||
for more information.
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ kubernetes manages lifecycle of all images through imageManager, with the cooper
|
|||
of cadvisor.
|
||||
|
||||
The policy for garbage collecting images takes two factors into consideration:
|
||||
`HighThresholdPercent` and `LowThresholdPercent`. Disk usage above the the high threshold
|
||||
`HighThresholdPercent` and `LowThresholdPercent`. Disk usage above the high threshold
|
||||
will trigger garbage collection. The garbage collection will delete least recently used images until the low
|
||||
threshold has been met.
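For example (a sketch; these are kubelet flags shown with their usual defaults, so only set them if you need different values):

```shell
# Trigger image garbage collection when disk usage exceeds 90%,
# and keep deleting least recently used images until usage drops below 80%.
kubelet --image-gc-high-threshold=90 --image-gc-low-threshold=80 ...
```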
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ kube-controller-manager
|
|||
--concurrent_rc_syncs int32 The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load (default 5)
|
||||
--configure-cloud-routes Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider. (default true)
|
||||
--controller-start-interval duration Interval between starting controller managers.
|
||||
--daemonset-lookup-cache-size int32 The the size of lookup cache for daemonsets. Larger number = more responsive daemonsets, but more MEM load. (default 1024)
|
||||
--daemonset-lookup-cache-size int32 The size of lookup cache for daemonsets. Larger number = more responsive daemonsets, but more MEM load. (default 1024)
|
||||
--deployment-controller-sync-period duration Period for syncing the deployments. (default 30s)
|
||||
--enable-dynamic-provisioning Enable dynamic provisioning for environments that support it. (default true)
|
||||
--enable-garbage-collector Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-apiserver. (default true)
|
||||
|
@ -89,8 +89,8 @@ StreamingProxyRedirects=true|false (ALPHA - default=false)
|
|||
--pv-recycler-pod-template-filepath-nfs string The file path to a pod definition used as a template for NFS persistent volume recycling
|
||||
--pv-recycler-timeout-increment-hostpath int32 the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster. (default 30)
|
||||
--pvclaimbinder-sync-period duration The period for syncing persistent volumes and persistent volume claims (default 15s)
|
||||
--replicaset-lookup-cache-size int32 The the size of lookup cache for replicatsets. Larger number = more responsive replica management, but more MEM load. (default 4096)
|
||||
--replication-controller-lookup-cache-size int32 The the size of lookup cache for replication controllers. Larger number = more responsive replica management, but more MEM load. (default 4096)
|
||||
--replicaset-lookup-cache-size int32 The size of lookup cache for replicatsets. Larger number = more responsive replica management, but more MEM load. (default 4096)
|
||||
--replication-controller-lookup-cache-size int32 The size of lookup cache for replication controllers. Larger number = more responsive replica management, but more MEM load. (default 4096)
|
||||
--resource-quota-sync-period duration The period for syncing quota usage status in the system (default 5m0s)
|
||||
--root-ca-file string If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.
|
||||
--route-reconciliation-period duration The period for reconciling routes created for Nodes by cloud provider. (default 10s)
|
||||
|
|
|
@ -17,35 +17,40 @@ This document describes how to authenticate and authorize access to the kubelet'
|
|||
## Kubelet authentication
|
||||
|
||||
By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured
|
||||
authentication methods are treated as anonymous requests, and given a username of `system:anonymous`
|
||||
authentication methods are treated as anonymous requests, and given a username of `system:anonymous`
|
||||
and a group of `system:unauthenticated`.
|
||||
|
||||
To disable anonymous access and send `401 Unauthorized` responses to unauthenticated requests:
|
||||
|
||||
* start the kubelet with the `--anonymous-auth=false` flag
|
||||
|
||||
To enable X509 client certificate authentication to the kubelet's HTTPS endpoint:
|
||||
* start the kubelet with the `--client-ca-file` flag, providing a CA bundle to verify client certificates with
|
||||
|
||||
* start the kubelet with the `--client-ca-file` flag, providing a CA bundle to verify client certificates with
|
||||
* start the apiserver with `--kubelet-client-certificate` and `--kubelet-client-key` flags
|
||||
* see the [apiserver authentication documentation](/docs/admin/authentication/#x509-client-certs) for more details
|
||||
|
||||
To enable API bearer tokens (including service account tokens) to be used to authenticate to the kubelet's HTTPS endpoint:
|
||||
|
||||
* ensure the `authentication.k8s.io/v1beta1` API group is enabled in the API server
|
||||
* start the kubelet with the `--authentication-token-webhook`, `--kubeconfig`, and `--require-kubeconfig` flags
|
||||
* the kubelet calls the `TokenReview` API on the configured API server to determine user information from bearer tokens
|
||||
* the kubelet calls the `TokenReview` API on the configured API server to determine user information from bearer tokens
|
||||
|
||||
## Kubelet authorization
|
||||
|
||||
Any request that is successfully authenticated (including an anonymous request) is then authorized. The default authorization mode is `AlwaysAllow`, which allows all requests.
|
||||
|
||||
There are many possible reasons to subdivide access to the kubelet API:
|
||||
|
||||
* anonymous auth is enabled, but anonymous users' ability to call the kubelet API should be limited
|
||||
* bearer token auth is enabled, but arbitrary API users' (like service accounts) ability to call the kubelet API should be limited
|
||||
* client certificate auth is enabled, but only some of the client certificates signed by the configured CA should be allowed to use the kubelet API
|
||||
|
||||
To subdivide access to the kubelet API, delegate authorization to the API server:
|
||||
|
||||
* ensure the `authorization.k8s.io/v1beta1` API group is enabled in the API server
|
||||
* start the kubelet with the `--authorization-mode=Webhook`, `--kubeconfig`, and `--require-kubeconfig` flags
|
||||
* the kubelet calls the `SubjectAccessReview` API on the configured API server to determine whether each request is authorized
|
||||
* the kubelet calls the `SubjectAccessReview` API on the configured API server to determine whether each request is authorized
|
||||
|
||||
The kubelet authorizes API requests using the same [request attributes](/docs/admin/authorization/#request-attributes) approach as the apiserver.
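Putting the pieces above together, a hedged sketch of a kubelet configured for webhook authentication and authorization might look like this (combine with your existing kubelet flags; the file paths are placeholders):

```shell
# Reject anonymous requests, verify client certificates against a CA bundle,
# and delegate token authentication and request authorization to the API server.
kubelet \
  --anonymous-auth=false \
  --client-ca-file=/etc/kubernetes/pki/ca.crt \
  --authentication-token-webhook \
  --authorization-mode=Webhook \
  --kubeconfig=/etc/kubernetes/kubelet.conf \
  --require-kubeconfig
```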
|
||||
|
||||
|
@ -63,19 +68,20 @@ The resource and subresource is determined from the incoming request's path:
|
|||
|
||||
Kubelet API | resource | subresource
|
||||
-------------|----------|------------
|
||||
/stats/* | nodes | stats
|
||||
/metrics/* | nodes | metrics
|
||||
/logs/* | nodes | log
|
||||
/spec/* | nodes | spec
|
||||
/stats/\* | nodes | stats
|
||||
/metrics/\* | nodes | metrics
|
||||
/logs/\* | nodes | log
|
||||
/spec/\* | nodes | spec
|
||||
*all others* | nodes | proxy
|
||||
|
||||
The namespace and API group attributes are always an empty string, and
|
||||
The namespace and API group attributes are always an empty string, and
|
||||
the resource name is always the name of the kubelet's `Node` API object.
|
||||
|
||||
When running in this mode, ensure the user identified by the `--kubelet-client-certificate` and `--kubelet-client-key`
|
||||
When running in this mode, ensure the user identified by the `--kubelet-client-certificate` and `--kubelet-client-key`
|
||||
flags passed to the apiserver is authorized for the following attributes:
|
||||
* verb=*, resource=nodes, subresource=proxy
|
||||
* verb=*, resource=nodes, subresource=stats
|
||||
* verb=*, resource=nodes, subresource=log
|
||||
* verb=*, resource=nodes, subresource=spec
|
||||
* verb=*, resource=nodes, subresource=metrics
|
||||
|
||||
* verb=\*, resource=nodes, subresource=proxy
|
||||
* verb=\*, resource=nodes, subresource=stats
|
||||
* verb=\*, resource=nodes, subresource=log
|
||||
* verb=\*, resource=nodes, subresource=spec
|
||||
* verb=\*, resource=nodes, subresource=metrics
|
||||
|
|
|
@ -17,7 +17,7 @@ various mechanisms (primarily through the apiserver) and ensures that the contai
|
|||
described in those PodSpecs are running and healthy. The kubelet doesn't manage
|
||||
containers which were not created by Kubernetes.
|
||||
|
||||
Other than from an PodSpec from the apiserver, there are three ways that a container
|
||||
Other than from a PodSpec from the apiserver, there are three ways that a container
|
||||
manifest can be provided to the Kubelet.
|
||||
|
||||
File: Path passed as a flag on the command line. This file is rechecked every 20
|
||||
|
|
|
@ -57,6 +57,7 @@ The plugin requires a few things:
|
|||
* The standard CNI `bridge`, `lo` and `host-local` plugins are required, at minimum version 0.2.0. Kubenet will first search for them in `/opt/cni/bin`. Specify `network-plugin-dir` to supply additional search path. The first found match will take effect.
|
||||
* Kubelet must be run with the `--network-plugin=kubenet` argument to enable the plugin
|
||||
* Kubelet must also be run with the `--reconcile-cidr` argument to ensure the IP subnet assigned to the node by configuration or the controller-manager is propagated to the plugin
|
||||
* Kubelet should also be run with the `--non-masquerade-cidr=<clusterCidr>` argument to ensure traffic to IPs outside this range will use IP masquerade (see the sketch after this list).
|
||||
* The node must be assigned an IP subnet through either the `--pod-cidr` kubelet command-line option or the `--allocate-node-cidrs=true --cluster-cidr=<cidr>` controller-manager command-line options.
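A minimal sketch combining the flags listed above (the CIDR values are examples only and must match your own cluster configuration):

```shell
# Enable kubenet and make sure the node's pod CIDR and the cluster's
# non-masquerade range are known to the plugin.
kubelet \
  --network-plugin=kubenet \
  --reconcile-cidr \
  --non-masquerade-cidr=10.0.0.0/8 \
  --pod-cidr=10.0.1.0/24
```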
|
||||
|
||||
### Customizing the MTU (with kubenet)
|
||||
|
|
|
@ -181,6 +181,14 @@ The Nuage platform uses overlays to provide seamless policy-based networking bet
|
|||
complicated way to build an overlay network. This is endorsed by several of the
|
||||
"Big Shops" for networking.
|
||||
|
||||
### OVN (Open Virtual Networking)
|
||||
|
||||
OVN is an opensource network virtualization solution developed by the
|
||||
Open vSwitch community. It lets one create logical switches, logical routers,
|
||||
stateful ACLs, load-balancers etc to build different virtual networking
|
||||
topologies. The project has a specific Kubernetes plugin and documentation
|
||||
at [ovn-kubernetes](https://github.com/openvswitch/ovn-kubernetes).
|
||||
|
||||
### Project Calico
|
||||
|
||||
[Project Calico](http://docs.projectcalico.org/) is an open source container networking provider and network policy engine.
|
||||
|
|
|
@ -186,7 +186,7 @@ Modifications include setting labels on the node and marking it unschedulable.
|
|||
Labels on nodes can be used in conjunction with node selectors on pods to control scheduling,
|
||||
e.g. to constrain a pod to only be eligible to run on a subset of the nodes.
|
||||
|
||||
Marking a node as unscheduleable will prevent new pods from being scheduled to that
|
||||
Marking a node as unschedulable will prevent new pods from being scheduled to that
|
||||
node, but will not affect any existing pods on the node. This is useful as a
|
||||
preparatory step before a node reboot, etc. For example, to mark a node
|
||||
unschedulable, run this command:
|
||||
|
|
|
@ -349,7 +349,7 @@ in favor of the simpler configuation supported around eviction.
|
|||
The `kubelet` currently polls `cAdvisor` to collect memory usage stats at a regular interval. If memory usage
|
||||
increases within that window rapidly, the `kubelet` may not observe `MemoryPressure` fast enough, and the `OOMKiller`
|
||||
will still be invoked. We intend to integrate with the `memcg` notification API in a future release to reduce this
|
||||
latency, and instead have the kernel tell us when a threshold has been crossed immmediately.
|
||||
latency, and instead have the kernel tell us when a threshold has been crossed immediately.
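For example, a hedged sketch of a hard eviction threshold on the kubelet that leaves roughly 25% of memory headroom on a 4Gi node (adjust the value to your node size):

```shell
# Evict pods before the node runs completely out of memory, reducing
# reliance on the OOM killer for the rapid-spike case described above.
kubelet --eviction-hard="memory.available<1Gi" ...
```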
|
||||
|
||||
If you are not trying to achieve extreme utilization, but a sensible measure of overcommit, a viable workaround for
|
||||
this issue is to set eviction thresholds at approximately 75% capacity. This increases the ability of this feature
|
||||
|
|
|
@ -36,7 +36,7 @@ Each critical add-on has to tolerate it,
|
|||
the other pods shouldn't tolerate the taint. The taint is removed once the add-on is successfully scheduled.
|
||||
|
||||
*Warning:* currently there is no guarantee which node is chosen and which pods are being killed
|
||||
in order to schedule crical pod, so if rescheduler is enabled you pods might be occasionally
|
||||
in order to schedule critical pods, so if the rescheduler is enabled your pods might occasionally be
|
||||
killed for this purpose.
|
||||
|
||||
## Config
|
||||
|
|
|
@ -52,8 +52,7 @@ Resource Quota is enforced in a particular namespace when there is a
|
|||
|
||||
## Compute Resource Quota
|
||||
|
||||
You can limit the total sum of [compute resources](/docs/user-guide/compute-resources) and [storage resources](/docs/user-guide/persistent-volumes)
|
||||
that can be requested in a given namespace.
|
||||
You can limit the total sum of [compute resources](/docs/user-guide/compute-resources) that can be requested in a given namespace.
|
||||
|
||||
The following resource types are supported:
|
||||
|
||||
|
@ -65,7 +64,25 @@ The following resource types are supported:
|
|||
| `memory` | Across all pods in a non-terminal state, the sum of memory requests cannot exceed this value. |
|
||||
| `requests.cpu` | Across all pods in a non-terminal state, the sum of CPU requests cannot exceed this value. |
|
||||
| `requests.memory` | Across all pods in a non-terminal state, the sum of memory requests cannot exceed this value. |
|
||||
|
||||
## Storage Resource Quota
|
||||
|
||||
You can limit the total sum of [storage resources](/docs/user-guide/persistent-volumes) that can be requested in a given namespace.
|
||||
|
||||
In addition, you can limit consumption of storage resources based on associated storage-class.
|
||||
|
||||
| Resource Name | Description |
|
||||
| --------------------- | ----------------------------------------------------------- |
|
||||
| `requests.storage` | Across all persistent volume claims, the sum of storage requests cannot exceed this value. |
|
||||
| `persistentvolumeclaims` | The total number of [persistent volume claims](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) that can exist in the namespace. |
|
||||
| `<storage-class-name>.storageclass.storage.k8s.io/requests.storage` | Across all persistent volume claims associated with the storage-class-name, the sum of storage requests cannot exceed this value. |
|
||||
| `<storage-class-name>.storageclass.storage.k8s.io/persistentvolumeclaims` | Across all persistent volume claims associated with the storage-class-name, the total number of [persistent volume claims](/docs/user-guide/persistent-volumes/#persistentvolumeclaims) that can exist in the namespace. |
|
||||
|
||||
For example, if an operator wants to quota storage with `gold` storage class separate from `bronze` storage class, the operator can
|
||||
define a quota as follows:
|
||||
|
||||
* `gold.storageclass.storage.k8s.io/requests.storage: 500Gi`
|
||||
* `bronze.storageclass.storage.k8s.io/requests.storage: 100Gi`
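A hedged sketch of a `ResourceQuota` object expressing that policy (the namespace, object name, and the overall `requests.storage`/`persistentvolumeclaims` totals are arbitrary examples; the per-class values are the ones from the list above):

```shell
cat <<EOF | kubectl create --namespace=myspace -f -
apiVersion: v1
kind: ResourceQuota
metadata:
  name: storage-quota
spec:
  hard:
    requests.storage: 600Gi          # total storage requests across all PVCs
    persistentvolumeclaims: "10"     # total number of PVCs in the namespace
    gold.storageclass.storage.k8s.io/requests.storage: 500Gi
    bronze.storageclass.storage.k8s.io/requests.storage: 100Gi
EOF
```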
|
||||
|
||||
## Object Count Quota
|
||||
|
||||
|
@ -125,7 +142,7 @@ The quota can be configured to quota either value.
|
|||
|
||||
If the quota has a value specified for `requests.cpu` or `requests.memory`, then it requires that every incoming
|
||||
container makes an explicit request for those resources. If the quota has a value specified for `limits.cpu` or `limits.memory`,
|
||||
then it requires that every incoming container specifies an explict limit for those resources.
|
||||
then it requires that every incoming container specifies an explicit limit for those resources.
|
||||
|
||||
## Viewing and Setting Quotas
|
||||
|
||||
|
|
|
@ -232,7 +232,7 @@ services.loadbalancers 0 2
|
|||
services.nodeports 0 0
|
||||
```
|
||||
|
||||
As you can see, the pod that was created is consuming explict amounts of compute resources, and the usage is being
|
||||
As you can see, the pod that was created is consuming explicit amounts of compute resources, and the usage is being
|
||||
tracked by Kubernetes properly.
|
||||
|
||||
## Step 5: Advanced quota scopes
|
||||
|
|
|
@ -8,6 +8,7 @@ Use the following reference docs to understand the kubernetes REST API for vario
|
|||
* extensions/v1beta1: [operations](/docs/api-reference/extensions/v1beta1/operations.html), [model definitions](/docs/api-reference/extensions/v1beta1/definitions.html)
|
||||
* batch/v1: [operations](/docs/api-reference/batch/v1/operations.html), [model definitions](/docs/api-reference/batch/v1/definitions.html)
|
||||
* autoscaling/v1: [operations](/docs/api-reference/autoscaling/v1/operations.html), [model definitions](/docs/api-reference/autoscaling/v1/definitions.html)
|
||||
* apps/v1beta1: [operations](/docs/api-reference/apps/v1beta1/operations.html), [model definitions](/docs/api-reference/apps/v1beta1/definitions.html)
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
|
@ -24,11 +24,13 @@ In our experience, any system that is successful needs to grow and change as new
|
|||
|
||||
What constitutes a compatible change and how to change the API are detailed by the [API change document](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel/api_changes.md).
|
||||
|
||||
## API Swagger definitions
|
||||
## OpenAPI and Swagger definitions
|
||||
|
||||
Complete API details are documented using [Swagger v1.2](http://swagger.io/). The Kubernetes apiserver (aka "master") exposes an API that can be used to retrieve the Swagger Kubernetes API spec located at `/swaggerapi`. You can also enable a UI to browse the API documentation at `/swagger-ui` by passing the `--enable-swagger-ui=true` flag to apiserver.
|
||||
Complete API details are documented using [Swagger v1.2](http://swagger.io/) and [OpenAPI](https://www.openapis.org/). The Kubernetes apiserver (aka "master") exposes an API that can be used to retrieve the Swagger v1.2 Kubernetes API spec located at `/swaggerapi`. You can also enable a UI to browse the API documentation at `/swagger-ui` by passing the `--enable-swagger-ui=true` flag to apiserver.
|
||||
|
||||
We also host a version of the [latest API documentation](http://kubernetes.io/docs/api-reference/README/). This is updated with the latest release, so if you are using a different version of Kubernetes you will want to use the spec from your apiserver.
|
||||
We also host a version of the [latest v1.2 API documentation UI](http://kubernetes.io/kubernetes/third_party/swagger-ui/). This is updated with the latest release, so if you are using a different version of Kubernetes you will want to use the spec from your apiserver.
|
||||
|
||||
Starting with Kubernetes 1.4, the OpenAPI spec is also available at `/swagger.json`. While we are transitioning from Swagger v1.2 to OpenAPI (aka Swagger v2.0), some of the tools such as kubectl and swagger-ui still use the v1.2 spec. The OpenAPI spec is in Beta as of Kubernetes 1.5.
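For example (a sketch; substitute your own API server address and credentials, or go through `kubectl proxy` as shown here):

```shell
# Start a local proxy so the specs can be fetched without juggling credentials.
kubectl proxy --port=8001 &

# Swagger v1.2 resource listing:
curl http://localhost:8001/swaggerapi

# OpenAPI (Swagger 2.0) spec, available starting with Kubernetes 1.4:
curl http://localhost:8001/swagger.json
```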
|
||||
|
||||
Kubernetes implements an alternative Protobuf based serialization format for the API that is primarily intended for intra-cluster communication, documented in the [design proposal](https://github.com/kubernetes/kubernetes/blob/{{ page.githubbranch }}/docs/proposals/protobuf.md) and the IDL files for each schema are located in the Go packages that define the API objects.
|
||||
|
||||
|
|
|
@ -122,7 +122,7 @@ image format is SVG.
|
|||
{% capture whatsnext %}
|
||||
* Learn about [using page templates](/docs/contribute/page-templates/).
|
||||
* Learn about [staging your changes](/docs/contribute/stage-documentation-changes).
|
||||
* Learn about [creating a pull request](/docs/contribute/write-new-topic).
|
||||
* Learn about [creating a pull request](/docs/contribute/create-pull-request/).
|
||||
{% endcapture %}
|
||||
|
||||
{% include templates/task.md %}
|
||||
|
|
|
@ -78,9 +78,10 @@ KUBE_ALLOW_PRIV="--allow-privileged=false"
|
|||
KUBE_MASTER="--master=http://centos-master:8080"
|
||||
```
|
||||
|
||||
* Disable the firewall on the master and all the nodes, as docker does not play well with other firewall rule managers
|
||||
* Disable the firewall on the master and all the nodes, as docker does not play well with other firewall rule managers. CentOS won't let you disable the firewall as long as SELinux is enforcing, so that needs to be disabled first.
|
||||
|
||||
```shell
|
||||
setenforce 0
|
||||
systemctl disable iptables-services firewalld
|
||||
systemctl stop iptables-services firewalld
|
||||
```
|
||||
|
@ -118,10 +119,11 @@ KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
|
|||
KUBE_API_ARGS=""
|
||||
```
|
||||
|
||||
* Configure ETCD to hold the network overlay configuration on master:
|
||||
* Start ETCD and configure it to hold the network overlay configuration on master:
|
||||
**Warning** This network must be unused in your network infrastructure! `172.30.0.0/16` is free in our network.
|
||||
|
||||
```shell
|
||||
$ systemctl start etcd
|
||||
$ etcdctl mkdir /kube-centos/network
|
||||
$ etcdctl mk /kube-centos/network/config "{ \"Network\": \"172.30.0.0/16\", \"SubnetLen\": 24, \"Backend\": { \"Type\": \"vxlan\" } }"
|
||||
```
|
||||
|
@ -164,7 +166,8 @@ KUBELET_ADDRESS="--address=0.0.0.0"
|
|||
KUBELET_PORT="--port=10250"
|
||||
|
||||
# You may leave this blank to use the actual hostname
|
||||
KUBELET_HOSTNAME="--hostname-override=centos-minion-n" # Check the node number!
|
||||
# Check the node number!
|
||||
KUBELET_HOSTNAME="--hostname-override=centos-minion-n"
|
||||
|
||||
# Location of the api-server
|
||||
KUBELET_API_SERVER="--api-servers=http://centos-master:8080"
|
||||
|
@ -228,4 +231,3 @@ IaaS Provider | Config. Mgmt | OS | Networking | Docs
|
|||
Bare-metal | custom | CentOS | flannel | [docs](/docs/getting-started-guides/centos/centos_manual_config) | | Community ([@coolsvap](https://github.com/coolsvap))
|
||||
|
||||
For support level information on all solutions, see the [Table of solutions](/docs/getting-started-guides/#table-of-solutions) chart.
|
||||
|
||||
|
|
|
@ -37,7 +37,7 @@ master,etcd = kube-master.example.com
|
|||
If not
|
||||
|
||||
```shell
|
||||
yum install -y ansible git python-netaddr
|
||||
dnf install -y ansible git python-netaddr
|
||||
```
|
||||
|
||||
**Now clone down the Kubernetes repository**
|
||||
|
|
|
@ -11,7 +11,7 @@ title: Fedora (Single Node)
|
|||
|
||||
## Prerequisites
|
||||
|
||||
1. You need 2 or more machines with Fedora installed.
|
||||
1. You need 2 or more machines with Fedora installed. These can be either bare metal machines or virtual machines.
|
||||
|
||||
## Instructions
|
||||
|
||||
|
@ -33,18 +33,16 @@ fed-node = 192.168.121.65
|
|||
**Prepare the hosts:**
|
||||
|
||||
* Install Kubernetes on all hosts - fed-{master,node}. This will also pull in docker. Also install etcd on fed-master. This guide has been tested with kubernetes-0.18 and beyond.
|
||||
* The [--enablerepo=updates-testing](https://fedoraproject.org/wiki/QA:Updates_Testing) directive in the yum command below will ensure that the most recent Kubernetes version that is scheduled for pre-release will be installed. This should be a more recent version than the Fedora "stable" release for Kubernetes that you would get without adding the directive.
|
||||
* If you want the very latest Kubernetes release [you can download and yum install the RPM directly from Fedora Koji](http://koji.fedoraproject.org/koji/packageinfo?packageID=19202) instead of using the yum install command below.
|
||||
* Running on AWS EC2 with RHEL 7.2, you need to enable the "extras" repository for yum by editing `/etc/yum.repos.d/redhat-rhui.repo` and changing `enable=0` to `enable=1` for extras.
|
||||
|
||||
```shell
|
||||
yum -y install --enablerepo=updates-testing kubernetes
|
||||
dnf -y install kubernetes
|
||||
```
|
||||
|
||||
* Install etcd and iptables
|
||||
* Install etcd
|
||||
|
||||
```shell
|
||||
yum -y install etcd iptables
|
||||
dnf -y install etcd
|
||||
```
|
||||
|
||||
* Add master and node to /etc/hosts on all machines (not needed if hostnames already in DNS). Make sure that communication works between fed-master and fed-node by using a utility such as ping.
|
||||
|
@ -54,20 +52,12 @@ echo "192.168.121.9 fed-master
|
|||
192.168.121.65 fed-node" >> /etc/hosts
|
||||
```
|
||||
|
||||
* Edit /etc/kubernetes/config which will be the same on all hosts (master and node) to contain:
|
||||
* Edit /etc/kubernetes/config (which should be the same on all hosts) to set
|
||||
the name of the master server:
|
||||
|
||||
```shell
|
||||
# Comma separated list of nodes in the etcd cluster
|
||||
KUBE_MASTER="--master=http://fed-master:8080"
|
||||
|
||||
# logging to stderr means we get it in the systemd journal
|
||||
KUBE_LOGTOSTDERR="--logtostderr=true"
|
||||
|
||||
# journal message level, 0 is debug
|
||||
KUBE_LOG_LEVEL="--v=0"
|
||||
|
||||
# Should this cluster be allowed to run privileged docker containers
|
||||
KUBE_ALLOW_PRIV="--allow-privileged=false"
|
||||
```
|
||||
|
||||
* Disable the firewall on both the master and node, as docker does not play well with other firewall rule managers. Please note that iptables-services does not exist on default fedora server install.
|
||||
|
@ -86,7 +76,7 @@ systemctl stop iptables-services firewalld
|
|||
KUBE_API_ADDRESS="--address=0.0.0.0"
|
||||
|
||||
# Comma separated list of nodes in the etcd cluster
|
||||
KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:4001"
|
||||
KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379"
|
||||
|
||||
# Address range to use for services
|
||||
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
|
||||
|
@ -95,18 +85,10 @@ KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
|
|||
KUBE_API_ARGS=""
|
||||
```
|
||||
|
||||
* Edit /etc/etcd/etcd.conf,let the etcd to listen all the ip instead of 127.0.0.1, if not, you will get the error like "connection refused". Note that Fedora 22 uses etcd 2.0, One of the changes in etcd 2.0 is that now uses port 2379 and 2380 (as opposed to etcd 0.46 which userd 4001 and 7001).
|
||||
* Edit /etc/etcd/etcd.conf to let etcd listen on all available IPs instead of 127.0.0.1; if you have not done this, you might see an error such as "connection refused".
|
||||
|
||||
```shell
|
||||
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001"
|
||||
```
|
||||
|
||||
* Create /var/run/kubernetes on master:
|
||||
|
||||
```shell
|
||||
mkdir /var/run/kubernetes
|
||||
chown kube:kube /var/run/kubernetes
|
||||
chmod 750 /var/run/kubernetes
|
||||
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
|
||||
```
|
||||
|
||||
* Start the appropriate services on master:
|
||||
|
|
|
@ -50,13 +50,19 @@ etcdctl get /coreos.com/network/config
|
|||
|
||||
**Perform following commands on all Kubernetes nodes**
|
||||
|
||||
Install the flannel package
|
||||
|
||||
```shell
|
||||
# dnf -y install flannel
|
||||
```
|
||||
|
||||
Edit the flannel configuration file /etc/sysconfig/flanneld as follows:
|
||||
|
||||
```shell
|
||||
# Flanneld configuration options
|
||||
|
||||
# etcd url location. Point this to the server where etcd runs
|
||||
FLANNEL_ETCD="http://fed-master:4001"
|
||||
FLANNEL_ETCD="http://fed-master:2379"
|
||||
|
||||
# etcd config key. This is the configuration key that flannel queries
|
||||
# For address range assignment
|
||||
|
@ -105,7 +111,7 @@ Now check the interfaces on the nodes. Notice there is now a flannel.1 interface
|
|||
From any node in the cluster, check the cluster members by issuing a query to etcd server via curl (only partial output is shown using `grep -E "\{|\}|key|value"`). If you set up a 1 master and 3 nodes cluster, you should see one block for each node showing the subnets they have been assigned. You can associate those subnets to each node by the MAC address (VtepMAC) and IP address (Public IP) that is listed in the output.
|
||||
|
||||
```shell
|
||||
curl -s http://fed-master:4001/v2/keys/coreos.com/network/subnets | python -mjson.tool
|
||||
curl -s http://fed-master:2379/v2/keys/coreos.com/network/subnets | python -mjson.tool
|
||||
```
|
||||
|
||||
```json
|
||||
|
@ -149,7 +155,7 @@ bash-4.3#
|
|||
This will place you inside the container. Install the iproute and iputils packages to get the ip and ping utilities. Due to a [bug](https://bugzilla.redhat.com/show_bug.cgi?id=1142311), you need to modify the capabilities of the ping binary to work around the "Operation not permitted" error.
|
||||
|
||||
```shell
|
||||
bash-4.3# yum -y install iproute iputils
|
||||
bash-4.3# dnf -y install iproute iputils
|
||||
bash-4.3# setcap cap_net_raw-ep /usr/bin/ping
|
||||
```
|
||||
|
||||
|
|
|
@ -60,6 +60,7 @@ few commands, and have active community support.
|
|||
- [Azure](/docs/getting-started-guides/azure)
|
||||
- [CenturyLink Cloud](/docs/getting-started-guides/clc)
|
||||
- [IBM SoftLayer](https://github.com/patrocinio/kubernetes-softlayer)
|
||||
- [Stackpoint.io](/docs/getting-started-guides/stackpoint/)
|
||||
|
||||
### Custom Solutions
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ The installation uses a tool called `kubeadm` which is part of Kubernetes.
|
|||
This process works with local VMs, physical servers and/or cloud servers.
|
||||
It is simple enough that you can easily integrate its use into your own automation (Terraform, Chef, Puppet, etc).
|
||||
|
||||
See the full [`kubeadm` reference](/docs/admin/kubeadm) for information on all `kubeadm` command-line flags and for advice on automating `kubeadm` itself.
|
||||
See the full `kubeadm` [reference](/docs/admin/kubeadm) for information on all `kubeadm` command-line flags and for advice on automating `kubeadm` itself.
|
||||
|
||||
**The `kubeadm` tool is currently in alpha but please try it out and give us [feedback](/docs/getting-started-guides/kubeadm/#feedback)!
|
||||
Be sure to read the [limitations](#limitations); in particular note that kubeadm doesn't have great support for
|
||||
|
@ -69,18 +69,18 @@ For each host in turn:
|
|||
* SSH into the machine and become `root` if you are not already (for example, run `sudo su -`).
|
||||
* If the machine is running Ubuntu or HypriotOS, run:
|
||||
|
||||
# curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
|
||||
# cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
|
||||
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
|
||||
cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
|
||||
deb http://apt.kubernetes.io/ kubernetes-xenial main
|
||||
EOF
|
||||
# apt-get update
|
||||
# # Install docker if you don't have it already.
|
||||
# apt-get install -y docker.io
|
||||
# apt-get install -y kubelet kubeadm kubectl kubernetes-cni
|
||||
apt-get update
|
||||
# Install docker if you don't have it already.
|
||||
apt-get install -y docker.io
|
||||
apt-get install -y kubelet kubeadm kubectl kubernetes-cni
|
||||
|
||||
If the machine is running CentOS, run:
|
||||
|
||||
# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
|
||||
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
|
||||
[kubernetes]
|
||||
name=Kubernetes
|
||||
baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
|
||||
|
@ -90,10 +90,10 @@ For each host in turn:
|
|||
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
|
||||
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
|
||||
EOF
|
||||
# setenforce 0
|
||||
# yum install -y docker kubelet kubeadm kubectl kubernetes-cni
|
||||
# systemctl enable docker && systemctl start docker
|
||||
# systemctl enable kubelet && systemctl start kubelet
|
||||
setenforce 0
|
||||
yum install -y docker kubelet kubeadm kubectl kubernetes-cni
|
||||
systemctl enable docker && systemctl start docker
|
||||
systemctl enable kubelet && systemctl start kubelet
|
||||
|
||||
The kubelet is now restarting every few seconds, as it waits in a crashloop for `kubeadm` to tell it what to do.
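To double-check that the installation above succeeded on each machine (a hedged sketch; exact output varies by version and distribution):

```shell
# The client tools should report their versions, and the kubelet service
# should be active (it will keep restarting until kubeadm configures it).
kubectl version --client
kubeadm version
systemctl status kubelet
```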
|
||||
|
||||
|
|
|
@ -116,7 +116,7 @@ plugins, if required.
|
|||
|
||||
### Reusing the Docker daemon
|
||||
|
||||
When using a single VM of kubernetes its really handy to reuse the Docker daemon inside the VM; as this means you don't have to build on your host machine and push the image into a docker registry - you can just build inside the same docker daemon as minikube which speeds up local experiments.
|
||||
When using a single VM of Kubernetes, it's really handy to reuse minikube's built-in Docker daemon, as this means you don't have to build a docker registry on your host machine and push the image into it - you can just build inside the same docker daemon as minikube, which speeds up local experiments. Just make sure you tag your Docker image with something other than 'latest' and use that tag when you pull the image. Otherwise, if you do not specify a version for your image, it will be assumed to be `:latest`, with a pull image policy of `Always`, which may eventually result in `ErrImagePull` because you may not have any versions of your Docker image in the default docker registry (usually DockerHub) yet.
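For instance (a sketch; `hello-node:v1` is just an example image name and tag):

```shell
# Point your shell at minikube's Docker daemon (the docker-env step shown below),
# then build with an explicit, non-latest tag that your pod spec can reference.
eval $(minikube docker-env)
docker build -t hello-node:v1 .
```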
|
||||
|
||||
To be able to work with the docker daemon on your mac/linux host use the [docker-env command](./docs/minikube_docker-env.md) in your shell:
|
||||
|
||||
|
|
|
@ -646,7 +646,7 @@ This pod mounts several node file system directories using the `hostPath` volum
|
|||
|
||||
Apiserver supports several cloud providers.
|
||||
|
||||
- options for `--cloud-provider` flag are `aws`, `gce`, `mesos`, `openshift`, `ovirt`, `rackspace`, `vagrant`, or unset.
|
||||
- options for `--cloud-provider` flag are `aws`, `azure`, `cloudstack`, `fake`, `gce`, `mesos`, `openstack`, `ovirt`, `photon`, `rackspace`, `vsphere`, or unset.
|
||||
- unset used for e.g. bare metal setups.
|
||||
- support for new IaaS is added by contributing code [here](https://releases.k8s.io/{{page.githubbranch}}/pkg/cloudprovider/providers)
|
||||
|
||||
|
|
|
@ -0,0 +1,203 @@
|
|||
---
|
||||
assignees:
|
||||
- baldwinspc
|
||||
|
||||
---
|
||||
|
||||
* TOC
|
||||
{:toc}
|
||||
|
||||
|
||||
## Introduction
|
||||
|
||||
StackPointCloud is the universal control plane for Kubernetes Anywhere. StackPointCloud allows you to deploy and manage a Kubernetes cluster to the cloud provider of your choice in 3 steps using a web-based interface.
|
||||
|
||||
## AWS
|
||||
|
||||
To create a Kubernetes cluster on AWS, you will need an Access Key ID and a Secret Access Key from AWS.
|
||||
|
||||
### Choose a Provider
|
||||
|
||||
Log in to [stackpoint.io](https://stackpoint.io) with a GitHub, Google, or Twitter account.
|
||||
|
||||
Click **+ADD A CLUSTER NOW**.
|
||||
|
||||
Click to select Amazon Web Services (AWS).
|
||||
|
||||
### Configure Your Provider
|
||||
|
||||
Add your Access Key ID and a Secret Access Key from AWS. Select your default StackPointCloud SSH keypair, or click **ADD SSH KEY** to add a new keypair.
|
||||
|
||||
Click **SUBMIT** to submit the authorization information.
|
||||
|
||||
### Configure Your Cluster
|
||||
|
||||
Choose any extra options you may want to include with your cluster, then click **SUBMIT** to create the cluster.
|
||||
|
||||
### Running the Cluster
|
||||
|
||||
You can monitor the status of your cluster and suspend or delete it from [your stackpoint.io dashboard](https://stackpoint.io/#/clusters).
|
||||
|
||||
For information on using and managing a Kubernetes cluster on AWS, [consult the Kubernetes documentation](http://kubernetes.io/docs/getting-started-guides/aws/).
|
||||
|
||||
|
||||
|
||||
|
||||
## GCE
|
||||
|
||||
To create a Kubernetes cluster on GCE, you will need the Service Account JSON Data from Google.
|
||||
|
||||
|
||||
### Choose a Provider
|
||||
|
||||
Log in to [stackpoint.io](https://stackpoint.io) with a GitHub, Google, or Twitter account.
|
||||
|
||||
Click **+ADD A CLUSTER NOW**.
|
||||
|
||||
Click to select Google Compute Engine (GCE).
|
||||
|
||||
### Configure Your Provider
|
||||
|
||||
Add your Service Account JSON Data from Google. Select your default StackPointCloud SSH keypair, or click **ADD SSH KEY** to add a new keypair.
|
||||
|
||||
Click **SUBMIT** to submit the authorization information.
|
||||
|
||||
### Configure Your Cluster
|
||||
|
||||
Choose any extra options you may want to include with your cluster, then click **SUBMIT** to create the cluster.
|
||||
|
||||
### Running the Cluster
|
||||
|
||||
You can monitor the status of your cluster and suspend or delete it from [your stackpoint.io dashboard](https://stackpoint.io/#/clusters).
|
||||
|
||||
For information on using and managing a Kubernetes cluster on GCE, [consult the Kubernetes documentation](http://kubernetes.io/docs/getting-started-guides/gce).
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## GKE
|
||||
|
||||
To create a Kubernetes cluster on GKE, you will need the Service Account JSON Data from Google.
|
||||
|
||||
### Choose a Provider
|
||||
|
||||
Log in to [stackpoint.io](https://stackpoint.io) with a GitHub, Google, or Twitter account.
|
||||
|
||||
Click **+ADD A CLUSTER NOW**.
|
||||
|
||||
Click to select Google Container Engine (GKE).
|
||||
|
||||
### Configure Your Provider
|
||||
|
||||
Add your Service Account JSON Data from Google. Select your default StackPointCloud SSH keypair, or click **ADD SSH KEY** to add a new keypair.
|
||||
|
||||
Click **SUBMIT** to submit the authorization information.
|
||||
|
||||
### Configure Your Cluster
|
||||
|
||||
Choose any extra options you may want to include with your cluster, then click **SUBMIT** to create the cluster.
|
||||
|
||||
|
||||
### Running the Cluster
|
||||
|
||||
You can monitor the status of your cluster and suspend or delete it from [your stackpoint.io dashboard](https://stackpoint.io/#/clusters).
|
||||
|
||||
For information on using and managing a Kubernetes cluster on GKE, consult [the official documentation](http://kubernetes.io/docs/).
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## DigitalOcean
|
||||
|
||||
To create a Kubernetes cluster on DigitalOcean, you will need a DigitalOcean API Token.
|
||||
|
||||
### Choose a Provider
|
||||
|
||||
Log in to [stackpoint.io](https://stackpoint.io) with a GitHub, Google, or Twitter account.
|
||||
|
||||
Click **+ADD A CLUSTER NOW**.
|
||||
|
||||
Click to select DigitalOcean.
|
||||
|
||||
### Configure Your Provider
|
||||
|
||||
Add your DigitalOcean API Token. Select your default StackPointCloud SSH keypair, or click **ADD SSH KEY** to add a new keypair.
|
||||
|
||||
Click **SUBMIT** to submit the authorization information.
|
||||
|
||||
### Configure Your Cluster
|
||||
|
||||
Choose any extra options you may want to include with your cluster, then click **SUBMIT** to create the cluster.
|
||||
|
||||
### Running the Cluster
|
||||
|
||||
You can monitor the status of your cluster and suspend or delete it from [your stackpoint.io dashboard](https://stackpoint.io/#/clusters).
|
||||
|
||||
For information on using and managing a Kubernetes cluster on DigitalOcean, consult [the official documentation](http://kubernetes.io/docs/).

## Microsoft Azure

To create a Kubernetes cluster on Microsoft Azure, you will need an Azure Subscription ID, Username/Email, and Password.

### Choose a Provider

Log in to [stackpoint.io](https://stackpoint.io) with a GitHub, Google, or Twitter account.

Click **+ADD A CLUSTER NOW**.

Click to select Microsoft Azure.

### Configure Your Provider

Add your Azure Subscription ID, Username/Email, and Password. Select your default StackPointCloud SSH keypair, or click **ADD SSH KEY** to add a new keypair.

Click **SUBMIT** to submit the authorization information.

### Configure Your Cluster

Choose any extra options you may want to include with your cluster, then click **SUBMIT** to create the cluster.

### Running the Cluster

You can monitor the status of your cluster and suspend or delete it from [your stackpoint.io dashboard](https://stackpoint.io/#/clusters).

For information on using and managing a Kubernetes cluster on Azure, [consult the Kubernetes documentation](http://kubernetes.io/docs/getting-started-guides/azure/).

## Packet

To create a Kubernetes cluster on Packet, you will need a Packet API Key.

### Choose a Provider

Log in to [stackpoint.io](https://stackpoint.io) with a GitHub, Google, or Twitter account.

Click **+ADD A CLUSTER NOW**.

Click to select Packet.

### Configure Your Provider

Add your Packet API Key. Select your default StackPointCloud SSH keypair, or click **ADD SSH KEY** to add a new keypair.

Click **SUBMIT** to submit the authorization information.

### Configure Your Cluster

Choose any extra options you may want to include with your cluster, then click **SUBMIT** to create the cluster.

### Running the Cluster

You can monitor the status of your cluster and suspend or delete it from [your stackpoint.io dashboard](https://stackpoint.io/#/clusters).

For information on using and managing a Kubernetes cluster on Packet, consult [the official documentation](http://kubernetes.io/docs/).

@@ -12,7 +12,7 @@ In Kubernetes version 1.5, Windows Server Containers for Kubernetes is supported

1. Kubernetes control plane running on existing Linux infrastructure (version 1.5 or later)
2. Kubenet network plugin setup on the Linux nodes
3. Windows Server 2016 (RTM version 10.0.14393 or later)
4. Docker Version 1.12.2-cs2-ws-beta or later
4. Docker Version 1.12.2-cs2-ws-beta or later for Windows Server nodes (Linux nodes and the Kubernetes control plane can run any Kubernetes-supported Docker version)

## Networking

Networking is achieved using L3 routing. Because third-party networking plugins (e.g. flannel, calico, etc.) don't natively work on Windows Server, the cluster relies on networking technology built into the Windows and Linux operating systems. In this L3 networking approach, a /16 subnet is chosen for the cluster nodes, and a /24 subnet is assigned to each worker node. All pods on a given worker node are connected to that node's /24 subnet, which allows pods on the same node to communicate with each other. To enable networking between pods running on different nodes, the routing features built into Windows Server 2016 and Linux are used.
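As a concrete sketch of this approach (the subnets and addresses below are invented for illustration): if a worker node owns the pod subnet 192.168.2.0/24 and its primary address is 10.10.0.3, every other Linux node needs a static route that sends traffic for that /24 to it, and Windows nodes need the equivalent route configured with their native tooling.

```shell
# Illustration only: route the pod subnet owned by another node (192.168.2.0/24)
# via that node's primary IP (10.10.0.3). Repeat for each remote node's /24.
sudo ip route add 192.168.2.0/24 via 10.10.0.3

# Confirm the route is installed.
ip route show | grep 192.168.2.0
```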

@@ -40,6 +40,7 @@ To run Windows Server Containers on Kubernetes, you'll need to set up both your

2. DNS support for Windows recently got merged to docker master and is currently not supported in a stable Docker release. To use DNS, build Docker from master or download the binary from [Docker master](https://master.dockerproject.org/)
3. Pull the `apprenda/pause` image from `https://hub.docker.com/r/apprenda/pause`
4. RRAS (Routing) Windows feature enabled
5. Install a VMSwitch of type `Internal`, by running the `New-VMSwitch -Name KubeProxySwitch -SwitchType Internal` command in a *PowerShell* window. This will create a new network interface named `vEthernet (KubeProxySwitch)`, which kube-proxy will use to add Service IPs.

**Linux Host Setup**

@@ -127,14 +128,14 @@ To start kube-proxy on your Windows node:

Run the following in a PowerShell window with administrative privileges. Be aware that if the node reboots or the process exits, you will have to rerun the commands below to restart kube-proxy.

1. Set the environment variable *INTERFACE_TO_ADD_SERVICE_IP* to a node-only network interface. The interface created when Docker is installed should work
`$env:INTERFACE_TO_ADD_SERVICE_IP = "vEthernet (HNS Internal NIC)"`
1. Set the environment variable *INTERFACE_TO_ADD_SERVICE_IP* to `vEthernet (KubeProxySwitch)`, which we created in **_Windows Host Setup_** above
`$env:INTERFACE_TO_ADD_SERVICE_IP = "vEthernet (KubeProxySwitch)"`

2. Run the *kube-proxy* executable using the command below
`.\proxy.exe --v=3 --proxy-mode=userspace --hostname-override=<ip address/hostname of the windows node> --master=<api server location> --bind-address=<ip address of the windows node>`

## Scheduling Pods on Windows

Because your cluster has both Linux and Windows nodes, you must explictly set the nodeSelector constraint to be able to schedule Pods to Windows nodes. You must set nodeSelector with the label beta.kubernetes.io/os to the value windows; see the following example:
Because your cluster has both Linux and Windows nodes, you must explicitly set the nodeSelector constraint to be able to schedule Pods to Windows nodes. You must set nodeSelector with the label beta.kubernetes.io/os to the value windows; see the following example:

```
{

@@ -59,13 +59,13 @@ The first step is to write the application. Save this code in a folder called "`

#### server.js

```javascript
var http = require('http');
var handleRequest = function(request, response) {
const http = require('http');
const handleRequest = (request, response) => {
  console.log('Received request for URL: ' + request.url);
  response.writeHead(200);
  response.end('Hello World!');
};
var www = http.createServer(handleRequest);
const www = http.createServer(handleRequest);
www.listen(8080);
```

@@ -88,7 +88,7 @@ Next, create a file, also within `hellonode/` named `Dockerfile`. A Dockerfile d

#### Dockerfile

```conf
FROM node:4.4
FROM node:4.5
EXPOSE 8080
COPY server.js .
CMD node server.js

@@ -7,7 +7,10 @@ In the reference section, you can find reference documentation for Kubernetes AP

## API References

* [Kubernetes API](/docs/api/) - The core API for Kubernetes.
* [Extensions API](/docs/api-reference/extensions/v1beta1/operations/) - Manages extensions resources such as Jobs, Ingress and HorizontalPodAutoscalers.
* [Autoscaling API](/docs/api-reference/autoscaling/v1/operations/) - Manages autoscaling resources such as HorizontalPodAutoscalers.
* [Batch API](/docs/api-reference/batch/v1/operations/) - Manages batch resources such as Jobs.
* [Apps API](/docs/api-reference/apps/v1beta1/operations/) - Manages apps resources such as StatefulSets.
* [Extensions API](/docs/api-reference/extensions/v1beta1/operations/) - Manages extensions resources such as Ingress, Deployments, and ReplicaSets.

## CLI References

@@ -12,7 +12,15 @@ external IP address.

{% capture prerequisites %}

{% include task-tutorial-prereqs.md %}
* Install [kubectl](http://kubernetes.io/docs/user-guide/prereqs).

* Use a cloud provider like Google Container Engine or Amazon Web Services to
  create a Kubernetes cluster. This tutorial creates an
  [external load balancer](/docs/user-guide/load-balancer/),
  which requires a cloud provider.

* Configure `kubectl` to communicate with your Kubernetes API server. For
  instructions, see the documentation for your cloud provider.

{% endcapture %}

@@ -328,7 +328,7 @@ Host: k8s-master:8080

```

To consume opaque resources in pods, include the name of the opaque
resource as a key in the the `spec.containers[].resources.requests` map.
resource as a key in the `spec.containers[].resources.requests` map.
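For illustration, the request can sit alongside ordinary cpu and memory requests. A minimal sketch, assuming a node has already advertised an opaque resource named `pod.alpha.kubernetes.io/opaque-int-resource-foo` (the `foo` suffix, image, and quantities are placeholders):

```shell
# Sketch only: request one unit of the advertised opaque resource in a Pod spec.
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: opaque-resource-demo
spec:
  containers:
  - name: app
    image: nginx
    resources:
      requests:
        cpu: 100m
        memory: 64Mi
        pod.alpha.kubernetes.io/opaque-int-resource-foo: 1
EOF
```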

The pod will be scheduled only if all of the resource requests are
satisfied (including CPU, memory, and any opaque resources). The pod will

@@ -20,7 +20,9 @@ or used to store configuration data for system components such as controllers.

to [Secrets](/docs/user-guide/secrets/), but designed to more conveniently support working with strings that do not
contain sensitive information.

Let's look at a made-up example:
Note: ConfigMaps are not intended to act as a replacement for a properties file. ConfigMaps are intended to act as a reference to multiple properties files. You can think of them as a way to represent something similar to the /etc directory, and the files within, on a Linux computer. One example of this model is creating Kubernetes Volumes from ConfigMaps, where each data item in the ConfigMap becomes a new file.
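As a sketch of that volume model (the ConfigMap name `example-config` and the mount path are placeholders): each key under the ConfigMap's `data` shows up as a file beneath the mount path.

```shell
# Sketch only: mount an existing ConfigMap named "example-config" into a Pod;
# each data key becomes a file under /etc/config inside the container.
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: configmap-volume-demo
spec:
  restartPolicy: Never
  containers:
  - name: app
    image: busybox
    command: ["/bin/sh", "-c", "ls /etc/config"]
    volumeMounts:
    - name: config-volume
      mountPath: /etc/config
  volumes:
  - name: config-volume
    configMap:
      name: example-config
EOF
```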

Consider the following example:

```yaml
kind: ConfigMap

@@ -90,7 +90,7 @@ The cluster has to be started with `ENABLE_CUSTOM_METRICS` environment variable

### Pod configuration

The pods to be scaled must have a cAdvisor-specific custom (aka application) metrics endpoint configured. The configuration format is described [here](https://github.com/google/cadvisor/blob/master/docs/application_metrics.md). Kubernetes expects the configuration to
be placed in `definition.json` mounted via a [config map](/docs/user-guide/horizontal-pod-autoscaling/configmap/) in `/etc/custom-metrics`. A sample config map may look like this:
be placed in `definition.json` mounted via a [config map](/docs/user-guide/configmap/) in `/etc/custom-metrics`. A sample config map may look like this:

```yaml
apiVersion: v1

@@ -69,7 +69,7 @@ kubectl get [(-o|--output=)json|yaml|wide|custom-columns=...|custom-columns-file

kubectl get -f pod.yaml -o json

# Return only the phase value of the specified pod.
kubectl get -o template pod/web-pod-13je7 --template={% raw %}{{.status.phase}}{% endraw %}
kubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}

# List all replication controllers and services together in ps output format.
kubectl get rc,services

@@ -70,7 +70,36 @@ When a user is done with their volume, they can delete the PVC objects from the

### Reclaiming

The reclaim policy for a `PersistentVolume` tells the cluster what to do with the volume after it has been released of its claim. Currently, volumes can either be Retained, Recycled or Deleted. Retention allows for manual reclamation of the resource. For those volume plugins that support it, deletion removes both the `PersistentVolume` object from Kubernetes as well as deletes associated storage asset in external infrastructure such as AWS EBS, GCE PD or Cinder volume. Volumes that were dynamically provisioned are always deleted. If supported by appropriate volume plugin, recycling performs a basic scrub (`rm -rf /thevolume/*`) on the volume and makes it available again for a new claim.
The reclaim policy for a `PersistentVolume` tells the cluster what to do with the volume after it has been released of its claim. Currently, volumes can either be Retained, Recycled or Deleted. Retention allows for manual reclamation of the resource. For those volume plugins that support it, deletion removes both the `PersistentVolume` object from Kubernetes as well as deletes associated storage asset in external infrastructure such as AWS EBS, GCE PD or Cinder volume. Volumes that were dynamically provisioned are always deleted.
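A quick way to inspect or change the policy from the command line (a sketch; `pv0001` is an assumed volume name):

```shell
# List PersistentVolumes together with their current reclaim policy.
kubectl get pv

# Switch an existing volume to the Retain policy.
kubectl patch pv pv0001 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
```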

#### Recycling

If supported by the appropriate volume plugin, recycling performs a basic scrub (`rm -rf /thevolume/*`) on the volume and makes it available again for a new claim.

However, an administrator can configure a custom recycler pod template using the Kubernetes controller manager command line arguments as described [here](/docs/admin/kube-controller-manager/). The custom recycler pod template must contain a `volumes` specification, as shown in the example below:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pv-recycler-
  namespace: default
spec:
  restartPolicy: Never
  volumes:
  - name: vol
    hostPath:
      path: /any/path/it/will/be/replaced
  containers:
  - name: pv-recycler
    image: "gcr.io/google_containers/busybox"
    command: ["/bin/sh", "-c", "test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z \"$(ls -A /scrub)\" || exit 1"]
    volumeMounts:
    - name: vol
      mountPath: /scrub
```

However, the particular path specified in the custom recycler pod template in the `volumes` part is replaced with the particular path of the volume that is being recycled.

## Types of Persistent Volumes

@@ -28,6 +28,7 @@ To expose your service to the public internet, run:

$ kubectl expose deployment my-nginx --target-port=80 --type=LoadBalancer
service "my-nginx" exposed
```
Note: The type, LoadBalancer, is highly dependent upon the underlying platform that Kubernetes is running on. If your cloud provider doesn't have a load balancer implementation (e.g. OpenStack) for Kubernetes, you can simply use the allocated [NodePort](http://kubernetes.io/docs/user-guide/services/#type-nodeport) as a rudimentary form of load balancing across your endpoints.
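To find the port that was allocated in that case, inspect the Service you just exposed (a sketch using the `my-nginx` Service from above):

```shell
# The NodePort field (describe) or the PORT(S) column (get) shows the allocated
# port; traffic can then reach the endpoints via <any-node-ip>:<node-port>.
kubectl describe service my-nginx
kubectl get service my-nginx -o wide
```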

You can see that they are running by:

@@ -158,7 +158,7 @@ type: Opaque

Decode the password field:

```shell
$ echo "MWYyZDFlMmU2N2Rm" | base64 -d
$ echo "MWYyZDFlMmU2N2Rm" | base64 --decode
1f2d1e2e67df
```

@@ -0,0 +1,13 @@

<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="575.996px" height="576px" viewBox="512 32 575.996 576" enable-background="new 512 32 575.996 576" xml:space="preserve">
<path fill="#3371E3" d="M1076.525,541.14L960.947,425.562c70.432-96.992,61.952-233.465-25.498-320.915
C887.015,56.218,823.533,32,760.051,32c-63.481,0-126.963,24.218-175.398,72.653c-96.87,96.871-96.87,253.926,0,350.796
c48.436,48.436,111.917,72.653,175.398,72.653c51.13,0,102.24-15.737,145.511-47.155l115.577,115.577
c7.643,7.648,17.671,11.476,27.693,11.476s20.051-3.827,27.693-11.476C1091.82,581.235,1091.82,556.436,1076.525,541.14z
M623.424,416.679c-75.334-75.335-75.334-197.92,0-273.255c36.493-36.493,85.018-56.595,136.627-56.595
c51.61,0,100.135,20.096,136.628,56.595c75.334,75.334,75.334,197.92,0,273.255c-36.493,36.492-85.018,56.595-136.628,56.595
C708.441,473.273,659.923,453.171,623.424,416.679z"/>
</svg>

js/script.js

@@ -503,3 +503,21 @@ var pushmenu = (function(){

    show: show
  };
})();

$(function() {

  // Make global nav be active based on pathname
  if ((location.pathname.split("/")[1]) !== ""){
    $('.global-nav li a[href^="/' + location.pathname.split("/")[1] + '"]').addClass('active');
  }

  // If vendor strip doesn't exist add className
  if ( !$('#vendorStrip').length > 0 ) {
    $('#hero').addClass('bot-bar');
  }

  // If is not homepage add class to hero section
  if (!$('#home').length > 0 ) {
    $('#hero').addClass('no-sub');
  }
});