Ran update-imported-docs.sh

parent 6a4b9a2d99
commit aed351a334
_data/overrides.yml

@@ -1,5 +1,4 @@
overrides:
- path: v1.1/docs/man
- path: v1.1/docs/proposals
- path: v1.1/docs/api-reference
- path: v1.1/docs/user-guide/kubectl
@@ -8,7 +7,6 @@ overrides:
- path: v1.1/docs/admin/kube-proxy.md
- path: v1.1/docs/admin/kube-scheduler.md
- path: v1.1/docs/admin/kubelet.md
- path: v1.2/docs/man
- path: v1.2/docs/proposals
- path: v1.2/docs/api-reference
- path: v1.2/docs/user-guide/kubectl
update-imported-docs.sh

@@ -1,22 +1,24 @@
#git clone https://github.com/kubernetes/kubernetes.git k8s
#cd k8s
#git checkout gh-pages
#cd ..

git clone https://github.com/kubernetes/kubernetes.git k8s
cd k8s
git checkout gh-pages
cd ..

while read line || [[ -n ${line} ]]; do
  mystring="line"

  IFS=',' read -a myarray <<< "$mystring"

  echo "IP: ${myarray[0]}"
  echo "STATUS: ${myarray[3]}"
  CLEARPATH=${line}
  K8SSOURCE='k8s/_'${line}
  DESTINATION=${line%/*}
  echo "rm -rf ${CLEARPATH}"
  echo "mv ${K8SSOURCE} ${DESTINATION}"
  IFS=': ' read -a myarray <<< "${line}"
  # echo "arraypos0: ${myarray[0]}"
  # echo "arraypos1: ${myarray[1]}"
  # echo "arraypos2: ${myarray[2]}"
  if [ "${myarray[1]}" = "path" ]; then
    TARGET="${myarray[2]}"
    CLEARPATH="${TARGET}"
    K8SSOURCE='k8s/_'${TARGET}
    DESTINATION=${TARGET%/*}
    rm -rf ${CLEARPATH}
    mv -f ${K8SSOURCE} ${DESTINATION}
  fi
done <_data/overrides.yml

#rm -rf k8s
rm -rf k8s
git add .
git commit -m "Ran update-imported-docs.sh"
echo "Docs imported! Run 'git status' to see proposed changes, 'git push' to upload them"
@@ -0,0 +1,102 @@
---
layout: docwithnav
title: "kube-apiserver"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

## kube-apiserver

### Synopsis

The Kubernetes API server validates and configures data
for the api objects which include pods, services, replicationcontrollers, and
others. The API Server services REST operations and provides the frontend to the
cluster's shared state through which all other components interact.

```
{% raw %}
kube-apiserver
{% endraw %}
```

### Options

```
{% raw %}
--admission-control="AlwaysAdmit": Ordered list of plug-ins to do admission control of resources into cluster. Comma-delimited list of: AlwaysAdmit, AlwaysDeny, DenyEscalatingExec, DenyExecOnPrivileged, InitialResources, LimitRanger, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, ResourceQuota, SecurityContextDeny, ServiceAccount
--admission-control-config-file="": File with admission control configuration.
--advertise-address=<nil>: The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used.
--allow-privileged[=false]: If true, allow privileged containers.
--authorization-mode="AlwaysAllow": Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC
--authorization-policy-file="": File with authorization policy in csv format, used with --authorization-mode=ABAC, on the secure port.
--basic-auth-file="": If set, the file that will be used to admit requests to the secure port of the API server via http basic authentication.
--bind-address=0.0.0.0: The IP address on which to serve the --read-only-port and --secure-port ports. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0).
--cert-dir="/var/run/kubernetes": The directory where the TLS certs are located (by default /var/run/kubernetes). If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.
--client-ca-file="": If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.
--cloud-config="": The path to the cloud provider configuration file. Empty string for no configuration file.
--cloud-provider="": The provider for cloud services. Empty string for no provider.
--cluster-name="kubernetes": The instance prefix for the cluster
--cors-allowed-origins=[]: List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled.
--etcd-config="": The config file for the etcd client. Mutually exclusive with -etcd-servers.
--etcd-prefix="/registry": The prefix for all resource paths in etcd.
--etcd-servers=[]: List of etcd servers to watch (http://ip:port), comma separated. Mutually exclusive with -etcd-config
--etcd-servers-overrides=[]: Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are http://ip:port, semicolon separated.
--event-ttl=1h0m0s: Amount of time to retain events. Default 1 hour.
--experimental-keystone-url="": If passed, activates the keystone authentication plugin
--external-hostname="": The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs.)
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--insecure-bind-address=127.0.0.1: The IP address on which to serve the --insecure-port (set to 0.0.0.0 for all interfaces). Defaults to localhost.
--insecure-port=8080: The port on which to serve unsecured, unauthenticated access. Default 8080. It is assumed that firewall rules are set up such that this port is not reachable from outside of the cluster and that port 443 on the cluster's public address is proxied to this port. This is performed by nginx in the default setup.
--kubelet-certificate-authority="": Path to a cert. file for the certificate authority.
--kubelet-client-certificate="": Path to a client cert file for TLS.
--kubelet-client-key="": Path to a client key file for TLS.
--kubelet-https[=true]: Use https for kubelet connections
--kubelet-port=10250: Kubelet port
--kubelet-timeout=5s: Timeout for kubelet operations
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--long-running-request-regexp="(/|^)((watch|proxy)(/|$)|(logs?|portforward|exec|attach)/?$)": A regular expression matching long running requests which should be excluded from maximum inflight request handling.
--master-service-namespace="default": The namespace from which the kubernetes master services should be injected into pods
--max-connection-bytes-per-sec=0: If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests
--max-requests-inflight=400: The maximum number of requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit.
--min-request-timeout=1800: An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load.
--oidc-ca-file="": If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used
--oidc-client-id="": The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set
--oidc-issuer-url="": The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT)
--oidc-username-claim="sub": The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details.
--profiling[=true]: Enable profiling via web interface host:port/debug/pprof/
--runtime-config=: A set of key=value pairs that describe runtime configuration that may be passed to apiserver. apis/<groupVersion> key can be used to turn on/off specific api versions. apis/<groupVersion>/<resource> can be used to turn on/off specific resources. api/all and api/legacy are special keys to control all and legacy api versions respectively.
--secure-port=6443: The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all.
--service-account-key-file="": File containing PEM-encoded x509 RSA private or public key, used to verify ServiceAccount tokens. If unspecified, --tls-private-key-file is used.
--service-account-lookup[=false]: If true, validate ServiceAccount tokens exist in etcd as part of authentication.
--service-cluster-ip-range=<nil>: A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.
--service-node-port-range=: A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.
--ssh-keyfile="": If non-empty, use secure SSH proxy to the nodes, using this user keyfile
--ssh-user="": If non-empty, use secure SSH proxy to the nodes, using this user name
--storage-versions="extensions/v1beta1,v1": The versions to store resources with. Different groups may be stored in different versions. Specified in the format "group1/version1,group2/version2...". This flag expects a complete list of storage versions of ALL groups registered in the server. It defaults to a list of preferred versions of all registered groups, which is derived from the KUBE_API_VERSIONS environment variable.
--tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to /var/run/kubernetes.
--tls-private-key-file="": File containing x509 private key matching --tls-cert-file.
--token-auth-file="": If set, the file that will be used to secure the secure port of the API server via token authentication.
--watch-cache[=true]: Enable watch caching in the apiserver
{% endraw %}
```

###### Auto generated by spf13/cobra at 2015-10-29 20:12:33.554980405 +0000 UTC

<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,89 @@
---
layout: docwithnav
title: "kube-controller-manager"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

## kube-controller-manager

### Synopsis

The Kubernetes controller manager is a daemon that embeds
the core control loops shipped with Kubernetes. In applications of robotics and
automation, a control loop is a non-terminating loop that regulates the state of
the system. In Kubernetes, a controller is a control loop that watches the shared
state of the cluster through the apiserver and makes changes attempting to move the
current state towards the desired state. Examples of controllers that ship with
Kubernetes today are the replication controller, endpoints controller, namespace
controller, and serviceaccounts controller.

```
{% raw %}
kube-controller-manager
{% endraw %}
```

### Options

```
{% raw %}
--address=127.0.0.1: The IP address to serve on (set to 0.0.0.0 for all interfaces)
--allocate-node-cidrs[=false]: Should CIDRs for Pods be allocated and set on the cloud provider.
--cloud-config="": The path to the cloud provider configuration file. Empty string for no configuration file.
--cloud-provider="": The provider for cloud services. Empty string for no provider.
--cluster-cidr=<nil>: CIDR Range for Pods in cluster.
--cluster-name="kubernetes": The instance prefix for the cluster
--concurrent-endpoint-syncs=5: The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load
--concurrent_rc_syncs=5: The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load
--deleting-pods-burst=10: Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.
--deleting-pods-qps=0.1: Number of nodes per second on which pods are deleted in case of node failure.
--deployment-controller-sync-period=30s: Period for syncing the deployments.
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--horizontal-pod-autoscaler-sync-period=30s: The period for syncing the number of pods in horizontal pod autoscaler.
--kubeconfig="": Path to kubeconfig file with authorization and master location information.
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
--min-resync-period=12h0m0s: The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod
--namespace-sync-period=5m0s: The period for syncing namespace life-cycle updates
--node-monitor-grace-period=40s: Amount of time which we allow running Node to be unresponsive before marking it unhealthy. Must be N times more than kubelet's nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet to post node status.
--node-monitor-period=5s: The period for syncing NodeStatus in NodeController.
--node-startup-grace-period=1m0s: Amount of time which we allow starting Node to be unresponsive before marking it unhealthy.
--node-sync-period=10s: The period for syncing nodes from cloudprovider. Longer periods will result in fewer calls to cloud provider, but may delay addition of new nodes to cluster.
--pod-eviction-timeout=5m0s: The grace period for deleting pods on failed nodes.
--port=10252: The port that the controller-manager's http service runs on
--profiling[=true]: Enable profiling via web interface host:port/debug/pprof/
--pv-recycler-increment-timeout-nfs=30: the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod
--pv-recycler-minimum-timeout-hostpath=60: The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.
--pv-recycler-minimum-timeout-nfs=300: The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod
--pv-recycler-pod-template-filepath-hostpath="": The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.
--pv-recycler-pod-template-filepath-nfs="": The file path to a pod definition used as a template for NFS persistent volume recycling
--pv-recycler-timeout-increment-hostpath=30: the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.
--pvclaimbinder-sync-period=10s: The period for syncing persistent volumes and persistent volume claims
--resource-quota-sync-period=10s: The period for syncing quota usage status in the system
--root-ca-file="": If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.
--service-account-private-key-file="": Filename containing a PEM-encoded private RSA key used to sign service account tokens.
--service-sync-period=5m0s: The period for syncing services with their external load balancers
--terminated-pod-gc-threshold=12500: Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.
{% endraw %}
```

###### Auto generated by spf13/cobra at 2015-10-29 20:12:25.539938496 +0000 UTC

<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,67 @@
---
layout: docwithnav
title: "kube-proxy"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

## kube-proxy

### Synopsis

The Kubernetes network proxy runs on each node. This
reflects services as defined in the Kubernetes API on each node and can do simple
TCP,UDP stream forwarding or round robin TCP,UDP forwarding across a set of backends.
Service cluster ips and ports are currently found through Docker-links-compatible
environment variables specifying ports opened by the service proxy. There is an optional
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.

```
{% raw %}
kube-proxy
{% endraw %}
```

### Options

```
{% raw %}
--bind-address=0.0.0.0: The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)
--cleanup-iptables[=false]: If true cleanup iptables rules and exit.
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--healthz-bind-address=127.0.0.1: The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
--healthz-port=10249: The port to bind the health check server. Use 0 to disable.
--hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
--iptables-sync-period=30s: How often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.
--kubeconfig="": Path to kubeconfig file with authorization information (the master location is set by the master flag).
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--masquerade-all[=false]: If using the pure iptables proxy, SNAT everything
--master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
--oom-score-adj=-999: The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]
--proxy-mode="": Which proxy mode to use: 'userspace' (older, stable) or 'iptables' (experimental). If blank, look at the Node object on the Kubernetes API and respect the 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the best-available proxy (currently userspace, but may change in future versions). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables versions are insufficient, this always falls back to the userspace proxy.
--proxy-port-range=: Range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.
--resource-container="/kube-proxy": Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).
--udp-timeout=250ms: How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace
{% endraw %}
```

###### Auto generated by spf13/cobra at 2015-10-29 20:12:28.465584706 +0000 UTC

<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,62 @@
---
layout: docwithnav
title: "kube-scheduler"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

## kube-scheduler

### Synopsis

The Kubernetes scheduler is a policy-rich, topology-aware,
workload-specific function that significantly impacts availability, performance,
and capacity. The scheduler needs to take into account individual and collective
resource requirements, quality of service requirements, hardware/software/policy
constraints, affinity and anti-affinity specifications, data locality, inter-workload
interference, deadlines, and so on. Workload-specific requirements will be exposed
through the API as necessary.

```
{% raw %}
kube-scheduler
{% endraw %}
```

### Options

```
{% raw %}
--address=127.0.0.1: The IP address to serve on (set to 0.0.0.0 for all interfaces)
--algorithm-provider="DefaultProvider": The scheduling algorithm provider to use, one of: DefaultProvider
--bind-pods-burst=100: Number of bindings per second scheduler is allowed to make during bursts
--bind-pods-qps=50: Number of bindings per second scheduler is allowed to continuously make
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--kubeconfig="": Path to kubeconfig file with authorization and master location information.
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
--policy-config-file="": File with scheduler policy configuration
--port=10251: The port that the scheduler's http service runs on
--profiling[=true]: Enable profiling via web interface host:port/debug/pprof/
{% endraw %}
```

###### Auto generated by spf13/cobra at 2015-10-29 20:12:20.542446971 +0000 UTC

<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,129 @@
---
layout: docwithnav
title: "kubelet"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

## kubelet

### Synopsis

The kubelet is the primary "node agent" that runs on each
node. The kubelet works in terms of a PodSpec. A PodSpec is a YAML or JSON object
that describes a pod. The kubelet takes a set of PodSpecs that are provided through
various mechanisms (primarily through the apiserver) and ensures that the containers
described in those PodSpecs are running and healthy.

Other than from a PodSpec from the apiserver, there are three ways that a container
manifest can be provided to the Kubelet.

File: Path passed as a flag on the command line. This file is rechecked every 20
seconds (configurable with a flag).

HTTP endpoint: HTTP endpoint passed as a parameter on the command line. This endpoint
is checked every 20 seconds (also configurable with a flag).

HTTP server: The kubelet can also listen for HTTP and respond to a simple API
(underspec'd currently) to submit a new manifest.

```
{% raw %}
kubelet
{% endraw %}
```

### Options

```
{% raw %}
--address=0.0.0.0: The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)
--allow-privileged[=false]: If true, allow containers to request privileged mode. [default=false]
--api-servers=[]: List of Kubernetes API servers for publishing events, and reading pods and services. (ip:port), comma separated.
--cadvisor-port=4194: The port of the localhost cAdvisor endpoint
--cert-dir="/var/run/kubernetes": The directory where the TLS certs are located (by default /var/run/kubernetes). If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.
--cgroup-root="": Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.
--chaos-chance=0: If > 0.0, introduce random client errors and latency. Intended for testing. [default=0.0]
--cloud-config="": The path to the cloud provider configuration file. Empty string for no configuration file.
--cloud-provider="": The provider for cloud services. Empty string for no provider.
--cluster-dns=<nil>: IP address for a cluster DNS server. If set, kubelet will configure all containers to use this for DNS resolution in addition to the host's DNS servers
--cluster-domain="": Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains
--config="": Path to the config file or directory of files
--configure-cbr0[=false]: If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.
--container-runtime="docker": The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.
--containerized[=false]: Experimental support for running kubelet in a container. Intended for testing. [default=false]
--cpu-cfs-quota[=false]: Enable CPU CFS quota enforcement for containers that specify CPU limits
--docker-endpoint="": If non-empty, use this for the docker endpoint to communicate with
--docker-exec-handler="native": Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.
--enable-debugging-handlers[=true]: Enables server endpoints for log collection and local running of containers and commands
--enable-server[=true]: Enable the Kubelet's server
--event-burst=0: Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0
--event-qps=0: If > 0, limit event creations per second to this value. If 0, unlimited. [default=0.0]
--file-check-frequency=20s: Duration between checking config files for new data
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--healthz-bind-address=127.0.0.1: The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
--healthz-port=10248: The port of the localhost healthz endpoint
--host-ipc-sources="*": Comma-separated list of sources from which the Kubelet allows pods to use the host ipc namespace. [default="*"]
--host-network-sources="*": Comma-separated list of sources from which the Kubelet allows pods to use of host network. [default="*"]
--host-pid-sources="*": Comma-separated list of sources from which the Kubelet allows pods to use the host pid namespace. [default="*"]
--hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
--http-check-frequency=20s: Duration between checking http for new data
--image-gc-high-threshold=90: The percent of disk usage after which image garbage collection is always run. Default: 90%%
--image-gc-low-threshold=80: The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Default: 80%%
--kubeconfig="/var/lib/kubelet/kubeconfig": Path to a kubeconfig file, specifying how to authenticate to API server (the master location is set by the api-servers flag).
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--low-diskspace-threshold-mb=256: The absolute free disk space, in MB, to maintain. When disk space falls below this threshold, new pods would be rejected. Default: 256
--manifest-url="": URL for accessing the container manifest
--manifest-url-header="": HTTP header to use when accessing the manifest URL, with the key separated from the value with a ':', as in 'key:value'
--master-service-namespace="default": The namespace from which the kubernetes master services should be injected into pods
--max-open-files=1000000: Number of files that can be opened by Kubelet process. [default=1000000]
--max-pods=40: Number of Pods that can run on this Kubelet.
--maximum-dead-containers=100: Maximum number of old instances of containers to retain globally. Each container takes up some disk space. Default: 100.
--maximum-dead-containers-per-container=2: Maximum number of old instances to retain per container. Each container takes up some disk space. Default: 2.
--minimum-container-ttl-duration=1m0s: Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'
--network-plugin="": <Warning: Alpha feature> The name of the network plugin to be invoked for various events in kubelet/pod lifecycle
--network-plugin-dir="/usr/libexec/kubernetes/kubelet-plugins/net/exec/": <Warning: Alpha feature> The full path of the directory in which to search for network plugins
--node-status-update-frequency=10s: Specifies how often kubelet posts node status to master. Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. Default: 10s
--oom-score-adj=-999: The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]
--pod-cidr="": The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.
--pod-infra-container-image="gcr.io/google_containers/pause:0.8.0": The image whose network/ipc namespaces containers in each pod will use.
--port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag.
--read-only-port=10255: The read-only port for the Kubelet to serve on (set to 0 to disable)
--really-crash-for-testing[=false]: If true, when panics occur crash. Intended for testing.
--register-node[=true]: Register the node with the apiserver (defaults to true if --api-servers is set)
--registry-burst=10: Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0
--registry-qps=0: If > 0, limit registry pull QPS to this value. If 0, unlimited. [default=0.0]
--resolv-conf="/etc/resolv.conf": Resolver configuration file used as the basis for the container DNS resolution configuration.
--resource-container="/kubelet": Absolute name of the resource-only container to create and run the Kubelet in (Default: /kubelet).
--rkt-path="": Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'
--rkt-stage1-image="": image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used
--root-dir="/var/lib/kubelet": Directory path for managing kubelet files (volume mounts,etc).
--runonce[=false]: If true, exit after spawning pods from local manifests or remote urls. Exclusive with --api-servers, and --enable-server
--serialize-image-pulls[=true]: Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]
--streaming-connection-idle-timeout=0: Maximum time a streaming connection can be idle before the connection is automatically closed. Example: '5m'
--sync-frequency=10s: Max period between synchronizing running containers and config
--system-container="": Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: "").
--tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory passed to --cert-dir.
--tls-private-key-file="": File containing x509 private key matching --tls-cert-file.
{% endraw %}
```

###### Auto generated by spf13/cobra at 2015-10-29 20:12:15.480131233 +0000 UTC

<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,22 @@
---
layout: docwithnav
title: "api-reference/extensions/v1beta1/definitions"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->
<!-- needed for gh-pages to render html files when imported -->
{% include v1.1/extensions-v1beta1-definitions.html %}

<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@@ -0,0 +1,23 @@
---
layout: docwithnav
title: "api-reference/extensions/v1beta1/operations"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->
<!-- needed for gh-pages to render html files when imported -->
{% include v1.1/extensions-v1beta1-operations.html %}

<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@@ -0,0 +1,25 @@
---
layout: docwithnav
title: "api-reference/v1/definitions"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->
<!-- needed for gh-pages to render html files when imported -->
{% include v1.1/v1-definitions.html %}

<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@@ -0,0 +1,25 @@
---
layout: docwithnav
title: "api-reference/v1/operations"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->
<!-- needed for gh-pages to render html files when imported -->
{% include v1.1/v1-operations.html %}

<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
Binary image file added (30 KiB); contents not shown.
@@ -0,0 +1,136 @@
---
layout: docwithnav
title: "Supporting multiple API groups"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

# Supporting multiple API groups

## Goal

1. Breaking the monolithic v1 API into modular groups and allowing groups to be enabled/disabled individually. This allows us to break the monolithic API server into smaller components in the future.

2. Supporting different versions in different groups. This allows different groups to evolve at different speeds.

3. Supporting identically named kinds to exist in different groups. This is useful when we experiment with new features of an API in the experimental group while supporting the stable API in the original group at the same time.

4. Exposing the API groups and versions supported by the server. This is required to develop a dynamic client.

5. Laying the basis for [API Plugin](../../docs/design/extending-api.html).

6. Keeping the user interaction easy. For example, we should allow users to omit the group name when using kubectl if there is no ambiguity.


## Bookkeeping for groups

1. No changes to TypeMeta:

Currently many internal structures, such as RESTMapper and Scheme, are indexed and retrieved by APIVersion. For a fast implementation targeting the v1.1 deadline, we will concatenate group with version, in the form of "group/version", and use it where a version string is expected, so that much code can be reused. This implies we will not add a new field to TypeMeta; we will use TypeMeta.APIVersion to hold "group/version".

For backward compatibility, v1 objects belong to the group with an empty name, so existing v1 config files will remain valid.
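For illustration only, a minimal sketch of the kind of helper this concatenation implies. The type and function names here are hypothetical rather than the actual Kubernetes API; it only shows the "group/version" round-trip and the empty legacy group:

```
package groupversion

import "strings"

// GroupVersion is a hypothetical helper pairing an API group with a version.
// The empty group represents the legacy v1 API, as described above.
type GroupVersion struct {
	Group   string
	Version string
}

// String renders the "group/version" form stored in TypeMeta.APIVersion;
// the legacy group renders as a bare version such as "v1".
func (gv GroupVersion) String() string {
	if gv.Group == "" {
		return gv.Version
	}
	return gv.Group + "/" + gv.Version
}

// Parse splits an APIVersion string back into its group and version parts.
func Parse(apiVersion string) GroupVersion {
	if i := strings.Index(apiVersion, "/"); i >= 0 {
		return GroupVersion{Group: apiVersion[:i], Version: apiVersion[i+1:]}
	}
	// No "/": a bare version such as "v1", which belongs to the legacy group "".
	return GroupVersion{Group: "", Version: apiVersion}
}
```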
2. /pkg/conversion#Scheme:

The key of /pkg/conversion#Scheme.versionMap for versioned types will be "group/version". For now, the internal version types of all groups will be registered to versionMap[""], as we don't have any identically named kinds in different groups yet. In the near future, internal version types will be registered to versionMap["group/"], and pkg/conversion#Scheme.InternalVersion will have type []string.

We will need a mechanism to express if two kinds in different groups (e.g., compute/pods and experimental/pods) are convertible, and auto-generate the conversions if they are.

3. meta.RESTMapper:

Each group will have its own RESTMapper (of type DefaultRESTMapper), and these mappers will be registered to pkg/api#RESTMapper (of type MultiRESTMapper).

To support identically named kinds in different groups, we need to expand the input of RESTMapper.VersionAndKindForResource from (resource string) to (group, resource string). If group is not specified and there is ambiguity (i.e., the resource exists in multiple groups), an error should be returned to force the user to specify the group.
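A rough sketch of what such a (group, resource) lookup could look like follows. The map layout and the types below are illustrative assumptions, not the real meta.RESTMapper implementation; the point is only the ambiguity rule described above:

```
package mapper

import "fmt"

// kindRef is a hypothetical stand-in for the (version, kind) a resource maps to.
type kindRef struct {
	GroupVersion string
	Kind         string
}

// multiMapper indexes kinds by resource name and then by group, so the same
// resource name (e.g. "pods") may exist in several groups.
type multiMapper struct {
	byResource map[string]map[string]kindRef // resource -> group -> kind
}

// VersionAndKindForResource mirrors the proposed (group, resource) signature.
// An empty group is accepted only when the resource name is unambiguous.
func (m *multiMapper) VersionAndKindForResource(group, resource string) (kindRef, error) {
	groups, ok := m.byResource[resource]
	if !ok || len(groups) == 0 {
		return kindRef{}, fmt.Errorf("no registered resource %q", resource)
	}
	if group != "" {
		ref, ok := groups[group]
		if !ok {
			return kindRef{}, fmt.Errorf("resource %q not found in group %q", resource, group)
		}
		return ref, nil
	}
	if len(groups) > 1 {
		return kindRef{}, fmt.Errorf("resource %q is ambiguous: specify a group", resource)
	}
	for _, ref := range groups { // exactly one entry remains
		return ref, nil
	}
	return kindRef{}, fmt.Errorf("no registered resource %q", resource)
}
```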
## Server-side implementation

1. resource handlers' URL:

We will force the URL to be in the form of prefix/group/version/... (see the sketch after this list).

Prefix is used to differentiate API paths from other paths like /healthz. All groups will use the same prefix="apis", except when backward compatibility requires otherwise. No "/" is allowed in prefix, group, or version. Specifically,

* for /api/v1, we set the prefix="api" (which is populated from cmd/kube-apiserver/app#APIServer.APIPrefix), group="", version="v1", so the URL remains /api/v1.

* for new kube API groups, we will set the prefix="apis" (we will add a field in type APIServer to hold this prefix), group=GROUP_NAME, version=VERSION. For example, the URL of the experimental resources will be /apis/experimental/v1alpha1.

* for the OpenShift v1 API, because it's currently registered at /oapi/v1, to be backward compatible, OpenShift may set prefix="oapi", group="".

* for other new third-party APIs, they should also use the prefix="apis" and choose the group and version. This can be done through the third-party API plugin mechanism in [13000](http://pr.k8s.io/13000).
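To make the path rules above concrete, a small illustrative helper; the function name is assumed, and the real server wires the prefix through its configuration types rather than a bare string:

```
package paths

import "path"

// apiPath builds the resource-handler root for a group following the rules
// above: the legacy group uses its prefix with no group segment, while every
// other group gets a prefix/group/version path.
func apiPath(prefix, group, version string) string {
	if group == "" {
		return path.Join("/", prefix, version) // e.g. /api/v1
	}
	return path.Join("/", prefix, group, version) // e.g. /apis/experimental/v1alpha1
}
```

With this, apiPath("api", "", "v1") yields /api/v1 and apiPath("apis", "experimental", "v1alpha1") yields /apis/experimental/v1alpha1.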
2. supporting API discovery (a client-side sketch follows this list):

* At /prefix (e.g., /apis), the API server will return the supported groups and their versions using the pkg/api/unversioned#APIVersions type, setting the Versions field to "group/version". This is backward compatible, because currently the API server does return "v1" encoded in pkg/api/unversioned#APIVersions at /api. (We will also rename the JSON field name from `versions` to `apiVersions`, to be consistent with the pkg/api#TypeMeta.APIVersion field.)

* At /prefix/group, the API server will return all supported versions of the group. We will create a new type VersionList (name is open to discussion) in pkg/api/unversioned as the API.

* At /prefix/group/version, the API server will return all supported resources in this group, and whether each resource is namespaced. We will create a new type APIResourceList (name is open to discussion) in pkg/api/unversioned as the API.

We will design how to handle deeper paths in other proposals.

* At /swaggerapi/swagger-version/prefix/group/version, the API server will return the Swagger spec of that group/version in `swagger-version` (e.g. we may support both Swagger v1.2 and v2.0).
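For example, a third-party client could discover the supported groups roughly as below. The response struct is a local stand-in for pkg/api/unversioned#APIVersions, the server address is hypothetical, and the JSON field name depends on the `versions`/`apiVersions` rename discussed above:

```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// apiVersions is a minimal local stand-in for the discovery payload the
// server is expected to return at /apis: a list of "group/version" strings.
type apiVersions struct {
	Versions []string `json:"versions"`
}

func main() {
	// Hypothetical apiserver address; discovery is served at /<prefix>, here /apis.
	resp, err := http.Get("https://apiserver.example.com/apis")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var discovered apiVersions
	if err := json.NewDecoder(resp.Body).Decode(&discovered); err != nil {
		panic(err)
	}
	for _, gv := range discovered.Versions {
		fmt.Println("supported:", gv) // e.g. "experimental/v1alpha1"
	}
}
```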
3. handling common API objects:

* top-level common API objects:

To handle the top-level API objects that are used by all groups, we either have to register them to all schemes, or we can choose not to encode them to a version. We plan to take the latter approach and place such types in a new package called `unversioned`, because many of the common top-level objects, such as APIVersions, VersionList, and APIResourceList, which are used in the API discovery, and pkg/api#Status, are part of the protocol between client and server, and do not belong to the domain-specific parts of the API, which will evolve independently over time.

Types in the unversioned package will not have the APIVersion field, but may retain the Kind field.

For backward compatibility, when handling the Status, the server will encode it to v1 if the client expects the Status to be encoded in v1; otherwise the server will send the unversioned#Status. If an error occurs before the version can be determined, the server will send the unversioned#Status.

* non-top-level common API objects:

Assuming object o belonging to group X is used as a field in an object belonging to group Y, currently genconversion will generate the conversion functions for o in package Y. Hence, we don't need any special treatment for non-top-level common API objects.

TypeMeta is an exception, because it is a common object that is used by objects in all groups but does not logically belong to any group. We plan to move it to the package `unversioned`.

## Client-side implementation

1. clients:

Currently we have structured (pkg/client/unversioned#ExperimentalClient, pkg/client/unversioned#Client) and unstructured (pkg/kubectl/resource#Helper) clients. The structured clients are not scalable because each of them implements a specific interface, e.g., [here](https://releases.k8s.io/release-1.1/pkg/client/unversioned/client.go#L32). Only the unstructured clients are scalable. We should either auto-generate the code for structured clients or migrate to use the unstructured clients as much as possible.

We should also move the unstructured client to pkg/client/.

2. Spelling the URL:

The URL is in the form of prefix/group/version/. The prefix is hard-coded in the client/unversioned.Config. The client should be able to figure out `group` and `version` using the RESTMapper. For a third-party client which does not have access to the RESTMapper, it should discover the mapping of `group`, `version` and `kind` by querying the server as described in point 2 of #server-side-implementation.

3. kubectl:

kubectl should accept arguments like `group/resource` and `group/resource/name`. Nevertheless, the user can omit the `group`; kubectl shall then rely on RESTMapper.VersionAndKindForResource() to figure out the default group/version of the resource. For example, for resources (like `node`) that exist in both the k8s v1 API and the k8s modularized API (like `infra/v2`), we should set a kubectl default to use one of them. If there is no default group, kubectl should return an error for the ambiguity.

When kubectl is used with a single resource type, the --api-version and --output-version flags of kubectl should accept values in the form of `group/version`, and they should work as they do today. For multi-resource operations, we will disable these two flags initially.

Currently, by setting pkg/client/unversioned/clientcmd/api/v1#Config.NamedCluster[x].Cluster.APIVersion ([here](https://releases.k8s.io/release-1.1/pkg/client/unversioned/clientcmd/api/v1/types.go#L58)), the user can configure the default apiVersion used by kubectl to talk to the server. It does not make sense to set a global version used by kubectl when there are multiple groups, so we plan to deprecate this field. We may extend the version negotiation function to negotiate the preferred version of each group. Details will be in another proposal.

## OpenShift integration

OpenShift can take a similar approach to break up its monolithic v1 API: keeping the v1 objects where they are, and gradually adding groups.

For the v1 objects in OpenShift, they should keep doing what they do now: they should remain registered to the Scheme.versionMap["v1"] scheme, and they should keep being added to originMapper.

For new OpenShift groups, they should do the same as native Kubernetes groups would do: each group should register to Scheme.versionMap["group/version"], and each should have a separate RESTMapper that is registered to the MultiRESTMapper.

To expose a list of the supported OpenShift groups to clients, OpenShift just has to call pkg/cmd/server/origin#initAPIVersionRoute() as it does now, passing in the supported "group/versions" instead of "versions".


## Future work

1. Dependencies between groups: we need an interface to register the dependencies between groups. It is not our priority now as the use cases are not clear yet.


<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,162 @@
---
layout: docwithnav
title: "Abstract"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

## Abstract

In the current system, all watch requests sent to apiserver are in general
redirected to etcd. This means that for every watch request to apiserver,
apiserver opens a watch on etcd.

The purpose of the proposal is to improve the overall performance of the system
by solving the following problems:

- having too many open watches on etcd
- avoiding deserializing/converting the same objects multiple times in different
watch results

In the future, we would also like to add an indexing mechanism to the watch.
Although Indexer is not part of this proposal, it is supposed to be compatible
with it - in the future Indexer should be incorporated into the proposed new
watch solution in apiserver without requiring any redesign.


## High level design

We are going to solve those problems by allowing many clients to watch the same
storage in the apiserver, without being redirected to etcd.

At the high level, apiserver will have a single watch open to etcd, watching all
the objects (of a given type) without any filtering. The changes delivered from
etcd will then be stored in a cache in apiserver. This cache is in fact a
"rolling history window" that will support clients having some amount of latency
between their list and watch calls. Thus it will have a limited capacity and
whenever a new change comes from etcd while the cache is full, the oldest change
will be removed to make room for the new one.

When a client sends a watch request to apiserver, instead of redirecting it to
etcd, it will cause:

- registering a handler to receive all new changes coming from etcd
- iterating through the watch window, starting at the requested resourceVersion
to the head and sending filtered changes directly to the client, blocking
the above until this iteration has caught up

This will be done by creating a go-routine per watcher that will be responsible
for performing the above.

The following section describes the proposal in more detail, analyzes some
corner cases and divides the whole design into more fine-grained steps.


## Proposal details

We would like the cache to be __per-resource-type__ and __optional__. Thanks to
it we will be able to:
- have different cache sizes for different resources (e.g. bigger cache
[= longer history] for pods, which can significantly affect performance)
- avoid any overhead for objects that are watched very rarely (e.g. events
are almost not watched at all, but there are a lot of them)
- filter the cache for each watcher more effectively

If we decide to support watches spanning different resources in the future and
we have an efficient indexing mechanism, it should be relatively simple to unify
the cache to be common for all the resources.

The rest of this section describes the concrete steps that need to be done
to implement the proposal.

1. Since we want the watch in apiserver to be optional for different resource
types, this needs to be self-contained and hidden behind a well defined API.
This should be a layer very close to etcd - in particular all registries:
"pkg/registry/generic/etcd" should be built on top of it.
We will solve it by extracting the interface of tools.EtcdHelper and treating
this interface as the API - the whole watch mechanism in apiserver will be
hidden behind that interface.
Thanks to it we will get an initial implementation for free and we will just
need to reimplement a few relevant functions (probably just Watch and List).
Moreover, this will not require any changes in other parts of the code.
This step is about extracting the interface of tools.EtcdHelper.
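As an illustration, the extracted interface could take roughly the following shape. The method set and signatures below are assumptions about the relevant subset (List and Watch), not the actual tools.EtcdHelper API:

```
package storage

// Event is a minimal stand-in for a watch event (type + object).
type Event struct {
	Type   string      // "ADDED", "MODIFIED", "DELETED"
	Object interface{} // the decoded API object
}

// WatchInterface is a local stand-in for pkg/watch.Interface.
type WatchInterface interface {
	ResultChan() <-chan Event
	Stop()
}

// Storage is an assumed extraction of the tools.EtcdHelper methods that the
// new cache-backed implementation would need to re-implement. Registries
// would depend on this interface instead of on EtcdHelper directly.
type Storage interface {
	// List returns all objects under key, together with the resourceVersion
	// the returned state corresponds to.
	List(key string, listObj interface{}) (resourceVersion uint64, err error)

	// Watch streams changes under key starting from resourceVersion,
	// applying filter before delivering events to the caller.
	Watch(key string, resourceVersion uint64, filter func(obj interface{}) bool) (WatchInterface, error)
}
```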
2. Create a FIFO cache with a given capacity. In its "rolling history window"
we will store two things:

- the resourceVersion of the object (being an etcdIndex)
- the object watched from etcd itself (in a deserialized form)

This should be as simple as having an array and treating it as a cyclic buffer
(a sketch follows below).
Obviously the resourceVersion of objects watched from etcd will be increasing, and
they are necessary for registering a new watcher that is interested in all the
changes since a given etcdIndex.

Additionally, we should support the LIST operation, otherwise clients can never
start watching at "now". We may consider passing lists through etcd, however
this will not work once we have Indexer, so we will need that information
in memory anyway.
Thus, we should support the LIST operation from the "end of the history" - i.e.
from the moment just after the newest cached watched event. It should be
pretty simple to do, because we can incrementally update this list whenever
a new watch event is watched from etcd.
We may consider reusing the existing structures cache.Store or cache.Indexer
("pkg/client/cache") but this is not a hard requirement.
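A minimal sketch of such a cyclic buffer is below. It is illustrative only: a real implementation would need locking, proper typing, and the incrementally maintained LIST snapshot mentioned above.

```
package cache

// event pairs the etcd-derived resourceVersion with the decoded object.
type event struct {
	resourceVersion uint64
	object          interface{}
}

// historyWindow is a fixed-capacity rolling history of watch events.
// When full, adding a new event overwrites the oldest one.
type historyWindow struct {
	buf   []event
	start int // index of the oldest event
	size  int // number of events currently stored
}

func newHistoryWindow(capacity int) *historyWindow {
	return &historyWindow{buf: make([]event, capacity)}
}

// add appends an event, dropping the oldest one when the window is full.
func (w *historyWindow) add(e event) {
	if w.size < len(w.buf) {
		w.buf[(w.start+w.size)%len(w.buf)] = e
		w.size++
		return
	}
	w.buf[w.start] = e
	w.start = (w.start + 1) % len(w.buf)
}

// since returns all cached events newer than rv, oldest first. A second
// return value of false means rv has already fallen out of the window and
// the caller must relist instead.
func (w *historyWindow) since(rv uint64) ([]event, bool) {
	if w.size > 0 && w.buf[w.start].resourceVersion > rv+1 {
		return nil, false // history no longer reaches back far enough
	}
	var out []event
	for i := 0; i < w.size; i++ {
		e := w.buf[(w.start+i)%len(w.buf)]
		if e.resourceVersion > rv {
			out = append(out, e)
		}
	}
	return out, true
}
```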
3. Create the new implementation of the API, that will internally have a
single watch open to etcd and will store the data received from etcd in
the FIFO cache - this includes implementing registration of a new watcher,
which will start a new go-routine responsible for iterating over the cache
and sending all the objects the watcher is interested in (by applying the
filtering function) to the watcher.
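Continuing the sketch from step 2, the per-watcher go-routine could look roughly like this (again illustrative; locking, buffering limits, and shutdown are omitted):

```
package cache

import "errors"

var errTooOld = errors.New("requested resourceVersion is too old; relist required")

// watcher receives the filtered stream of events for one client.
type watcher struct {
	filter func(object interface{}) bool
	result chan event
}

// register replays cached history newer than resourceVersion through the
// filter in a dedicated go-routine and returns the watcher. Blocking live
// deliveries until the replay has caught up is not shown here.
func register(w *historyWindow, resourceVersion uint64, filter func(interface{}) bool) (*watcher, error) {
	replay, ok := w.since(resourceVersion)
	if !ok {
		return nil, errTooOld // the caller must relist and retry
	}
	wa := &watcher{filter: filter, result: make(chan event, len(replay)+16)}
	go func() {
		for _, e := range replay {
			if wa.filter(e.object) {
				wa.result <- e
			}
		}
		// ...once caught up, the single etcd watch loop feeds new events here.
	}()
	return wa, nil
}
```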
4. Add support for processing "error too old" from etcd, which will require:
- disconnecting all the watchers
- clearing the internal cache and relisting all objects from etcd
- starting to accept watchers again

5. Enable watch in apiserver for some of the existing resource types - this
should require only changes at the initialization level.

6. The next step will be to incorporate some indexing mechanism, but details
of it are TBD.


### Future optimizations:

1. The implementation of watch in apiserver internally will open a single
watch to etcd, responsible for watching all the changes of objects of a given
resource type. However, this watch can potentially expire at any time and
reconnecting can return "too old resource version". In that case relisting is
necessary. To avoid LIST requests coming from all watchers at the same time,
we can introduce an additional etcd event type:
[EtcdResync](https://releases.k8s.io/release-1.1/pkg/storage/etcd/etcd_watcher.go#L36)

Whenever relisting is done to refresh the internal watch to etcd, an
EtcdResync event will be sent to all the watchers. It will contain the
full list of all the objects the watcher is interested in (appropriately
filtered) as the parameter of this watch event.
Thus, we need to create the EtcdResync event, extend watch.Interface and
its implementations to support it and handle those events appropriately
in places like
[Reflector](https://releases.k8s.io/release-1.1/pkg/client/cache/reflector.go)

However, this might turn out to be an unnecessary optimization if the apiserver
always keeps up (which is possible in the new design). We will work
out all the necessary details at that point.


<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,288 @@
---
layout: docwithnav
title: "WARNING:"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

---

# WARNING:

## This document is outdated. It is superseded by [the horizontal pod autoscaler design doc](../design/horizontal-pod-autoscaler.html).

---

## Abstract

Auto-scaling is a data-driven feature that allows users to increase or decrease capacity as needed by controlling the
number of pods deployed within the system automatically.

## Motivation

Applications experience peaks and valleys in usage. In order to respond to increases and decreases in load, administrators
scale their applications by adding computing resources. In the cloud computing environment this can be
done automatically based on statistical analysis and thresholds.

### Goals

* Provide a concrete proposal for implementing auto-scaling pods within Kubernetes
* Implementation proposal should be in line with current discussions in existing issues:
  * Scale verb - [1629](http://issue.k8s.io/1629)
  * Config conflicts - [Config](https://github.com/kubernetes/kubernetes/blob/c7cb991987193d4ca33544137a5cb7d0292cf7df/docs/config.md#automated-re-configuration-processes)
  * Rolling updates - [1353](http://issue.k8s.io/1353)
  * Multiple scalable types - [1624](http://issue.k8s.io/1624)

## Constraints and Assumptions

* This proposal is for horizontal scaling only. Vertical scaling will be handled in [issue 2072](http://issue.k8s.io/2072)
* `ReplicationControllers` will not know about the auto-scaler, they are the target of the auto-scaler. The `ReplicationController` responsibilities are
constrained to only ensuring that the desired number of pods are operational per the [Replication Controller Design](../user-guide/replication-controller.html#responsibilities-of-the-replication-controller)
* Auto-scalers will be loosely coupled with data gathering components in order to allow a wide variety of input sources
* Auto-scalable resources will support a scale verb ([1629](http://issue.k8s.io/1629))
such that the auto-scaler does not directly manipulate the underlying resource.
* Initially, most thresholds will be set by application administrators. It should be possible for an autoscaler to be
written later that sets thresholds automatically based on past behavior (CPU used vs incoming requests).
* The auto-scaler must be aware of user defined actions so it does not override them unintentionally (for instance someone
explicitly setting the replica count to 0 should mean that the auto-scaler does not try to scale the application up)
* It should be possible to write and deploy a custom auto-scaler without modifying existing auto-scalers
* Auto-scalers must be able to monitor multiple replication controllers while only targeting a single scalable
object (for now a ReplicationController, but in the future it could be a job or any resource that implements scale)

## Use Cases

### Scaling based on traffic

The current, most obvious, use case is scaling an application based on network traffic like requests per second. Most
applications will expose one or more network endpoints for clients to connect to. Many of those endpoints will be load
balanced or situated behind a proxy - the data from those proxies and load balancers can be used to estimate client to
server traffic for applications. This is the primary, but not sole, source of data for making decisions.
|
||||
|
||||
Within Kubernetes a [kube proxy](../user-guide/services.html#ips-and-vips)
|
||||
running on each node directs service requests to the underlying implementation.
|
||||
|
||||
While the proxy provides internal inter-pod connections, there will be L3 and L7 proxies and load balancers that manage
|
||||
traffic to backends. OpenShift, for instance, adds a "route" resource for defining external to internal traffic flow.
|
||||
The "routers" are HAProxy or Apache load balancers that aggregate many different services and pods and can serve as a
|
||||
data source for the number of backends.
|
||||
|
||||
### Scaling based on predictive analysis
|
||||
|
||||
Scaling may also occur based on predictions of system state like anticipated load, historical data, etc. Hand in hand
|
||||
with scaling based on traffic, predictive analysis may be used to determine anticipated system load and scale the application automatically.
|
||||
|
||||
### Scaling based on arbitrary data
|
||||
|
||||
Administrators may wish to scale the application based on any number of arbitrary data points such as job execution time or
|
||||
duration of active sessions. There are any number of reasons an administrator may wish to increase or decrease capacity which
|
||||
means the auto-scaler must be a configurable, extensible component.
|
||||
|
||||
## Specification
|
||||
|
||||
In order to facilitate talking about auto-scaling the following definitions are used:
|
||||
|
||||
* `ReplicationController` - the first building block of auto scaling. Pods are deployed and scaled by a `ReplicationController`.
|
||||
* kube proxy - The proxy handles internal inter-pod traffic, an example of a data source to drive an auto-scaler
|
||||
* L3/L7 proxies - A routing layer handling outside to inside traffic requests, an example of a data source to drive an auto-scaler
|
||||
* auto-scaler - scales replicas up and down by using the `scale` endpoint provided by scalable resources (`ReplicationController`)
|
||||
|
||||
|
||||
### Auto-Scaler
|
||||
|
||||
The Auto-Scaler is a state reconciler responsible for checking data against configured scaling thresholds
|
||||
and calling the `scale` endpoint to change the number of replicas. The scaler will
|
||||
use a client/cache implementation to receive watch data from the data aggregators and respond to them by
|
||||
scaling the application. Auto-scalers are created and defined like other resources via REST endpoints and belong to the
|
||||
namespace just as a `ReplicationController` or `Service`.
|
||||
|
||||
Since an auto-scaler is a durable object it is best represented as a resource.
|
||||
|
||||
{% highlight go %}
|
||||
{% raw %}
|
||||
//The auto scaler interface
|
||||
type AutoScalerInterface interface {
|
||||
//ScaleApplication adjusts a resource's replica count. Calls scale endpoint.
|
||||
//Args to this are based on what the endpoint
|
||||
//can support. See http://issue.k8s.io/1629
|
||||
ScaleApplication(num int) error
|
||||
}
|
||||
|
||||
type AutoScaler struct {
|
||||
//common construct
|
||||
TypeMeta
|
||||
//common construct
|
||||
ObjectMeta
|
||||
|
||||
//Spec defines the configuration options that drive the behavior for this auto-scaler
|
||||
Spec AutoScalerSpec
|
||||
|
||||
//Status defines the current status of this auto-scaler.
|
||||
Status AutoScalerStatus
|
||||
}
|
||||
|
||||
type AutoScalerSpec struct {
|
||||
//AutoScaleThresholds holds a collection of AutoScaleThresholds that drive the auto scaler
|
||||
AutoScaleThresholds []AutoScaleThreshold
|
||||
|
||||
//Enabled turns auto scaling on or off
|
||||
Enabled bool
|
||||
|
||||
//MaxAutoScaleCount defines the max replicas that the auto scaler can use.
|
||||
//This value must be greater than 0 and >= MinAutoScaleCount
|
||||
MaxAutoScaleCount int
|
||||
|
||||
//MinAutoScaleCount defines the minimum number replicas that the auto scaler can reduce to,
|
||||
//0 means that the application is allowed to idle
|
||||
MinAutoScaleCount int
|
||||
|
||||
//TargetSelector provides the scalable target(s). Right now this is a ReplicationController
|
||||
//in the future it could be a job or any resource that implements scale.
|
||||
TargetSelector map[string]string
|
||||
|
||||
//MonitorSelector defines a set of capacity that the auto-scaler is monitoring
|
||||
//(replication controllers). Monitored objects are used by thresholds to examine
|
||||
//statistics. Example: get statistic X for object Y to see if threshold is passed
|
||||
MonitorSelector map[string]string
|
||||
}
|
||||
|
||||
type AutoScalerStatus struct {
|
||||
// TODO: open for discussion on what meaningful information can be reported in the status
|
||||
// The status may return the replica count here but we may want more information
|
||||
// such as if the count reflects a threshold being passed
|
||||
}
|
||||
|
||||
|
||||
//AutoScaleThresholdInterface abstracts the data analysis from the auto-scaler
|
||||
//example: scale by 1 (Increment) when RequestsPerSecond (Type) passes the
|
||||
//comparison (Comparison) of 50 (Value) for 30 seconds (Duration)
|
||||
type AutoScaleThresholdInterface interface {
|
||||
//called by the auto-scaler to determine if this threshold is met or not
|
||||
ShouldScale() bool
|
||||
}
|
||||
|
||||
|
||||
//AutoScaleThreshold is a single statistic used to drive the auto-scaler in scaling decisions
|
||||
type AutoScaleThreshold struct {
|
||||
// Type is the type of threshold being used, intention or value
|
||||
Type AutoScaleThresholdType
|
||||
|
||||
// ValueConfig holds the config for value based thresholds
|
||||
ValueConfig AutoScaleValueThresholdConfig
|
||||
|
||||
// IntentionConfig holds the config for intention based thresholds
|
||||
IntentionConfig AutoScaleIntentionThresholdConfig
|
||||
}
|
||||
|
||||
// AutoScaleIntentionThresholdConfig holds configuration for intention based thresholds
|
||||
// an intention-based threshold defines no increment; the scaler will adjust by 1 accordingly
|
||||
// and maintain once the intention is reached. Also, no selector is defined, the intention
|
||||
// should dictate the selector used for statistics. Same for duration although we
|
||||
// may want a configurable duration later so intentions are more customizable.
|
||||
type AutoScaleIntentionThresholdConfig struct {
|
||||
// Intent is the lexicon of what intention is requested
|
||||
Intent AutoScaleIntentionType
|
||||
|
||||
// Value is intention dependent in terms of above, below, equal and represents
|
||||
// the value to check against
|
||||
Value float64
|
||||
}
|
||||
|
||||
// AutoScaleValueThresholdConfig holds configuration for value based thresholds
|
||||
type AutoScaleValueThresholdConfig struct {
|
||||
//Increment determines how the auto-scaler should scale up or down (positive number to
|
||||
//scale up based on this threshold, negative number to scale down by this threshold)
|
||||
Increment int
|
||||
//Selector represents the retrieval mechanism for a statistic value from statistics
|
||||
//storage. Once statistics are better defined the retrieval mechanism may change.
|
||||
//Ultimately, the selector returns a representation of a statistic that can be
|
||||
//compared against the threshold value.
|
||||
Selector map[string]string
|
||||
//Duration is the time lapse after which this threshold is considered passed
|
||||
Duration time.Duration
|
||||
//Value is the number at which, after the duration is passed, this threshold is considered
|
||||
//to be triggered
|
||||
Value float64
|
||||
//Comparison component to be applied to the value.
|
||||
Comparison string
|
||||
}
|
||||
|
||||
// AutoScaleThresholdType is either intention based or value based
|
||||
type AutoScaleThresholdType string
|
||||
|
||||
// AutoScaleIntentionType is a lexicon for intentions such as "cpu-utilization",
|
||||
// "max-rps-per-endpoint"
|
||||
type AutoScaleIntentionType string
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
#### Boundary Definitions
|
||||
|
||||
The `AutoScaleThreshold` definitions provide the boundaries for the auto-scaler. By defining comparisons that form a range
|
||||
along with positive and negative increments you may define bi-directional scaling. For example the upper bound may be
|
||||
specified as "when requests per second rise above 50 for 30 seconds scale the application up by 1" and a lower bound may
|
||||
be specified as "when requests per second fall below 25 for 30 seconds scale the application down by 1 (implemented by using -1)".
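
Purely as an illustration (assuming the types defined above and the standard
`time` package; the "statistic" selector key and the comparison strings are
made up for the example), that bi-directional boundary could be expressed as
two value-based threshold configs:

{% highlight go %}
{% raw %}
// Upper bound: above 50 requests/sec for 30 seconds, add one replica.
var scaleUp = AutoScaleValueThresholdConfig{
	Increment:  1,
	Selector:   map[string]string{"statistic": "requests-per-second"},
	Duration:   30 * time.Second,
	Value:      50,
	Comparison: "greater-than",
}

// Lower bound: below 25 requests/sec for 30 seconds, remove one replica.
var scaleDown = AutoScaleValueThresholdConfig{
	Increment:  -1,
	Selector:   map[string]string{"statistic": "requests-per-second"},
	Duration:   30 * time.Second,
	Value:      25,
	Comparison: "less-than",
}
{% endraw %}
{% endhighlight %}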
|
||||
|
||||
### Data Aggregator
|
||||
|
||||
This section has intentionally been left empty. I will defer to folks who have more experience gathering and analyzing
|
||||
time series statistics.
|
||||
|
||||
Data aggregation is opaque to the auto-scaler resource. The auto-scaler is configured to use `AutoScaleThresholds`
|
||||
that know how to work with the underlying data in order to know if an application must be scaled up or down. Data aggregation
|
||||
must feed a common data structure to ease the development of `AutoScaleThreshold`s but it does not matter to the
|
||||
auto-scaler whether this occurs in a push or pull implementation, whether or not the data is stored at a granular level,
|
||||
or what algorithm is used to determine the final statistics value. Ultimately, the auto-scaler only requires that a statistic
|
||||
resolves to a value that can be checked against a configured threshold.
|
||||
|
||||
Of note: If the statistics gathering mechanisms can be initialized with a registry other components storing statistics can
|
||||
potentially piggyback on this registry.
|
||||
|
||||
### Multi-target Scaling Policy
|
||||
|
||||
If multiple scalable targets satisfy the `TargetSelector` criteria the auto-scaler should be configurable as to which
|
||||
target(s) are scaled. To begin with, if multiple targets are found the auto-scaler will scale the largest target up
|
||||
or down as appropriate. In the future this may be more configurable.
|
||||
|
||||
### Interactions with a deployment
|
||||
|
||||
In a deployment it is likely that multiple replication controllers must be monitored. For instance, in a [rolling deployment](../user-guide/replication-controller.html#rolling-updates)
|
||||
there will be multiple replication controllers, with one scaling up and another scaling down. This means that an
|
||||
auto-scaler must be aware of the entire set of capacity that backs a service so it does not fight with the deployer. `AutoScalerSpec.MonitorSelector`
|
||||
is what provides this ability. By using a selector that spans the entire service the auto-scaler can monitor capacity
|
||||
of multiple replication controllers and check that capacity against the `AutoScalerSpec.MaxAutoScaleCount` and
|
||||
`AutoScalerSpec.MinAutoScaleCount` while still only targeting a specific set of `ReplicationController`s with `TargetSelector`.
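
For illustration (the label keys, values and numbers below are invented for
the example, not part of the proposal), such an auto-scaler spec might look
roughly like this, monitoring every replication controller behind the
"frontend" service while only targeting the ones marked as scalable:

{% highlight go %}
{% raw %}
var frontendScaler = AutoScalerSpec{
	// Monitor capacity across all RCs backing the "frontend" service (old and
	// new RCs during a rolling deployment), so MinAutoScaleCount and
	// MaxAutoScaleCount are checked against the whole service.
	MonitorSelector: map[string]string{"service": "frontend"},

	// Only scale RCs carrying this label; the deployment orchestration decides
	// which RCs carry it.
	TargetSelector: map[string]string{"service": "frontend", "autoscale": "true"},

	MinAutoScaleCount: 2,
	MaxAutoScaleCount: 20,
	Enabled:           true,

	// AutoScaleThresholds would be populated as in the boundary example above.
}
{% endraw %}
{% endhighlight %}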
|
||||
|
||||
In the course of a deployment it is up to the deployment orchestration to decide how to manage the labels
|
||||
on the replication controllers if it needs to ensure that only specific replication controllers are targeted by
|
||||
the auto-scaler. By default, the auto-scaler will scale the largest replication controller that meets the target label
|
||||
selector criteria.
|
||||
|
||||
During deployment orchestration the auto-scaler may be making decisions to scale its target up or down. In order to prevent
|
||||
the scaler from fighting with a deployment process that is scaling one replication controller up and scaling another one
|
||||
down the deployment process must assume that the current replica count may be changed by objects other than itself and
|
||||
account for this in the scale up or down process. Therefore, the deployment process may no longer target an exact number
|
||||
of instances to be deployed. It must be satisfied that the replica count for the deployment meets or exceeds the number
|
||||
of requested instances.
|
||||
|
||||
Auto-scaling down in a deployment scenario is a special case. In order for the deployment to complete successfully the
|
||||
deployment orchestration must ensure that the desired number of instances that are supposed to be deployed has been met.
|
||||
If the auto-scaler is trying to scale the application down (due to no traffic, or other statistics) then the deployment
|
||||
process and auto-scaler are fighting to increase and decrease the count of the targeted replication controller. In order
|
||||
to prevent this, deployment orchestration should notify the auto-scaler that a deployment is occurring. This will
|
||||
temporarily disable negative decrement thresholds until the deployment process is completed. It is more important for
|
||||
an auto-scaler to be able to grow capacity during a deployment than to shrink the number of instances precisely.
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,165 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Kubernetes compute resource metrics API"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Kubernetes compute resource metrics API
|
||||
|
||||
## Goals
|
||||
|
||||
Provide resource usage metrics on pods and nodes on the API server to be used
|
||||
by the scheduler to improve job placement, utilization, etc. and by end users
|
||||
to understand the resource utilization of their jobs. Horizontal and vertical
|
||||
auto-scaling are also near-term uses.
|
||||
|
||||
## Current state
|
||||
|
||||
Right now, the Kubelet exports container metrics via an API endpoint. This
|
||||
information is not gathered nor served by the Kubernetes API server.
|
||||
|
||||
## Use cases
|
||||
|
||||
The first user will be kubectl. The resource usage data can be shown to the
|
||||
user via a periodically refreshing interface similar to `top` on Unix-like
|
||||
systems. This info could let users assign resource limits more efficiently.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
$ kubectl top kubernetes-minion-abcd
|
||||
POD CPU MEM
|
||||
monitoring-heapster-abcde 0.12 cores 302 MB
|
||||
kube-ui-v1-nd7in 0.07 cores 130 MB
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
A second user will be the scheduler. To assign pods to nodes efficiently, the
|
||||
scheduler needs to know the current free resources on each node.
|
||||
|
||||
## Proposed endpoints
|
||||
|
||||
* `/api/v1/namespaces/myns/podMetrics/mypod`
|
||||
* `/api/v1/nodeMetrics/myNode`
|
||||
|
||||
The derived metrics include the mean, max and a few percentiles of the list of
|
||||
values.
|
||||
|
||||
We are not adding new methods to pods and nodes, e.g.
|
||||
`/api/v1/namespaces/myns/pods/mypod/metrics`, for a number of reasons. For
|
||||
example, having a separate endpoint allows fetching all the pod metrics in a
|
||||
single request. The rate of change of the data is also too high to include in
|
||||
the pod resource.
|
||||
|
||||
In the future, if any use cases are found that would benefit from RC,
|
||||
namespace or service aggregation, metrics at those levels could also be
|
||||
exposed taking advantage of the fact that Heapster already does aggregation
|
||||
and metrics for them.
|
||||
|
||||
Initially, this proposal included raw metrics alongside the derived metrics.
|
||||
After revising the use cases, it was clear that raw metrics could be left out
|
||||
of this proposal. They can be dealt with in a separate proposal, exposing them
|
||||
in the Kubelet API via proper versioned endpoints for Heapster to poll
|
||||
periodically.
|
||||
|
||||
This also means that the amount of data pushed by each Kubelet to the API
|
||||
server will be much smaller.
|
||||
|
||||
## Data gathering
|
||||
|
||||
We will use a push-based system. Each kubelet will periodically - every 10s -
|
||||
POST its derived metrics to the API server. Then, any users of the metrics can
|
||||
register as watchers to receive the new metrics when they are available.
|
||||
|
||||
Users of the metrics may also periodically poll the API server instead of
|
||||
registering as a watcher, keeping in mind that new data may only be available
|
||||
every 10 seconds. If any user requires metrics that are either more specific
|
||||
(e.g. last 1s) or updated more often, they should use the metrics pipeline via
|
||||
Heapster.
|
||||
|
||||
The API server will not hold any of this data directly. For our initial
|
||||
purposes, it will hold the most recent metrics obtained from each node in
|
||||
etcd. Then, when polled for metrics, the API server will only serve this most
|
||||
recent data per node.
|
||||
|
||||
Benchmarks will be run with etcd to see if it can keep up with the frequent
|
||||
writes of data. If it turns out that etcd doesn't scale well enough, we will
|
||||
have to switch to a different storage system.
|
||||
|
||||
If a pod gets deleted, the API server will get rid of any metrics it may
|
||||
currently be holding for it.
|
||||
|
||||
The clients watching the metrics data may cache it for longer periods of time.
|
||||
The clearest example would be Heapster.
|
||||
|
||||
In the future, we might want to store the metrics differently:
|
||||
|
||||
* via heapster - Since heapster keeps data for a period of time, we could
|
||||
redirect requests to the API server to heapster instead of using etcd. This
|
||||
would also allow serving metrics other than the latest ones.
|
||||
|
||||
An edge case that this proposal doesn't take into account is kubelets being
|
||||
restarted. If any of them are, with a simple implementation they would lose
|
||||
historical data and thus take hours to gather enough information to provide
|
||||
relevant metrics again. We might want to use persistent storage directly or in
|
||||
the future to improve that situation.
|
||||
|
||||
More information on kubelet checkpoints can be read on
|
||||
[#489](https://issues.k8s.io/489).
|
||||
|
||||
## Data structure
|
||||
|
||||
{% highlight go %}
|
||||
{% raw %}
|
||||
type DerivedPodMetrics struct {
|
||||
TypeMeta
|
||||
ObjectMeta // should have pod name
|
||||
// the key is the container name
|
||||
Containers []struct {
|
||||
ContainerReference *Container
|
||||
Metrics MetricsWindows
|
||||
}
|
||||
}
|
||||
|
||||
type DerivedNodeMetrics struct {
|
||||
TypeMeta
|
||||
ObjectMeta // should have node name
|
||||
NodeMetrics MetricsWindows
|
||||
SystemContainers []struct {
|
||||
ContainerReference *Container
|
||||
Metrics MetricsWindows
|
||||
}
|
||||
}
|
||||
|
||||
// Last overlapping 10s, 1m, 1h and 1d as a start
|
||||
// Updated every 10s, so the 10s window is sequential and the rest are
|
||||
// rolling.
|
||||
type MetricsWindows map[time.Duration]DerivedMetrics
|
||||
|
||||
type DerivedMetrics struct {
|
||||
// End time of all the time windows in Metrics
|
||||
EndTime unversioned.Time `json:"endtime"`
|
||||
|
||||
Mean ResourceUsage `json:"mean"`
|
||||
Max ResourceUsage `json:"max"`
|
||||
NinetyFive ResourceUsage `json:"95th"`
|
||||
}
|
||||
|
||||
type ResourceUsage map[resource.Type]resource.Quantity
|
||||
{% endraw %}
|
||||
{% endhighlight %}
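
To make the derived values concrete, here is a small, self-contained sketch
(not part of the proposal; the helper name and the use of plain float64
CPU-core samples instead of resource.Quantity are assumptions made to keep the
example short) of how a kubelet might reduce one window of raw samples to the
mean, max and 95th percentile before POSTing them:

{% highlight go %}
{% raw %}
package main

import (
	"fmt"
	"sort"
)

// derived mirrors the shape of DerivedMetrics for a single resource.
type derived struct {
	Mean, Max, NinetyFive float64
}

// deriveWindow reduces the raw samples of one time window (e.g. the last
// minute of 10s CPU readings, in cores) to the statistics served by the API.
func deriveWindow(samples []float64) derived {
	if len(samples) == 0 {
		return derived{}
	}
	sorted := append([]float64(nil), samples...)
	sort.Float64s(sorted)

	sum := 0.0
	for _, s := range sorted {
		sum += s
	}
	p95 := int(0.95 * float64(len(sorted)))
	if p95 >= len(sorted) {
		p95 = len(sorted) - 1
	}
	return derived{
		Mean:       sum / float64(len(sorted)),
		Max:        sorted[len(sorted)-1],
		NinetyFive: sorted[p95],
	}
}

func main() {
	cpu := []float64{0.08, 0.10, 0.11, 0.12, 0.30, 0.09}
	fmt.Printf("%+v\n", deriveWindow(cpu))
}
{% endraw %}
{% endhighlight %}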
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,257 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Deployment"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Deployment
|
||||
|
||||
## Abstract
|
||||
|
||||
A proposal for implementing a new resource - Deployment - which will enable
|
||||
declarative config updates for Pods and ReplicationControllers.
|
||||
|
||||
Users will be able to create a Deployment, which will spin up
|
||||
a ReplicationController to bring up the desired pods.
|
||||
Users can also target the Deployment at existing ReplicationControllers, in
|
||||
which case the new RC will replace the existing ones. The exact mechanics of
|
||||
replacement depends on the DeploymentStrategy chosen by the user.
|
||||
DeploymentStrategies are explained in detail in a later section.
|
||||
|
||||
## Implementation
|
||||
|
||||
### API Object
|
||||
|
||||
The `Deployment` API object will have the following structure:
|
||||
|
||||
{% highlight go %}
|
||||
{% raw %}
|
||||
type Deployment struct {
|
||||
TypeMeta
|
||||
ObjectMeta
|
||||
|
||||
// Specification of the desired behavior of the Deployment.
|
||||
Spec DeploymentSpec
|
||||
|
||||
// Most recently observed status of the Deployment.
|
||||
Status DeploymentStatus
|
||||
}
|
||||
|
||||
type DeploymentSpec struct {
|
||||
// Number of desired pods. This is a pointer to distinguish between explicit
|
||||
// zero and not specified. Defaults to 1.
|
||||
Replicas *int
|
||||
|
||||
// Label selector for pods. Existing ReplicationControllers whose pods are
|
||||
// selected by this will be scaled down. New ReplicationControllers will be
|
||||
// created with this selector, with a unique label as defined by UniqueLabelKey.
|
||||
// If Selector is empty, it is defaulted to the labels present on the Pod template.
|
||||
Selector map[string]string
|
||||
|
||||
// Describes the pods that will be created.
|
||||
Template *PodTemplateSpec
|
||||
|
||||
// The deployment strategy to use to replace existing pods with new ones.
|
||||
Strategy DeploymentStrategy
|
||||
|
||||
// Key of the selector that is added to existing RCs (and label key that is
|
||||
// added to its pods) to prevent the existing RCs to select new pods (and old
|
||||
// pods being selected by new RC).
|
||||
// Users can set this to an empty string to indicate that the system should
|
||||
// not add any selector and label. If unspecified, system uses
|
||||
// "deployment.kubernetes.io/podTemplateHash".
|
||||
// Value of this key is hash of DeploymentSpec.PodTemplateSpec.
|
||||
UniqueLabelKey *string
|
||||
}
|
||||
|
||||
type DeploymentStrategy struct {
|
||||
// Type of deployment. Can be "Recreate" or "RollingUpdate".
|
||||
Type DeploymentStrategyType
|
||||
|
||||
// TODO: Update this to follow our convention for oneOf, whatever we decide it
|
||||
// to be.
|
||||
// Rolling update config params. Present only if DeploymentStrategyType =
|
||||
// RollingUpdate.
|
||||
RollingUpdate *RollingUpdateDeploymentStrategy
|
||||
}
|
||||
|
||||
type DeploymentStrategyType string
|
||||
|
||||
const (
|
||||
// Kill all existing pods before creating new ones.
|
||||
RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
|
||||
|
||||
// Replace the old RCs by the new one using rolling update, i.e. gradually scale down the old RCs and scale up the new one.
|
||||
RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
|
||||
)
|
||||
|
||||
// Spec to control the desired behavior of rolling update.
|
||||
type RollingUpdateDeploymentStrategy struct {
|
||||
// The maximum number of pods that can be unavailable during the update.
|
||||
// Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%).
|
||||
// Absolute number is calculated from percentage by rounding up.
|
||||
// This can not be 0 if MaxSurge is 0.
|
||||
// By default, a fixed value of 1 is used.
|
||||
// Example: when this is set to 30%, the old RC can be scaled down by 30%
|
||||
// immediately when the rolling update starts. Once new pods are ready, old RC
|
||||
// can be scaled down further, followed by scaling up the new RC, ensuring
|
||||
// that at least 70% of original number of pods are available at all times
|
||||
// during the update.
|
||||
MaxUnavailable IntOrString
|
||||
|
||||
// The maximum number of pods that can be scheduled above the original number of
|
||||
// pods.
|
||||
// Value can be an absolute number (ex: 5) or a percentage of total pods at
|
||||
// the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0.
|
||||
// Absolute number is calculated from percentage by rounding up.
|
||||
// By default, a value of 1 is used.
|
||||
// Example: when this is set to 30%, the new RC can be scaled up by 30%
|
||||
// immediately when the rolling update starts. Once old pods have been killed,
|
||||
// new RC can be scaled up further, ensuring that total number of pods running
|
||||
// at any time during the update is at most 130% of the original pods.
|
||||
MaxSurge IntOrString
|
||||
|
||||
// Minimum number of seconds for which a newly created pod should be ready
|
||||
// without any of its container crashing, for it to be considered available.
|
||||
// Defaults to 0 (pod will be considered available as soon as it is ready)
|
||||
MinReadySeconds int
|
||||
}
|
||||
|
||||
type DeploymentStatus struct {
|
||||
// Total number of ready pods targeted by this deployment (this
|
||||
// includes both the old and new pods).
|
||||
Replicas int
|
||||
|
||||
// Total number of new ready pods with the desired template spec.
|
||||
UpdatedReplicas int
|
||||
}
|
||||
|
||||
{% endraw %}
|
||||
{% endhighlight %}
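
As a small illustration of the MaxUnavailable/MaxSurge comments above (the
helper below is hypothetical and simply follows the round-up rule stated in
those comments; it assumes the percentage form of IntOrString has already been
parsed into an int):

{% highlight go %}
{% raw %}
// resolvePercent converts a percentage into an absolute pod count, rounding
// up, relative to the pod count at the start of the update.
func resolvePercent(percent, startingPods int) int {
	return (percent*startingPods + 99) / 100
}

// Examples:
//   resolvePercent(30, 10) == 3  (3.0)
//   resolvePercent(30, 7)  == 3  (2.1 rounded up)
// So with 10 pods and MaxUnavailable of 30%, at most 3 pods may be
// unavailable, i.e. at least 7 of the original 10 stay available.
{% endraw %}
{% endhighlight %}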
|
||||
|
||||
### Controller
|
||||
|
||||
#### Deployment Controller
|
||||
|
||||
The DeploymentController will make Deployments happen.
|
||||
It will watch Deployment objects in etcd.
|
||||
For each pending deployment, it will:
|
||||
|
||||
1. Find all RCs whose label selector is a superset of DeploymentSpec.Selector.
|
||||
- For now, we will do this in the client - list all RCs and then filter the
|
||||
ones we want. Eventually, we want to expose this in the API.
|
||||
2. The new RC can have the same selector as the old RC and hence we add a unique
|
||||
selector to all these RCs (and the corresponding label to their pods) to ensure
|
||||
that they do not select the newly created pods (and that old pods are not selected by the
|
||||
new RC).
|
||||
- The label key will be "deployment.kubernetes.io/podTemplateHash".
|
||||
- The label value will be the hash of the podTemplateSpec for that RC without
|
||||
this label. This value will be unique for all RCs, since PodTemplateSpec should be unique.
|
||||
- If the RCs and pods don't already have this label and selector:
|
||||
- We will first add this to RC.PodTemplateSpec.Metadata.Labels for all RCs to
|
||||
ensure that all new pods that they create will have this label.
|
||||
- Then we will add this label to their existing pods and then add this as a selector
|
||||
to that RC.
|
||||
3. Find if there exists an RC for which value of "deployment.kubernetes.io/podTemplateHash" label
|
||||
is the same as the hash of DeploymentSpec.PodTemplateSpec. If it exists already, then
|
||||
this is the RC that will be ramped up. If there is no such RC, then we create
|
||||
a new one using DeploymentSpec and then add a "deployment.kubernetes.io/podTemplateHash" label
|
||||
to it. RCSpec.replicas = 0 for a newly created RC.
|
||||
4. Scale up the new RC and scale down the old ones as per the DeploymentStrategy.
|
||||
- Raise an event if we detect an error, like new pods failing to come up.
|
||||
5. Go back to step 1 unless the new RC has been ramped up to desired replicas
|
||||
and the old RCs have been ramped down to 0.
|
||||
6. Cleanup.
|
||||
|
||||
DeploymentController is stateless so that it can recover in case it crashes during a deployment.
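
As a rough illustration of how the hash label value from steps 2 and 3 could
be produced (the hash function, the toy type and the field set below are all
assumptions made for the example; the real implementation would hash the full
PodTemplateSpec, excluding the hash label itself):

{% highlight go %}
{% raw %}
package main

import (
	"fmt"
	"hash/adler32"
	"sort"
)

// podTemplate is a toy stand-in for PodTemplateSpec.
type podTemplate struct {
	Labels map[string]string
	Image  string
}

// podTemplateHash returns a deterministic hash of the template, used as the
// value of the "deployment.kubernetes.io/podTemplateHash" label.
func podTemplateHash(t podTemplate) string {
	h := adler32.New()
	keys := make([]string, 0, len(t.Labels))
	for k := range t.Labels {
		keys = append(keys, k)
	}
	sort.Strings(keys) // serialize labels in a stable order
	for _, k := range keys {
		fmt.Fprintf(h, "%s=%s;", k, t.Labels[k])
	}
	fmt.Fprintf(h, "image=%s", t.Image)
	return fmt.Sprintf("%d", h.Sum32())
}

func main() {
	t := podTemplate{Labels: map[string]string{"app": "frontend"}, Image: "nginx:1.7"}
	fmt.Println("deployment.kubernetes.io/podTemplateHash =", podTemplateHash(t))
}
{% endraw %}
{% endhighlight %}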
|
||||
|
||||
### MinReadySeconds
|
||||
|
||||
We will implement MinReadySeconds using the Ready condition in Pod. We will add
|
||||
a LastTransitionTime to PodCondition and update kubelet to set Ready to false,
|
||||
each time any container crashes. Kubelet will set Ready condition back to true once
|
||||
all containers are ready. For containers without a readiness probe, we will
|
||||
assume that they are ready as soon as they are up.
|
||||
https://github.com/kubernetes/kubernetes/issues/11234 tracks updating kubelet
|
||||
and https://github.com/kubernetes/kubernetes/issues/12615 tracks adding
|
||||
LastTransitionTime to PodCondition.
|
||||
|
||||
## Changing Deployment mid-way
|
||||
|
||||
### Updating
|
||||
|
||||
Users can update an ongoing deployment before it is completed.
|
||||
In this case, the existing deployment will be stalled and the new one will
|
||||
begin.
|
||||
For example, consider the following case:
|
||||
- User creates a deployment to rolling-update 10 pods with image:v1 to
|
||||
pods with image:v2.
|
||||
- User then updates this deployment to create pods with image:v3,
|
||||
when the image:v2 RC had been ramped up to 5 pods and the image:v1 RC
|
||||
had been ramped down to 5 pods.
|
||||
- When Deployment Controller observes the new deployment, it will create
|
||||
a new RC for creating pods with image:v3. It will then start ramping up this
|
||||
new RC to 10 pods and will ramp down both the existing RCs to 0.
|
||||
|
||||
### Deleting
|
||||
|
||||
Users can pause/cancel a deployment by deleting it before it is completed.
|
||||
Recreating the same deployment will resume it.
|
||||
For example, consider the following case:
|
||||
- User creates a deployment to rolling-update 10 pods with image:v1 to
|
||||
pods with image:v2.
|
||||
- User then deletes this deployment while the old and new RCs are at 5 replicas each.
|
||||
User will end up with 2 RCs with 5 replicas each.
|
||||
User can then create the same deployment again in which case, DeploymentController will
|
||||
notice that the second RC exists already which it can ramp up while ramping down
|
||||
the first one.
|
||||
|
||||
### Rollback
|
||||
|
||||
We want to allow the user to rollback a deployment. To rollback a
|
||||
completed (or ongoing) deployment, user can create (or update) a deployment with
|
||||
DeploymentSpec.PodTemplateSpec = oldRC.PodTemplateSpec.
|
||||
|
||||
## Deployment Strategies
|
||||
|
||||
DeploymentStrategy specifies how the new RC should replace existing RCs.
|
||||
To begin with, we will support 2 types of deployment:
|
||||
* Recreate: We kill all existing RCs and then bring up the new one. This results
|
||||
in quick deployment but there is a downtime when old pods are down but
|
||||
the new ones have not come up yet.
|
||||
* Rolling update: We gradually scale down old RCs while scaling up the new one.
|
||||
This results in a slower deployment, but there is no downtime. At all times
|
||||
during the deployment, there are a few pods available (old or new). The number
|
||||
of available pods and when a pod is considered "available" can be configured
|
||||
using RollingUpdateDeploymentStrategy.
|
||||
|
||||
In the future, we want to support more deployment types.
|
||||
|
||||
## Future
|
||||
|
||||
Apart from the above, we want to add support for the following:
|
||||
* Running the deployment process in a pod: In the future, we can run the deployment process in a pod. Then users can define their own custom deployments and we can run them using the image name.
|
||||
* More DeploymentStrategyTypes: https://github.com/openshift/origin/blob/master/examples/deployment/README.md#deployment-types lists most commonly used ones.
|
||||
* Triggers: Deployment will have a trigger field to identify what triggered the deployment. Options are: Manual/UserTriggered, Autoscaler, NewImage.
|
||||
* Automatic rollback on error: We want to support automatic rollback on error or timeout.
|
||||
|
||||
## References
|
||||
|
||||
- https://github.com/kubernetes/kubernetes/issues/1743 has most of the
|
||||
discussion that resulted in this proposal.
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
Binary file not shown.
After Width: | Height: | Size: 31 KiB |
|
@ -0,0 +1,663 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Kubernetes Cluster Federation"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Kubernetes Cluster Federation
|
||||
|
||||
## (a.k.a. "Ubernetes")
|
||||
|
||||
## Requirements Analysis and Product Proposal
|
||||
|
||||
## _by Quinton Hoole ([quinton@google.com](mailto:quinton@google.com))_
|
||||
|
||||
_Initial revision: 2015-03-05_
|
||||
_Last updated: 2015-08-20_
|
||||
This doc: [tinyurl.com/ubernetesv2](http://tinyurl.com/ubernetesv2)
|
||||
Original slides: [tinyurl.com/ubernetes-slides](http://tinyurl.com/ubernetes-slides)
|
||||
Updated slides: [tinyurl.com/ubernetes-whereto](http://tinyurl.com/ubernetes-whereto)
|
||||
|
||||
## Introduction
|
||||
|
||||
Today, each Kubernetes cluster is a relatively self-contained unit,
|
||||
which typically runs in a single "on-premise" data centre or single
|
||||
availability zone of a cloud provider (Google's GCE, Amazon's AWS,
|
||||
etc).
|
||||
|
||||
Several current and potential Kubernetes users and customers have
|
||||
expressed a keen interest in tying together ("federating") multiple
|
||||
clusters in some sensible way in order to enable the following kinds
|
||||
of use cases (intentionally vague):
|
||||
|
||||
1. _"Preferentially run my workloads in my on-premise cluster(s), but
|
||||
automatically overflow to my cloud-hosted cluster(s) if I run out
|
||||
of on-premise capacity"_.
|
||||
1. _"Most of my workloads should run in my preferred cloud-hosted
|
||||
cluster(s), but some are privacy-sensitive, and should be
|
||||
automatically diverted to run in my secure, on-premise
|
||||
cluster(s)"_.
|
||||
1. _"I want to avoid vendor lock-in, so I want my workloads to run
|
||||
across multiple cloud providers all the time. I change my set of
|
||||
such cloud providers, and my pricing contracts with them,
|
||||
periodically"_.
|
||||
1. _"I want to be immune to any single data centre or cloud
|
||||
availability zone outage, so I want to spread my service across
|
||||
multiple such zones (and ideally even across multiple cloud
|
||||
providers)."_
|
||||
|
||||
The above use cases are by necessity left imprecisely defined. The
|
||||
rest of this document explores these use cases and their implications
|
||||
in further detail, and compares a few alternative high level
|
||||
approaches to addressing them. The idea of cluster federation has
|
||||
informally become known as _"Ubernetes"_.
|
||||
|
||||
## Summary/TL;DR
|
||||
|
||||
Four primary customer-driven use cases are explored in more detail.
|
||||
The two highest priority ones relate to High Availability and
|
||||
Application Portability (between cloud providers, and between
|
||||
on-premise and cloud providers).
|
||||
|
||||
Four primary federation primitives are identified (location affinity,
|
||||
cross-cluster scheduling, service discovery and application
|
||||
migration). Fortunately not all four of these primitives are required
|
||||
for each primary use case, so incremental development is feasible.
|
||||
|
||||
## What exactly is a Kubernetes Cluster?
|
||||
|
||||
A central design concept in Kubernetes is that of a _cluster_. While
|
||||
loosely speaking, a cluster can be thought of as running in a single
|
||||
data center, or cloud provider availability zone, a more precise
|
||||
definition is that each cluster provides:
|
||||
|
||||
1. a single Kubernetes API entry point,
|
||||
1. a consistent, cluster-wide resource naming scheme
|
||||
1. a scheduling/container placement domain
|
||||
1. a service network routing domain
|
||||
1. an authentication and authorization model.
|
||||
|
||||
The above in turn imply the need for a relatively performant, reliable
|
||||
and cheap network within each cluster.
|
||||
|
||||
There is also assumed to be some degree of failure correlation across
|
||||
a cluster, i.e. whole clusters are expected to fail, at least
|
||||
occasionally (due to cluster-wide power and network failures, natural
|
||||
disasters etc). Clusters are often relatively homogenous in that all
|
||||
compute nodes are typically provided by a single cloud provider or
|
||||
hardware vendor, and connected by a common, unified network fabric.
|
||||
But these are not hard requirements of Kubernetes.
|
||||
|
||||
Other classes of Kubernetes deployments than the one sketched above
|
||||
are technically feasible, but come with some challenges of their own,
|
||||
and are not yet common or explicitly supported.
|
||||
|
||||
More specifically, having a Kubernetes cluster span multiple
|
||||
well-connected availability zones within a single geographical region
|
||||
(e.g. US North East, UK, Japan etc) is worthy of further
|
||||
consideration, in particular because it potentially addresses
|
||||
some of these requirements.
|
||||
|
||||
## What use cases require Cluster Federation?
|
||||
|
||||
Let's name a few concrete use cases to aid the discussion:
|
||||
|
||||
## 1. Capacity Overflow
|
||||
|
||||
_"I want to preferentially run my workloads in my on-premise cluster(s), but automatically "overflow" to my cloud-hosted cluster(s) when I run out of on-premise capacity."_
|
||||
|
||||
This idea is known in some circles as "[cloudbursting](http://searchcloudcomputing.techtarget.com/definition/cloud-bursting)".
|
||||
|
||||
**Clarifying questions:** What is the unit of overflow? Individual
|
||||
pods? Probably not always. Replication controllers and their
|
||||
associated sets of pods? Groups of replication controllers
|
||||
(a.k.a. distributed applications)? How are persistent disks
|
||||
overflowed? Can the "overflowed" pods communicate with their
|
||||
brethren and sistren pods and services in the other cluster(s)?
|
||||
Presumably yes, at higher cost and latency, provided that they use
|
||||
external service discovery. Is "overflow" enabled only when creating
|
||||
new workloads/replication controllers, or are existing workloads
|
||||
dynamically migrated between clusters based on fluctuating available
|
||||
capacity? If so, what is the desired behaviour, and how is it
|
||||
achieved? How, if at all, does this relate to quota enforcement
|
||||
(e.g. if we run out of on-premise capacity, can all or only some
|
||||
quotas transfer to other, potentially more expensive off-premise
|
||||
capacity?)
|
||||
|
||||
It seems that most of this boils down to:
|
||||
|
||||
1. **location affinity** (pods relative to each other, and to other
|
||||
stateful services like persistent storage - how is this expressed
|
||||
and enforced?)
|
||||
1. **cross-cluster scheduling** (given location affinity constraints
|
||||
and other scheduling policy, which resources are assigned to which
|
||||
clusters, and by what?)
|
||||
1. **cross-cluster service discovery** (how do pods in one cluster
|
||||
discover and communicate with pods in another cluster?)
|
||||
1. **cross-cluster migration** (how do compute and storage resources,
|
||||
and the distributed applications to which they belong, move from
|
||||
one cluster to another)
|
||||
1. **cross-cluster load-balancing** (how is user traffic directed
|
||||
to an appropriate cluster?)
|
||||
1. **cross-cluster monitoring and auditing** (a.k.a. Unified Visibility)
|
||||
|
||||
## 2. Sensitive Workloads
|
||||
|
||||
_"I want most of my workloads to run in my preferred cloud-hosted
|
||||
cluster(s), but some are privacy-sensitive, and should be
|
||||
automatically diverted to run in my secure, on-premise cluster(s). The
|
||||
list of privacy-sensitive workloads changes over time, and they're
|
||||
subject to external auditing."_
|
||||
|
||||
**Clarifying questions:**
|
||||
1. What kinds of rules determine which
|
||||
workloads go where?
|
||||
1. Is there in fact a requirement to have these rules be
|
||||
declaratively expressed and automatically enforced, or is it
|
||||
acceptable/better to have users manually select where to run
|
||||
their workloads when starting them?
|
||||
1. Is a static mapping from container (or more typically,
|
||||
replication controller) to cluster maintained and enforced?
|
||||
1. If so, is it only enforced on startup, or are things migrated
|
||||
between clusters when the mappings change?
|
||||
|
||||
This starts to look quite similar to "1. Capacity Overflow", and again
|
||||
seems to boil down to:
|
||||
|
||||
1. location affinity
|
||||
1. cross-cluster scheduling
|
||||
1. cross-cluster service discovery
|
||||
1. cross-cluster migration
|
||||
1. cross-cluster monitoring and auditing
|
||||
1. cross-cluster load balancing
|
||||
|
||||
## 3. Vendor lock-in avoidance
|
||||
|
||||
_"My CTO wants us to avoid vendor lock-in, so she wants our workloads
|
||||
to run across multiple cloud providers at all times. She changes our
|
||||
set of preferred cloud providers and pricing contracts with them
|
||||
periodically, and doesn't want to have to communicate and manually
|
||||
enforce these policy changes across the organization every time this
|
||||
happens. She wants it centrally and automatically enforced, monitored
|
||||
and audited."_
|
||||
|
||||
**Clarifying questions:**
|
||||
|
||||
1. How does this relate to other use cases (high availability,
|
||||
capacity overflow etc), as they may all be across multiple vendors.
|
||||
It's probably not strictly speaking a separate
|
||||
use case, but it's brought up so often as a requirement, that it's
|
||||
worth calling out explicitly.
|
||||
1. Is a useful intermediate step to make it as simple as possible to
|
||||
migrate an application from one vendor to another in a one-off fashion?
|
||||
|
||||
Again, I think that this can probably be
|
||||
reformulated as a Capacity Overflow problem - the fundamental
|
||||
principles seem to be the same or substantially similar to those
|
||||
above.
|
||||
|
||||
## 4. "High Availability"
|
||||
|
||||
_"I want to be immune to any single data centre or cloud availability
|
||||
zone outage, so I want to spread my service across multiple such zones
|
||||
(and ideally even across multiple cloud providers), and have my
|
||||
service remain available even if one of the availability zones or
|
||||
cloud providers "goes down"_.
|
||||
|
||||
It seems useful to split this into multiple sets of sub use cases:
|
||||
|
||||
1. Multiple availability zones within a single cloud provider (across
|
||||
which feature sets like private networks, load balancing,
|
||||
persistent disks, data snapshots etc are typically consistent and
|
||||
explicitly designed to inter-operate).
|
||||
1. within the same geographical region (e.g. metro) within which network
|
||||
is fast and cheap enough to be almost analogous to a single data
|
||||
center.
|
||||
1. across multiple geographical regions, where high network cost and
|
||||
poor network performance may be prohibitive.
|
||||
1. Multiple cloud providers (typically with inconsistent feature sets,
|
||||
more limited interoperability, and typically no cheap inter-cluster
|
||||
networking described above).
|
||||
|
||||
The single cloud provider case might be easier to implement (although
|
||||
the multi-cloud provider implementation should just work for a single
|
||||
cloud provider). We propose a high-level design catering for both, with the
|
||||
initial implementation targeting a single cloud provider only.
|
||||
|
||||
**Clarifying questions:**
|
||||
**How does global external service discovery work?** In the steady
|
||||
state, which external clients connect to which clusters? GeoDNS or
|
||||
similar? What is the tolerable failover latency if a cluster goes
|
||||
down? Maybe something like (make up some numbers, notwithstanding
|
||||
some buggy DNS resolvers, TTLs, caches etc) ~3 minutes for ~90% of
|
||||
clients to re-issue DNS lookups and reconnect to a new cluster when
|
||||
their home cluster fails is good enough for most Kubernetes users
|
||||
(or at least way better than the status quo), given that these sorts
|
||||
of failure only happen a small number of times a year?
|
||||
|
||||
**How does dynamic load balancing across clusters work, if at all?**
|
||||
One simple starting point might be "it doesn't". i.e. if a service
|
||||
in a cluster is deemed to be "up", it receives as much traffic as is
|
||||
generated "nearby" (even if it overloads). If the service is deemed
|
||||
to "be down" in a given cluster, "all" nearby traffic is redirected
|
||||
to some other cluster within some number of seconds (failover could
|
||||
be automatic or manual). Failover is essentially binary. An
|
||||
improvement would be to detect when a service in a cluster reaches
|
||||
maximum serving capacity, and dynamically divert additional traffic
|
||||
to other clusters. But how exactly does all of this work, and how
|
||||
much of it is provided by Kubernetes, as opposed to something else
|
||||
bolted on top (e.g. external monitoring and manipulation of GeoDNS)?
|
||||
|
||||
**How does this tie in with auto-scaling of services?** More
|
||||
specifically, if I run my service across _n_ clusters globally, and
|
||||
one (or more) of them fail, how do I ensure that the remaining _n-1_
|
||||
clusters have enough capacity to serve the additional, failed-over
|
||||
traffic? Either:
|
||||
|
||||
1. I constantly over-provision all clusters by 1/n (potentially expensive), or
|
||||
1. I "manually" (or automatically) update my replica count configurations in the
|
||||
remaining clusters by 1/n when the failure occurs, and Kubernetes
|
||||
takes care of the rest for me, or
|
||||
1. Auto-scaling in the remaining clusters takes
|
||||
care of it for me automagically as the additional failed-over
|
||||
traffic arrives (with some latency). Note that this implies that
|
||||
the cloud provider keeps the necessary resources on hand to
|
||||
accommodate such auto-scaling (e.g. via something similar to AWS reserved
|
||||
and spot instances)
|
||||
|
||||
Up to this point, this use case ("Unavailability Zones") seems materially different from all the others above. It does not require dynamic cross-cluster service migration (we assume that the service is already running in more than one cluster when the failure occurs). Nor does it necessarily involve cross-cluster service discovery or location affinity. As a result, I propose that we address this use case somewhat independently of the others (although I strongly suspect that it will become substantially easier once we've solved the others).
|
||||
|
||||
All of the above (regarding "Unavailability Zones") refers primarily
|
||||
to already-running user-facing services, and minimizing the impact on
|
||||
end users of those services becoming unavailable in a given cluster.
|
||||
What about the people and systems that deploy Kubernetes services
|
||||
(devops etc)? Should they be automatically shielded from the impact
|
||||
of the cluster outage? i.e. have their new resource creation requests
|
||||
automatically diverted to another cluster during the outage? While
|
||||
this specific requirement seems non-critical (manual fail-over seems
|
||||
relatively non-arduous, ignoring the user-facing issues above), it
|
||||
smells a lot like the first three use cases listed above ("Capacity
|
||||
Overflow, Sensitive Services, Vendor lock-in..."), so if we address
|
||||
those, we probably get this one free of charge.
|
||||
|
||||
## Core Challenges of Cluster Federation
|
||||
|
||||
As we saw above, a few common challenges fall out of most of the use
|
||||
cases considered above, namely:
|
||||
|
||||
## Location Affinity
|
||||
|
||||
Can the pods comprising a single distributed application be
|
||||
partitioned across more than one cluster? More generally, how far
|
||||
apart, in network terms, can a given client and server within a
|
||||
distributed application reasonably be? A server need not necessarily
|
||||
be a pod, but could instead be a persistent disk housing data, or some
|
||||
other stateful network service. What is tolerable is typically
|
||||
application-dependent, primarily influenced by network bandwidth
|
||||
consumption, latency requirements and cost sensitivity.
|
||||
|
||||
For simplicity, let's assume that all Kubernetes distributed
|
||||
applications fall into one of three categories with respect to relative
|
||||
location affinity:
|
||||
|
||||
1. **"Strictly Coupled"**: Those applications that strictly cannot be
|
||||
partitioned between clusters. They simply fail if they are
|
||||
partitioned. When scheduled, all pods _must_ be scheduled to the
|
||||
same cluster. To move them, we need to shut the whole distributed
|
||||
application down (all pods) in one cluster, possibly move some
|
||||
data, and then bring up all of the pods in another cluster. To
|
||||
avoid downtime, we might bring up the replacement cluster and
|
||||
divert traffic there before turning down the original, but the
|
||||
principle is much the same. In some cases moving the data might be
|
||||
prohibitively expensive or time-consuming, in which case these
|
||||
applications may be effectively _immovable_.
|
||||
1. **"Strictly Decoupled"**: Those applications that can be
|
||||
indefinitely partitioned across more than one cluster, to no
|
||||
disadvantage. An embarrassingly parallel YouTube porn detector,
|
||||
where each pod repeatedly dequeues a video URL from a remote work
|
||||
queue, downloads and chews on the video for a few hours, and
|
||||
arrives at a binary verdict, might be one such example. The pods
|
||||
derive no benefit from being close to each other, or anything else
|
||||
(other than the source of YouTube videos, which is assumed to be
|
||||
equally remote from all clusters in this example). Each pod can be
|
||||
scheduled independently, in any cluster, and moved at any time.
|
||||
1. **"Preferentially Coupled"**: Somewhere between Coupled and
|
||||
Decoupled. These applications prefer to have all of their pods
|
||||
located in the same cluster (e.g. for failure correlation, network
|
||||
latency or bandwidth cost reasons), but can tolerate being
|
||||
partitioned for "short" periods of time (for example while
|
||||
migrating the application from one cluster to another). Most small
|
||||
to medium sized LAMP stacks with not-very-strict latency goals
|
||||
probably fall into this category (provided that they use sane
|
||||
service discovery and reconnect-on-fail, which they need to do
|
||||
anyway to run effectively, even in a single Kubernetes cluster).
|
||||
|
||||
From a fault isolation point of view, there are also opposites of the
|
||||
above. For example, a master database and its slave replica might
|
||||
need to be in different availability zones. We'll refer to this as
|
||||
anti-affinity, although it is largely outside the scope of this
|
||||
document.
|
||||
|
||||
Note that there is somewhat of a continuum with respect to network
|
||||
cost and quality between any two nodes, ranging from two nodes on the
|
||||
same L2 network segment (lowest latency and cost, highest bandwidth)
|
||||
to two nodes on different continents (highest latency and cost, lowest
|
||||
bandwidth). One interesting point on that continuum relates to
|
||||
multiple availability zones within a well-connected metro or region
|
||||
and single cloud provider. Despite being in different data centers,
|
||||
or areas within a mega data center, network in this case is often very fast
|
||||
and effectively free or very cheap. For the purposes of this network location
|
||||
affinity discussion, this case is considered analogous to a single
|
||||
availability zone. Furthermore, if a given application doesn't fit
|
||||
cleanly into one of the above, shoe-horn it into the best fit,
|
||||
defaulting to the "Strictly Coupled and Immovable" bucket if you're
|
||||
not sure.
|
||||
|
||||
And then there's what I'll call _absolute_ location affinity. Some
|
||||
applications are required to run in bounded geographical or network
|
||||
topology locations. The reasons for this are typically
|
||||
political/legislative (data privacy laws etc), or driven by network
|
||||
proximity to consumers (or data providers) of the application ("most
|
||||
of our users are in Western Europe, U.S. West Coast" etc).
|
||||
|
||||
**Proposal:** First tackle Strictly Decoupled applications (which can
|
||||
be trivially scheduled, partitioned or moved, one pod at a time).
|
||||
Then tackle Preferentially Coupled applications (which must be
|
||||
scheduled in totality in a single cluster, and can be moved, but
|
||||
ultimately in total, and necessarily within some bounded time).
|
||||
Leave strictly coupled applications to be manually moved between
|
||||
clusters as required for the foreseeable future.
|
||||
|
||||
## Cross-cluster service discovery
|
||||
|
||||
I propose having pods use standard discovery methods used by external
|
||||
clients of Kubernetes applications (i.e. DNS). DNS might resolve to a
|
||||
public endpoint in the local or a remote cluster. Other than Strictly
|
||||
Coupled applications, software should be largely oblivious of which of
|
||||
the two occurs.
|
||||
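To make the intent concrete, here is a minimal Go sketch of a client that discovers a service purely via DNS and reconnects on failure, and is therefore oblivious to whether the name resolves to an endpoint in the local cluster or a remote one. The service name `my-service.example.com` is purely illustrative, not a proposed naming scheme.

{% highlight go %}
{% raw %}
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	// The client only knows a DNS name; whether it resolves to an endpoint
	// in the local cluster or a remote one is invisible at this level.
	const service = "my-service.example.com" // illustrative name only

	addrs, err := net.LookupHost(service)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("resolved endpoints:", addrs)

	// Reconnect-on-fail: a simple retry loop keeps the client usable while
	// DNS fails over from one cluster's endpoint to another's.
	client := &http.Client{Timeout: 5 * time.Second}
	for attempt := 0; attempt < 3; attempt++ {
		resp, err := client.Get("http://" + service + "/healthz")
		if err != nil {
			time.Sleep(time.Second)
			continue
		}
		resp.Body.Close()
		fmt.Println("status:", resp.Status)
		return
	}
}
{% endraw %}
{% endhighlight %}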
|
||||
_Aside:_ How do we avoid "tromboning" through an external VIP when DNS
|
||||
resolves to a public IP on the local cluster? Strictly speaking this
|
||||
would be an optimization for some cases, and probably only matters to
|
||||
high-bandwidth, low-latency communications. We could potentially
|
||||
eliminate the trombone with some kube-proxy magic if necessary. More
|
||||
detail to be added here, but feel free to shoot down the basic DNS
|
||||
idea in the meantime. In addition, some applications rely on private
|
||||
networking between clusters for security (e.g. AWS VPC or more
|
||||
generally VPN). It should not be necessary to forsake this in
|
||||
order to use Ubernetes, for example by being forced to use public
|
||||
connectivity between clusters.
|
||||
|
||||
## Cross-cluster Scheduling
|
||||
|
||||
This is closely related to location affinity above, and also discussed
|
||||
there. The basic idea is that some controller, logically outside of
|
||||
the basic Kubernetes control plane of the clusters in question, needs
|
||||
to be able to:
|
||||
|
||||
1. Receive "global" resource creation requests.
|
||||
1. Make policy-based decisions as to which cluster(s) should be used
|
||||
to fulfill each given resource request. In a simple case, the
|
||||
request is just redirected to one cluster. In a more complex case,
|
||||
the request is "demultiplexed" into multiple sub-requests, each to
|
||||
a different cluster. Knowledge of the (albeit approximate)
|
||||
available capacity in each cluster will be required by the
|
||||
controller to sanely split the request. Similarly, knowledge of
|
||||
the properties of the application (Location Affinity class --
|
||||
Strictly Coupled, Strictly Decoupled etc, privacy class etc) will
|
||||
be required. It is also conceivable that knowledge of service
|
||||
SLAs and monitoring thereof might provide an input into
|
||||
scheduling/placement algorithms.
|
||||
1. Multiplex the responses from the individual clusters into an
|
||||
aggregate response.
|
||||
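A rough Go sketch of the "demultiplexing" step described above, assuming hypothetical `Cluster` and capacity data that are not part of any existing API; it only illustrates splitting a replica count across clusters in proportion to their approximate free capacity.

{% highlight go %}
{% raw %}
package main

import "fmt"

// Cluster is a hypothetical view of a federated cluster's free capacity.
type Cluster struct {
	Name     string
	FreePods int
}

// demux splits a request for `replicas` pods across clusters in proportion
// to their (approximate) free capacity, as the controller described above
// would need to do when a request cannot be sent to a single cluster.
func demux(replicas int, clusters []Cluster) map[string]int {
	total := 0
	for _, c := range clusters {
		total += c.FreePods
	}
	out := map[string]int{}
	if total == 0 {
		return out
	}
	assigned := 0
	for i, c := range clusters {
		n := replicas * c.FreePods / total
		if i == len(clusters)-1 {
			n = replicas - assigned // give the remainder to the last cluster
		}
		out[c.Name] = n
		assigned += n
	}
	return out
}

func main() {
	clusters := []Cluster{{"us-east", 300}, {"eu-west", 100}}
	fmt.Println(demux(8, clusters)) // e.g. map[eu-west:2 us-east:6]
}
{% endraw %}
{% endhighlight %}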
|
||||
There is of course a lot of detail still missing from this section,
|
||||
including discussion of:
|
||||
|
||||
1. admission control
|
||||
1. initial placement of instances of a new
|
||||
service vs scheduling new instances of an existing service in response
|
||||
to auto-scaling
|
||||
1. rescheduling pods due to failure (response might be
|
||||
different depending on if it's failure of a node, rack, or whole AZ)
|
||||
1. data placement relative to compute capacity,
|
||||
etc.
|
||||
|
||||
## Cross-cluster Migration
|
||||
|
||||
Again this is closely related to location affinity discussed above,
|
||||
and is in some sense an extension of Cross-cluster Scheduling. When
|
||||
certain events occur, it becomes necessary or desirable for the
|
||||
cluster federation system to proactively move distributed applications
|
||||
(either in part or in whole) from one cluster to another. Examples of
|
||||
such events include:
|
||||
|
||||
1. A low capacity event in a cluster (or a cluster failure).
|
||||
1. A change of scheduling policy ("we no longer use cloud provider X").
|
||||
1. A change of resource pricing ("cloud provider Y dropped their
|
||||
prices - lets migrate there").
|
||||
|
||||
Strictly Decoupled applications can be trivially moved, in part or in
|
||||
whole, one pod at a time, to one or more clusters (within applicable
|
||||
policy constraints, for example "PrivateCloudOnly").
|
||||
|
||||
For Preferentially Coupled applications, the federation system must
|
||||
first locate a single cluster with sufficient capacity to accommodate
|
||||
the entire application, then reserve that capacity, and incrementally
|
||||
move the application, one (or more) resources at a time, over to the
|
||||
new cluster, within some bounded time period (and possibly within a
|
||||
predefined "maintenance" window). Strictly Coupled applications (with
|
||||
the exception of those deemed completely immovable) require the
|
||||
federation system to:
|
||||
|
||||
1. start up an entire replica application in the destination cluster
|
||||
1. copy persistent data to the new application instance (possibly
|
||||
before starting pods)
|
||||
1. switch user traffic across
|
||||
1. tear down the original application instance
|
||||
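The four steps above could be orchestrated roughly as in the following Go sketch; the `FederationClient` interface and its methods are hypothetical placeholders, not an existing API, and only show the proposed ordering.

{% highlight go %}
{% raw %}
package main

import "fmt"

// FederationClient is a hypothetical interface standing in for whatever
// API the federation system would expose; none of these methods exist today.
type FederationClient interface {
	StartReplicaApplication(app, cluster string) error
	CopyPersistentData(app, fromCluster, toCluster string) error
	SwitchTraffic(app, toCluster string) error
	TearDownApplication(app, cluster string) error
}

// migrateStrictlyCoupled applies the four steps in order, stopping at the
// first failure so the original instance is never torn down prematurely.
func migrateStrictlyCoupled(c FederationClient, app, from, to string) error {
	if err := c.StartReplicaApplication(app, to); err != nil {
		return fmt.Errorf("start replica: %v", err)
	}
	if err := c.CopyPersistentData(app, from, to); err != nil {
		return fmt.Errorf("copy data: %v", err)
	}
	if err := c.SwitchTraffic(app, to); err != nil {
		return fmt.Errorf("switch traffic: %v", err)
	}
	return c.TearDownApplication(app, from)
}

func main() {
	// A real implementation of FederationClient would be injected here.
	fmt.Println("see migrateStrictlyCoupled for the proposed ordering")
}
{% endraw %}
{% endhighlight %}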
|
||||
It is proposed that support for automated migration of Strictly
|
||||
Coupled applications be deferred to a later date.
|
||||
|
||||
## Other Requirements
|
||||
|
||||
These are often left implicit by customers, but are worth calling out explicitly:
|
||||
|
||||
1. Software failure isolation between Kubernetes clusters should be
|
||||
retained as far as is practically possible. The federation system
|
||||
should not materially increase the failure correlation across
|
||||
clusters. For this reason the federation control plane software
|
||||
should ideally be completely independent of the Kubernetes cluster
|
||||
control software, and look just like any other Kubernetes API
|
||||
client, with no special treatment. If the federation control plane
|
||||
software fails catastrophically, the underlying Kubernetes clusters
|
||||
should remain independently usable.
|
||||
1. Unified monitoring, alerting and auditing across federated Kubernetes clusters.
|
||||
1. Unified authentication, authorization and quota management across
|
||||
clusters (this is in direct conflict with failure isolation above,
|
||||
so there are some tough trade-offs to be made here).
|
||||
|
||||
## Proposed High-Level Architectures
|
||||
|
||||
Two distinct potential architectural approaches have emerged from discussions
|
||||
thus far:
|
||||
|
||||
1. An explicitly decoupled and hierarchical architecture, where the
|
||||
Federation Control Plane sits logically above a set of independent
|
||||
Kubernetes clusters, each of which is (potentially) unaware of the
|
||||
other clusters, and of the Federation Control Plane itself (other
|
||||
than to the extent that it is an API client much like any other).
|
||||
One possible example of this general architecture is illustrated
|
||||
below, and will be referred to as the "Decoupled, Hierarchical"
|
||||
approach.
|
||||
1. A more monolithic architecture, where a single instance of the
|
||||
Kubernetes control plane itself manages a single logical cluster
|
||||
composed of nodes in multiple availability zones and cloud
|
||||
providers.
|
||||
|
||||
A very brief, non-exhaustive list of pro's and con's of the two
|
||||
approaches follows. (In the interest of full disclosure, the author
|
||||
prefers the Decoupled Hierarchical model for the reasons stated below).
|
||||
|
||||
1. **Failure isolation:** The Decoupled Hierarchical approach provides
|
||||
better failure isolation than the Monolithic approach, as each
|
||||
underlying Kubernetes cluster, and the Federation Control Plane,
|
||||
can operate and fail completely independently of each other. In
|
||||
particular, their software and configurations can be updated
|
||||
independently. Such updates are, in our experience, the primary
|
||||
cause of control-plane failures, in general.
|
||||
1. **Failure probability:** The Decoupled Hierarchical model incorporates
|
||||
numerically more independent pieces of software and configuration
|
||||
than the Monolithic one. But the complexity of each of these
|
||||
decoupled pieces is arguably better contained in the Decoupled
|
||||
model (per standard arguments for modular rather than monolithic
|
||||
software design). Which of the two models presents higher
|
||||
aggregate complexity and consequent failure probability remains
|
||||
somewhat of an open question.
|
||||
1. **Scalability:** Conceptually the Decoupled Hierarchical model wins
|
||||
here, as each underlying Kubernetes cluster can be scaled
|
||||
completely independently w.r.t. scheduling, node state management,
|
||||
monitoring, network connectivity etc. It is even potentially
|
||||
feasible to stack "Ubernetes" federated clusters (i.e. create
|
||||
federations of federations) should scalability of the independent
|
||||
Federation Control Plane become an issue (although the author does
|
||||
not envision this being a problem worth solving in the short
|
||||
term).
|
||||
1. **Code complexity:** I think that an argument can be made both ways
|
||||
here. It depends on whether you prefer to weave the logic for
|
||||
handling nodes in multiple availability zones and cloud providers
|
||||
within a single logical cluster into the existing Kubernetes
|
||||
control plane code base (which was explicitly not designed for
|
||||
this), or separate it into a decoupled Federation system (with
|
||||
possible code sharing between the two via shared libraries). The
|
||||
author prefers the latter because it:
|
||||
1. Promotes better code modularity and interface design.
|
||||
1. Allows the code
|
||||
bases of Kubernetes and the Federation system to progress
|
||||
largely independently (different sets of developers, different
|
||||
release schedules etc).
|
||||
1. **Administration complexity:** Again, I think that this could be argued
|
||||
both ways. Superficially it would seem that administration of a
|
||||
single Monolithic multi-zone cluster might be simpler by virtue of
|
||||
being only "one thing to manage"; however, in practice each of the
|
||||
underlying availability zones (and possibly cloud providers) has
|
||||
its own capacity, pricing, hardware platforms, and possibly
|
||||
bureaucratic boundaries (e.g. "our EMEA IT department manages those
|
||||
European clusters"). So explicitly allowing for (but not
|
||||
mandating) completely independent administration of each
|
||||
underlying Kubernetes cluster, and the Federation system itself,
|
||||
in the Decoupled Hierarchical model seems to have real practical
|
||||
benefits that outweigh the superficial simplicity of the
|
||||
Monolithic model.
|
||||
1. **Application development and deployment complexity:** It's not clear
|
||||
to me that there is any significant difference between the two
|
||||
models in this regard. Presumably the API exposed by the two
|
||||
different architectures would look very similar, as would the
|
||||
behavior of the deployed applications. It has even been suggested
|
||||
to write the code in such a way that it could be run in either
|
||||
configuration. It's not clear that this makes sense in practice
|
||||
though.
|
||||
1. **Control plane cost overhead:** There is a minimum per-cluster
|
||||
overhead -- two (possibly virtual) machines, or more for redundant HA
|
||||
deployments. For deployments of very small Kubernetes
|
||||
clusters with the Decoupled Hierarchical approach, this cost can
|
||||
become significant.
|
||||
|
||||
### The Decoupled, Hierarchical Approach - Illustrated
|
||||
|
||||

|
||||
|
||||
## Ubernetes API
|
||||
|
||||
It is proposed that this look a lot like the existing Kubernetes API
|
||||
but be explicitly multi-cluster.
|
||||
|
||||
+ Clusters become first class objects, which can be registered,
|
||||
listed, described, deregistered etc via the API.
|
||||
+ Compute resources can be explicitly requested in specific clusters,
|
||||
or automatically scheduled to the "best" cluster by Ubernetes (by a
|
||||
pluggable Policy Engine).
|
||||
+ There is a federated equivalent of a replication controller type (or
|
||||
perhaps a [deployment](deployment.html)),
|
||||
which is multicluster-aware, and delegates to cluster-specific
|
||||
replication controllers/deployments as required (e.g. a federated RC for n
|
||||
replicas might simply spawn multiple replication controllers in
|
||||
different clusters to do the hard work).
|
||||
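As an illustration only, following the conventions of the Go type definitions elsewhere in this document, the API sketched above might contain objects along these lines. None of these types exist; the field names are invented for this sketch.

{% highlight go %}
{% raw %}
// Cluster is a hypothetical first-class Ubernetes object representing one
// registered Kubernetes cluster.
type Cluster struct {
	TypeMeta
	ObjectMeta

	// Endpoint is the address of the cluster's API server.
	Endpoint string

	// Credentials identifies the secret used to authenticate to the cluster.
	Credentials string
}

// FederatedReplicationControllerSpec is a hypothetical multicluster-aware
// equivalent of a ReplicationControllerSpec.
type FederatedReplicationControllerSpec struct {
	// Replicas is the total number of replicas across all clusters.
	Replicas int

	// ClusterSelector optionally restricts which clusters may be used;
	// empty means the Policy Engine is free to choose.
	ClusterSelector map[string]string

	// Template describes the pods to run, exactly as in a
	// cluster-level replication controller.
	Template *PodTemplateSpec
}
{% endraw %}
{% endhighlight %}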
|
||||
## Policy Engine and Migration/Replication Controllers
|
||||
|
||||
The Policy Engine decides which parts of each application go into each
|
||||
cluster at any point in time, and stores this desired state in the
|
||||
Desired Federation State store (an etcd or
|
||||
similar). Migration/Replication Controllers reconcile this against the
|
||||
desired states stored in the underlying Kubernetes clusters (by
|
||||
watching both, and creating or updating the underlying Replication
|
||||
Controllers and related Services accordingly).
|
||||
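A compressed Go sketch of the reconciliation described above; the types and the hard-coded data are hypothetical and only show the compare-and-adjust shape of such a controller, which in reality would be driven by watches on both stores.

{% highlight go %}
{% raw %}
package main

import "fmt"

// desiredReplicas maps cluster name -> replicas the Policy Engine wants there.
type desiredReplicas map[string]int

// observedReplicas maps cluster name -> replicas currently configured in the
// underlying cluster's replication controllers.
type observedReplicas map[string]int

// reconcile compares desired federation state with what the underlying
// clusters report and returns the adjustments the Migration/Replication
// Controllers would need to apply.
func reconcile(desired desiredReplicas, observed observedReplicas) map[string]int {
	delta := map[string]int{}
	for cluster, want := range desired {
		if have := observed[cluster]; have != want {
			delta[cluster] = want - have
		}
	}
	for cluster, have := range observed {
		if _, ok := desired[cluster]; !ok && have != 0 {
			delta[cluster] = -have // nothing should run here any more
		}
	}
	return delta
}

func main() {
	desired := desiredReplicas{"us-east": 6, "eu-west": 2}
	observed := observedReplicas{"us-east": 6, "asia-east": 3}
	fmt.Println("adjustments:", reconcile(desired, observed))
	// adjustments: map[asia-east:-3 eu-west:2]
}
{% endraw %}
{% endhighlight %}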
|
||||
## Authentication and Authorization
|
||||
|
||||
This should ideally be delegated to some external auth system, shared
|
||||
by the underlying clusters, to avoid duplication and inconsistency.
|
||||
Either that, or we end up with multilevel auth. Local readonly
|
||||
eventually consistent auth slaves in each cluster and in Ubernetes
|
||||
could potentially cache auth, to mitigate an SPOF auth system.
|
||||
|
||||
## Data consistency, failure and availability characteristics
|
||||
|
||||
The services comprising the Ubernetes Control Plane have to run
|
||||
somewhere. Several options exist here:
|
||||
* For high availability Ubernetes deployments, these
|
||||
services may run in either:
|
||||
* a dedicated Kubernetes cluster, not co-located in the same
|
||||
availability zone with any of the federated clusters (for fault
|
||||
isolation reasons). If that cluster/availability zone, and hence the Federation
|
||||
system, fails catastrophically, the underlying pods and
|
||||
applications continue to run correctly, albeit temporarily
|
||||
without the Federation system.
|
||||
* across multiple Kubernetes availability zones, probably with
|
||||
some sort of cross-AZ quorum-based store. This provides
|
||||
theoretically higher availability, at the cost of some
|
||||
complexity related to data consistency across multiple
|
||||
availability zones.
|
||||
* For simpler, less highly available deployments, just co-locate the
|
||||
Federation control plane in/on/with one of the underlying
|
||||
Kubernetes clusters. The downside of this approach is that if
|
||||
that specific cluster fails, all automated failover and scaling
|
||||
logic which relies on the federation system will also be
|
||||
unavailable at the same time (i.e. precisely when it is needed).
|
||||
But if one of the other federated clusters fails, everything
|
||||
should work just fine.
|
||||
|
||||
There is some further thinking to be done around the data consistency
|
||||
model upon which the Federation system is based, and its impact
|
||||
on the detailed semantics, failure and availability
|
||||
characteristics of the system.
|
||||
|
||||
## Proposed Next Steps
|
||||
|
||||
Identify concrete applications of each use case and configure a proof
|
||||
of concept service that exercises the use case. For example, cluster
|
||||
failure tolerance seems popular, so set up an apache frontend with
|
||||
replicas in each of three availability zones with either an Amazon Elastic
|
||||
Load Balancer or Google Cloud Load Balancer pointing at them? What
|
||||
does the zookeeper config look like for N=3 across 3 AZs -- and how
|
||||
does each replica find the other replicas and how do clients find
|
||||
their primary zookeeper replica? And now how do I do a shared, highly
|
||||
available redis database? Use a few common specific use cases like
|
||||
this to flesh out the detailed API and semantics of Ubernetes.
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "High Availability of Scheduling and Controller Components in Kubernetes"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# High Availability of Scheduling and Controller Components in Kubernetes
|
||||
|
||||
This document serves as a proposal for high availability of the scheduler and controller components in Kubernetes. This proposal is intended to provide a simple High Availability api for Kubernetes components with the potential to extend to services running on Kubernetes. Those services would be subject to their own constraints.
|
||||
|
||||
## Design Options
|
||||
|
||||
For complete reference see [this](https://www.ibm.com/developerworks/community/blogs/RohitShetty/entry/high_availability_cold_warm_hot?lang=en)
|
||||
|
||||
1. Hot Standby: In this scenario, data and state are shared between the two components such that an immediate failure in one component causes the standby daemon to take over exactly where the failed component had left off. This would be an ideal solution for Kubernetes, however it poses a series of challenges in the case of controllers where component-state is cached locally and not persisted in a transactional way to a storage facility. This would also introduce additional load on the apiserver, which is not desirable. As a result, we are **NOT** planning on this approach at this time.
|
||||
|
||||
2. **Warm Standby**: In this scenario there is only one active component acting as the master and additional components running but not providing service or responding to requests. Data and state are not shared between the active and standby components. When a failure occurs, the standby component that becomes the master must determine the current state of the system before resuming functionality. This is the approach that this proposal will leverage.
|
||||
|
||||
3. Active-Active (Load Balanced): Clients can simply load-balance across any number of servers that are currently running. Their general availability can be continuously updated, or published, such that load balancing only occurs across active participants. This aspect of HA is outside of the scope of *this* proposal because there is already a partial implementation in the apiserver.
|
||||
|
||||
## Design Discussion Notes on Leader Election
|
||||
|
||||
Implementation References:
|
||||
* [zookeeper](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_leaderElection)
|
||||
* [etcd](https://groups.google.com/forum/#!topic/etcd-dev/EbAa4fjypb4)
|
||||
* [initialPOC](https://github.com/rrati/etcd-ha)
|
||||
|
||||
In HA, the apiserver will provide an api for sets of replicated clients to do master election: acquire the lease, renew the lease, and release the lease. This api is component agnostic, so a client will need to provide the component type and the lease duration when attempting to become master. The lease duration should be tuned per component. The apiserver will attempt to create a key in etcd based on the component type that contains the client's hostname/ip and port information. This key will be created with a ttl from the lease duration provided in the request. Failure to create this key means there is already a master of that component type, and the error from etcd will propagate to the client. Successfully creating the key means the client making the request is the master. Only the current master can renew the lease. When renewing the lease, the apiserver will update the existing key with a new ttl. The location in etcd for the HA keys is TBD.
|
||||
|
||||
The first component to request leadership will become the master. All other components of that type will fail until the current leader releases the lease, or fails to renew the lease within the expiration time. On startup, all components should attempt to become master. The component that succeeds becomes the master, and should perform all functions of that component. The components that fail to become the master should not perform any tasks and sleep for their lease duration and then attempt to become the master again. A clean shutdown of the leader will cause a release of the lease and a new master will be elected.
|
||||
|
||||
The component that becomes master should create a thread to manage the lease. This thread should be created with a channel that the main process can use to release the master lease. The master should release the lease in cases of an unrecoverable error and clean shutdown. Otherwise, this process will renew the lease and sleep, waiting for the next renewal time or notification to release the lease. If there is a failure to renew the lease, this process should force the entire component to exit. Daemon exit is meant to prevent potential split-brain conditions. Daemon restart is implied in this scenario, by either the init system (systemd), or possible watchdog processes. (See Design Discussion Notes)
|
||||
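A minimal Go sketch of the lease-management thread described above. The `renewLease` function stands in for a call to the proposed (not yet existing) apiserver lease API, and the exit behaviour is simplified; it only illustrates the renew-or-exit loop and the release channel.

{% highlight go %}
{% raw %}
package main

import (
	"fmt"
	"os"
	"time"
)

// renewLease stands in for a call to the proposed apiserver lease API;
// it is a placeholder, not a real client call.
func renewLease(component string, duration time.Duration) error {
	fmt.Println("renewing lease for", component)
	return nil
}

// manageLease renews the lease until asked to release it via the channel,
// and forces the whole component to exit if a renewal fails, to avoid
// split-brain (a supervisor such as systemd is expected to restart it).
func manageLease(component string, duration time.Duration, release <-chan struct{}) {
	ticker := time.NewTicker(duration / 2) // renew well before expiry
	defer ticker.Stop()
	for {
		select {
		case <-release:
			fmt.Println("releasing lease for", component)
			return
		case <-ticker.C:
			if err := renewLease(component, duration); err != nil {
				fmt.Println("lost lease, exiting:", err)
				os.Exit(1)
			}
		}
	}
}

func main() {
	release := make(chan struct{})
	go manageLease("scheduler", 10*time.Second, release)
	time.Sleep(12 * time.Second) // the component does its "master" work here
	close(release)               // clean shutdown: release the lease
	time.Sleep(time.Second)
}
{% endraw %}
{% endhighlight %}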
|
||||
## Options added to components with HA functionality
|
||||
|
||||
Some command line options would be added to components that can do HA:
|
||||
|
||||
* Lease Duration - How long a component can be master
|
||||
|
||||
## Design Discussion Notes
|
||||
|
||||
Some components may run numerous threads in order to perform tasks in parallel. Upon losing master status, such components should exit instantly instead of attempting to gracefully shut down such threads. This is to ensure that, in the case there's some propagation delay in informing the threads they should stop, the lame-duck threads won't interfere with the new master. The component should exit with an exit code indicating that the component is not the master. Since all components will be run by systemd or some other monitoring system, this will just result in a restart.
|
||||
|
||||
There is a short window after a new master acquires the lease, during which data from the old master might be committed. This is because there is currently no way to condition a write on its source being the master. Having the daemons exit shortens this window but does not eliminate it. A proper solution for this problem will be addressed at a later date. The proposed solution is:
|
||||
|
||||
1. This requires transaction support in etcd (which is already planned - see [coreos/etcd#2675](https://github.com/coreos/etcd/pull/2675))
|
||||
|
||||
2. The entry in etcd that is tracking the lease for a given component (the "current master" entry) would have as its value the host:port of the lease-holder (as described earlier) and a sequence number. The sequence number is incremented whenever a new master gets the lease.
|
||||
|
||||
3. Master replica is aware of the latest sequence number.
|
||||
|
||||
4. Whenever master replica sends a mutating operation to the API server, it includes the sequence number.
|
||||
|
||||
5. When the API server makes the corresponding write to etcd, it includes it in a transaction that does a compare-and-swap on the "current master" entry (old value == new value == host:port and sequence number from the replica that sent the mutating operation). This basically guarantees that if we elect the new master, all transactions coming from the old master will fail. You can think of this as the master attaching a "precondition" of its belief about who is the latest master.
|
||||
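The precondition idea in step 5 can be sketched as follows. `etcdTxn` is a hypothetical stand-in (etcd transactions were not yet available when this was written), so this only illustrates the comparison being made, not a real client call.

{% highlight go %}
{% raw %}
package main

import (
	"errors"
	"fmt"
)

// masterRecord is the value of the "current master" entry: the holder's
// host:port plus a sequence number bumped on every new election.
type masterRecord struct {
	HostPort string
	Sequence int64
}

// etcdTxn is a hypothetical stand-in for a transactional etcd write: it
// succeeds only if the "current master" entry still equals `expect`.
func etcdTxn(current, expect masterRecord, key, value string) error {
	if current != expect {
		return errors.New("precondition failed: a newer master exists")
	}
	fmt.Printf("wrote %s=%s\n", key, value)
	return nil
}

func main() {
	stored := masterRecord{HostPort: "10.0.0.2:10251", Sequence: 7}

	// The old master still believes it holds sequence 6, so its write fails.
	oldMaster := masterRecord{HostPort: "10.0.0.1:10251", Sequence: 6}
	fmt.Println(etcdTxn(stored, oldMaster, "/registry/pods/x", "..."))

	// The current master's precondition matches, so its write goes through.
	fmt.Println(etcdTxn(stored, stored, "/registry/pods/y", "..."))
}
{% endraw %}
{% endhighlight %}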
|
||||
## Open Questions
|
||||
|
||||
* Is there a desire to keep track of all nodes for a specific component type?
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,95 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Abstract"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Abstract
|
||||
|
||||
Initial Resources is a data-driven feature that, based on historical data, tries to estimate the resource usage of a container without Resources specified,
|
||||
and to set them before the container is run. This document describes the design of the component.
|
||||
|
||||
## Motivation
|
||||
|
||||
Since we want to make Kubernetes as simple as possible for its users, we don’t want to require setting
|
||||
[Resources](resource-qos.html#resource-specifications)
|
||||
for a container by its owner. On the other hand, having Resources filled is critical for scheduling decisions.
|
||||
The current solution of setting Resources to a hardcoded value has obvious drawbacks. We need to implement a component
|
||||
which will set initial Resources to a reasonable value.
|
||||
|
||||
## Design
|
||||
|
||||
The InitialResources component will be implemented as an [admission plugin](https://releases.k8s.io/release-1.1/plugin/pkg/admission) and invoked right before
|
||||
[LimitRanger](https://github.com/kubernetes/kubernetes/blob/7c9bbef96ed7f2a192a1318aa312919b861aee00/cluster/gce/config-default.sh#L91).
|
||||
For every container without Resources specified, it will try to predict the amount of resources that should be sufficient for it.
|
||||
As a result, a pod without specified resources will be treated as
|
||||
[Burstable](resource-qos.html#qos-classes).
|
||||
|
||||
InitialResources will set only [request](resource-qos.html#resource-specifications)
|
||||
(independently for each resource type: cpu, memory)
|
||||
field in the first version, to avoid killing containers due to OOM (however, the container may still be killed if it exceeds the requested resources).
|
||||
To make the component work with LimitRanger, the estimated value will be capped by the min and max possible values, if defined.
|
||||
This prevents the situation where the pod is rejected due to a too-low or too-high estimation.
|
||||
|
||||
The container won’t be marked as managed by this component in any way; however, an appropriate event will be exported.
|
||||
The predicting algorithm should have very low latency so as not to significantly increase e2e pod startup latency
|
||||
[#3954](https://github.com/kubernetes/kubernetes/pull/3954).
|
||||
|
||||
### Predicting algorithm details
|
||||
|
||||
In the first version, estimation will be made based on historical data for the Docker image being run in the container (both the name and the tag matter).
|
||||
CPU/memory usage of each container is exported periodically (by default with 1 minute resolution) to the backend (see more in [Monitoring pipeline](#monitoring-pipeline)).
|
||||
|
||||
InitialResources will set the Request for both cpu and memory to the 90th percentile of the first available set of samples, considered in the following order:
|
||||
|
||||
* 7 days, same image:tag, assuming there are at least 60 samples (1 hour)
|
||||
* 30 days, same image:tag, assuming there are at least 60 samples (1 hour)
|
||||
* 30 days, same image, assuming there is at least 1 sample
|
||||
|
||||
If there is still no data, the default value will be set by LimitRanger. The same parameters will be configurable with appropriate flags.
|
||||
|
||||
#### Example
|
||||
|
||||
If we have at least 60 samples from image:tag over the past 7 days, we will use the 90th percentile of all of the samples of image:tag over the past 7 days.
|
||||
Otherwise, if we have at least 60 samples from image:tag over the past 30 days, we will use the 90th percentile of all of the samples of image:tag over the past 30 days.
|
||||
Otherwise, if we have at least 1 sample from image over the past 30 days, we will use the 90th percentile of all of the samples of image over the past 30 days.
|
||||
Otherwise we will use the default value.
|
||||
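A Go sketch of the fallback order described above. The `samples` accessor and the data source behind it are hypothetical, and the percentile math is deliberately simplified; this is not the proposed implementation, only an illustration of the selection logic.

{% highlight go %}
{% raw %}
package main

import (
	"fmt"
	"sort"
)

// samples is a hypothetical accessor into the monitoring backend: usage
// samples for a given image (optionally narrowed by tag) over the past days.
func samples(image, tag string, days int) []float64 {
	// Placeholder data; a real implementation would query InfluxDB or GCM.
	return []float64{0.1, 0.2, 0.25, 0.3}
}

// percentile returns the p-th percentile of vals (nearest-rank, simplified).
func percentile(vals []float64, p float64) float64 {
	sort.Float64s(vals)
	idx := int(p/100*float64(len(vals))+0.5) - 1
	if idx < 0 {
		idx = 0
	}
	if idx >= len(vals) {
		idx = len(vals) - 1
	}
	return vals[idx]
}

// estimateRequest applies the documented fallback order and reports whether
// an estimate was found; if not, LimitRanger's default applies.
func estimateRequest(image, tag string) (float64, bool) {
	if s := samples(image, tag, 7); len(s) >= 60 {
		return percentile(s, 90), true
	}
	if s := samples(image, tag, 30); len(s) >= 60 {
		return percentile(s, 90), true
	}
	if s := samples(image, "", 30); len(s) >= 1 {
		return percentile(s, 90), true
	}
	return 0, false // fall back to LimitRanger defaults
}

func main() {
	cpu, ok := estimateRequest("nginx", "1.9")
	fmt.Println(cpu, ok)
}
{% endraw %}
{% endhighlight %}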
|
||||
### Monitoring pipeline
|
||||
|
||||
In the first version, two backend options will be available for the predicting algorithm:
|
||||
|
||||
* [InfluxDB](../../docs/user-guide/monitoring.html#influxdb-and-grafana) - aggregation will be made in SQL query
|
||||
* [GCM](../../docs/user-guide/monitoring.html#google-cloud-monitoring) - since GCM is not as powerful as InfluxDB some aggregation will be made on the client side
|
||||
|
||||
Both will be hidden under an abstraction layer, so it would be easy to add another option.
|
||||
The code will be a part of the Initial Resources component so as not to block development; however, in the future it should become a part of Heapster.
|
||||
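The abstraction layer mentioned above might look roughly like the following interface; the names are illustrative and not part of any existing code.

{% highlight go %}
{% raw %}
package main

import (
	"fmt"
	"time"
)

// UsageSample is one observed resource-usage data point for a container image.
type UsageSample struct {
	Timestamp time.Time
	CPU       float64 // cores
	Memory    int64   // bytes
}

// UsageDataSource hides whether samples come from InfluxDB, GCM, or any
// future backend; the InitialResources plugin would only depend on this.
type UsageDataSource interface {
	// Samples returns usage samples for image (and tag, if non-empty)
	// observed within the given look-back window.
	Samples(image, tag string, window time.Duration) ([]UsageSample, error)
}

// fakeSource is a trivial in-memory implementation used here so the sketch
// runs without any real monitoring backend.
type fakeSource struct{ data []UsageSample }

func (f fakeSource) Samples(image, tag string, window time.Duration) ([]UsageSample, error) {
	return f.data, nil
}

func main() {
	var src UsageDataSource = fakeSource{data: []UsageSample{{time.Now(), 0.2, 64 << 20}}}
	s, _ := src.Samples("nginx", "1.9", 7*24*time.Hour)
	fmt.Println(len(s), "samples")
}
{% endraw %}
{% endhighlight %}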
|
||||
|
||||
## Next steps
|
||||
|
||||
The first version will be quite simple, so there are many possible improvements. Some of them seem to have high priority
|
||||
and should be introduced shortly after the first version is done:
|
||||
|
||||
* observe OOM and then react to it by increasing estimation
|
||||
* add the possibility to specify whether estimation should be made, possibly as ```InitialResourcesPolicy``` with options: *always*, *if-not-set*, *never*
|
||||
* add other features to the model like *namespace*
|
||||
* remember predefined values for the most popular images like *mysql*, *nginx*, *redis*, etc.
|
||||
* dry mode, which allows asking the system for a resource recommendation for a container without running it
|
||||
* add estimation as annotations for those containers that already have resources set
|
||||
* support for other data sources like [Hawkular](http://www.hawkular.org/)
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,181 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Job Controller"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Job Controller
|
||||
|
||||
## Abstract
|
||||
|
||||
A proposal for implementing a new controller - Job controller - which will be responsible
|
||||
for managing pod(s) that require running once to completion even if the machine
|
||||
the pod is running on fails, in contrast to what ReplicationController currently offers.
|
||||
|
||||
Several existing issues and PRs were already created regarding that particular subject:
|
||||
* Job Controller [#1624](https://github.com/kubernetes/kubernetes/issues/1624)
|
||||
* New Job resource [#7380](https://github.com/kubernetes/kubernetes/pull/7380)
|
||||
|
||||
|
||||
## Use Cases
|
||||
|
||||
1. Be able to start one or several pods tracked as a single entity.
|
||||
1. Be able to run batch-oriented workloads on Kubernetes.
|
||||
1. Be able to get the job status.
|
||||
1. Be able to specify the number of instances performing a job at any one time.
|
||||
1. Be able to specify the number of successfully finished instances required to finish a job.
|
||||
|
||||
|
||||
## Motivation
|
||||
|
||||
Jobs are needed for executing multi-pod computation to completion; a good example
|
||||
here would be the ability to implement any type of batch oriented tasks.
|
||||
|
||||
|
||||
## Implementation
|
||||
|
||||
Job controller is similar to replication controller in that they manage pods.
|
||||
This implies they will follow the same controller framework that replication
|
||||
controllers already defined. The biggest difference between a `Job` and a
|
||||
`ReplicationController` object is the purpose; `ReplicationController`
|
||||
ensures that a specified number of Pods are running at any one time, whereas
|
||||
`Job` is responsible for keeping the desired number of Pods to a completion of
|
||||
a task. This difference will be represented by the `RestartPolicy` which is
|
||||
required to always take the value of `RestartPolicyNever` or `RestartOnFailure`.
|
||||
|
||||
|
||||
The new `Job` object will have the following content:
|
||||
|
||||
{% highlight go %}
|
||||
{% raw %}
|
||||
// Job represents the configuration of a single job.
|
||||
type Job struct {
|
||||
TypeMeta
|
||||
ObjectMeta
|
||||
|
||||
// Spec is a structure defining the expected behavior of a job.
|
||||
Spec JobSpec
|
||||
|
||||
// Status is a structure describing current status of a job.
|
||||
Status JobStatus
|
||||
}
|
||||
|
||||
// JobList is a collection of jobs.
|
||||
type JobList struct {
|
||||
TypeMeta
|
||||
ListMeta
|
||||
|
||||
Items []Job
|
||||
}
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
The `JobSpec` structure is defined to contain all the information about how the actual job execution
|
||||
will look.
|
||||
|
||||
{% highlight go %}
|
||||
{% raw %}
|
||||
// JobSpec describes how the job execution will look like.
|
||||
type JobSpec struct {
|
||||
|
||||
// Parallelism specifies the maximum desired number of pods the job should
|
||||
// run at any given time. The actual number of pods running in steady state will
|
||||
// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
|
||||
// i.e. when the work left to do is less than max parallelism.
|
||||
Parallelism *int
|
||||
|
||||
// Completions specifies the desired number of successfully finished pods the
|
||||
// job should be run with. Defaults to 1.
|
||||
Completions *int
|
||||
|
||||
// Selector is a label query over pods running a job.
|
||||
Selector map[string]string
|
||||
|
||||
// Template is the object that describes the pod that will be created when
|
||||
// executing a job.
|
||||
Template *PodTemplateSpec
|
||||
}
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
The `JobStatus` structure is defined to contain information about the pods executing
|
||||
the specified job. The structure holds information about the pods currently executing
|
||||
the job.
|
||||
|
||||
{% highlight go %}
|
||||
{% raw %}
|
||||
// JobStatus represents the current state of a Job.
|
||||
type JobStatus struct {
|
||||
Conditions []JobCondition
|
||||
|
||||
// CreationTime represents time when the job was created
|
||||
CreationTime unversioned.Time
|
||||
|
||||
// StartTime represents time when the job was started
|
||||
StartTime unversioned.Time
|
||||
|
||||
// CompletionTime represents time when the job was completed
|
||||
CompletionTime unversioned.Time
|
||||
|
||||
// Active is the number of actively running pods.
|
||||
Active int
|
||||
|
||||
// Successful is the number of pods successfully completed their job.
|
||||
Successful int
|
||||
|
||||
// Unsuccessful is the number of pod failures; this applies only to jobs
|
||||
// created with RestartPolicyNever, otherwise this value will always be 0.
|
||||
Unsuccessful int
|
||||
}
|
||||
|
||||
type JobConditionType string
|
||||
|
||||
// These are valid conditions of a job.
|
||||
const (
|
||||
// JobComplete means the job has completed its execution.
|
||||
JobComplete JobConditionType = "Complete"
|
||||
)
|
||||
|
||||
// JobCondition describes current state of a job.
|
||||
type JobCondition struct {
|
||||
Type JobConditionType
|
||||
Status ConditionStatus
|
||||
LastHeartbeatTime unversioned.Time
|
||||
LastTransitionTime unversioned.Time
|
||||
Reason string
|
||||
Message string
|
||||
}
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
## Events
|
||||
|
||||
Job controller will be emitting the following events:
|
||||
* JobStart
|
||||
* JobFinish
|
||||
|
||||
## Future evolution
|
||||
|
||||
Below are the possible future extensions to the Job controller:
|
||||
* Be able to limit the execution time for a job, similarly to ActiveDeadlineSeconds for Pods.
|
||||
* Be able to create a chain of jobs dependent one on another.
|
||||
* Be able to specify the work each of the workers should execute (see type 1 from
|
||||
[this comment](https://github.com/kubernetes/kubernetes/issues/1624#issuecomment-97622142))
|
||||
* Be able to inspect Pods running a Job, especially after a Job has finished, e.g.
|
||||
by providing pointers to Pods in the JobStatus ([see comment](https://github.com/kubernetes/kubernetes/pull/11746/files#r37142628)).
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,174 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Kubemark proposal"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Kubemark proposal
|
||||
|
||||
## Goal of this document
|
||||
|
||||
This document describes a design of Kubemark - a system that allows performance testing of a Kubernetes cluster. It describes the
|
||||
assumptions, the high-level design, and discusses possible solutions for lower-level problems. It is supposed to be a starting point for a more
|
||||
detailed discussion.
|
||||
|
||||
## Current state and objective
|
||||
|
||||
Currently, performance testing happens on ‘live’ clusters of up to 100 Nodes. It takes quite a while to start such a cluster or to push
|
||||
updates to all Nodes, and it uses quite a lot of resources. At this scale the amount of wasted time and used resources is still acceptable.
|
||||
In the next quarter or two we’re targeting a 1000 Node cluster, which will push it way beyond the ‘acceptable’ level. Additionally, we want to
|
||||
enable people without many resources to run scalability tests on bigger clusters than they can afford at a given time. Having the ability to
|
||||
cheaply run scalability tests will enable us to run some set of them on "normal" test clusters, which in turn would mean the ability to run
|
||||
them on every PR.
|
||||
|
||||
This means that we need a system that will allow for realistic performance testing on a (much) smaller number of “real” machines. The first
|
||||
assumption we make is that Nodes are independent, i.e. the number of existing Nodes does not impact the performance of a single Node. This is not
|
||||
entirely true, as the number of Nodes can increase the latency of various components on the Master machine, which in turn may increase the latency of Node
|
||||
operations, but we’re not interested in measuring this effect here. Instead we want to measure how the number of Nodes and the load imposed by
|
||||
Node daemons affects the performance of Master components.
|
||||
|
||||
## Kubemark architecture overview
|
||||
|
||||
The high-level idea behind Kubemark is to write a library that allows running artificial "Hollow" Nodes that will be able to simulate the
|
||||
behavior of real Kubelet and KubeProxy in a single, lightweight binary. Hollow components will need to correctly respond to Controllers
|
||||
(via API server), and preferably, in the fullness of time, be able to ‘replay’ previously recorded real traffic (this is out of scope for
|
||||
initial version). To teach Hollow components replaying recorded traffic they will need to store data specifying when given Pod/Container
|
||||
should die (e.g. observed lifetime). Such data can be extracted e.g. from etcd Raft logs, or it can be reconstructed from Events. In the
|
||||
initial version we only want them to be able to fool Master components and put some configurable (in what way TBD) load on them.
|
||||
|
||||
When we have Hollow Node ready, we’ll be able to test performance of Master Components by creating a real Master Node, with API server,
|
||||
Controllers, etcd and whatnot, and create a number of Hollow Nodes that will register with the running Master.
|
||||
|
||||
To make Kubemark easier to maintain as the system evolves, Hollow components will reuse real "production" code for Kubelet and KubeProxy, but
|
||||
will mock all the backends with no-op or very simple mocks. We believe that this approach is better in the long run than writing special
|
||||
"performance-test-aimed" separate version of them. This may take more time to create an initial version, but we think maintenance cost will
|
||||
be noticeably smaller.
|
||||
|
||||
### Option 1
|
||||
|
||||
For the initial version we will teach Master components to use the port number to identify a Kubelet/KubeProxy. This will allow running those
|
||||
components on non-default ports, and at the same time will allow running multiple Hollow Nodes on a single machine. During setup we will
|
||||
generate credentials for cluster communication and pass them to HollowKubelet/HollowProxy to use. Master will treat all HollowNodes as
|
||||
normal ones.
|
||||
|
||||

|
||||
*Kubemark architecture diagram for option 1*
|
||||
|
||||
### Option 2
|
||||
|
||||
As a second (equivalent) option we will run Kubemark on top of 'real' Kubernetes cluster, where both Master and Hollow Nodes will be Pods.
|
||||
In this option we'll be able to use Kubernetes mechanisms to streamline setup, e.g. by using Kubernetes networking to ensure unique IPs for
|
||||
Hollow Nodes, or using Secrets to distribute Kubelet credentials. The downside of this configuration is that it's likely that some noise
|
||||
will appear in Kubemark results from either CPU/Memory pressure from other things running on Nodes (e.g. FluentD, or Kubelet) or running
|
||||
cluster over an overlay network. We believe that it'll be possible to turn off cluster monitoring for Kubemark runs, so that the impact
|
||||
of real Node daemons will be minimized, but we don't know what will be the impact of using higher level networking stack. Running a
|
||||
comparison will be an interesting test in itself.
|
||||
|
||||
### Discussion
|
||||
|
||||
Before taking a closer look at steps necessary to set up a minimal Hollow cluster it's hard to tell which approach will be simpler. It's
|
||||
quite possible that the initial version will end up as hybrid between running the Hollow cluster directly on top of VMs and running the
|
||||
Hollow cluster on top of a Kubernetes cluster that is running on top of VMs. E.g. running Nodes as Pods in Kubernetes cluster and Master
|
||||
directly on top of VM.
|
||||
|
||||
## Things to simulate
|
||||
|
||||
In real Kubernetes on a single Node we run two daemons that communicate with Master in some way: Kubelet and KubeProxy.
|
||||
|
||||
### KubeProxy
|
||||
|
||||
As a replacement for KubeProxy we'll use HollowProxy, which will be a real KubeProxy with injected no-op mocks everywhere it makes sense.
|
||||
|
||||
### Kubelet
|
||||
|
||||
As a replacement for Kubelet we'll use HollowKubelet, which will be a real Kubelet with injected no-op or simple mocks everywhere it makes
|
||||
sense.
|
||||
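As an illustration of "injected no-op mocks", a Hollow component might satisfy the same interface as a real backend while doing nothing. The interface below is invented for this sketch and is much smaller than anything the real Kubelet uses.

{% highlight go %}
{% raw %}
package main

import "fmt"

// ContainerRuntime is an invented, much-simplified stand-in for the backend
// interface a real Kubelet drives; the real interface is larger.
type ContainerRuntime interface {
	RunContainer(pod, container string) error
	KillContainer(pod, container string) error
}

// hollowRuntime is the no-op mock: it records that a container "exists" so
// that status reported back to the Master looks plausible, but runs nothing.
type hollowRuntime struct {
	running map[string]bool
}

func (h *hollowRuntime) RunContainer(pod, container string) error {
	h.running[pod+"/"+container] = true
	return nil
}

func (h *hollowRuntime) KillContainer(pod, container string) error {
	delete(h.running, pod+"/"+container)
	return nil
}

func main() {
	var rt ContainerRuntime = &hollowRuntime{running: map[string]bool{}}
	rt.RunContainer("nginx-pod", "nginx")
	fmt.Println("hollow kubelet is 'running' containers without any real workload")
}
{% endraw %}
{% endhighlight %}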
|
||||
Kubelet also exposes a cadvisor endpoint which is scraped by Heapster, a healthz endpoint to be read by supervisord, and we have FluentD running as a
|
||||
Pod on each Node that exports logs to Elasticsearch (or Google Cloud Logging). Both Heapster and Elasticsearch are running in Pods in the
|
||||
cluster so do not add any load on the Master components by themselves. There can be other systems that scrape Heapster through a proxy running
|
||||
on the Master, which adds additional load, but they're not part of the default setup, so in the first version we won't simulate this behavior.
|
||||
|
||||
In the first version we’ll assume that all started Pods will run indefinitely if not explicitly deleted. In the future we can add a model
|
||||
of short-running batch jobs, but in the initial version we’ll assume only serving-like Pods.
|
||||
|
||||
### Heapster
|
||||
|
||||
In addition to system components we run Heapster as a part of cluster monitoring setup. Heapster currently watches Events, Pods and Nodes
|
||||
through the API server. In the test setup we can use real heapster for watching API server, with mocked out piece that scrapes cAdvisor
|
||||
data from Kubelets.
|
||||
|
||||
### Elasticsearch and Fluentd
|
||||
|
||||
Similarly to Heapster, Elasticsearch runs outside the Master machine but generates some traffic on it. The Fluentd “daemon” running on the Master
|
||||
periodically sends Docker logs it gathered to the Elasticsearch running on one of the Nodes. In the initial version we omit Elasticsearch,
|
||||
as it produces only a constant small load on Master Node that does not change with the size of the cluster.
|
||||
|
||||
## Necessary work
|
||||
|
||||
There are three more or less independent things that need to be worked on:
|
||||
- HollowNode implementation, creating a library/binary that will be able to listen to Watches and respond in a correct fashion with Status
|
||||
updates. This also involves creation of a CloudProvider that can produce such Hollow Nodes, or making sure that HollowNodes can correctly
|
||||
self-register in no-provider Master.
|
||||
- Kubemark setup, including figuring out the networking model, the number of Hollow Nodes that will be allowed to run on a single “machine”, writing
|
||||
setup/run/teardown scripts (in [option 1](#option-1)), or figuring out how to run Master and Hollow Nodes on top of Kubernetes
|
||||
(in [option 2](#option-2))
|
||||
- Creating a Player component that will send requests to the API server putting a load on a cluster. This involves creating a way to
|
||||
specify the desired workload. This task is
|
||||
very well isolated from the rest, as it is about sending requests to the real API server. Because of that we can discuss requirements
|
||||
separately.
|
||||
|
||||
## Concerns
|
||||
|
||||
Network performance most likely won't be a problem for the initial version if running directly on VMs rather than on top of a Kubernetes
|
||||
cluster, as Kubemark will be running on a standard networking stack (no cloud-provider software routes or overlay network is needed, as we
|
||||
don't need custom routing between Pods). Similarly we don't think that running Kubemark on Kubernetes virtualized cluster networking will
|
||||
cause noticeable performance impact, but it requires testing.
|
||||
|
||||
On the other hand, when adding additional features it may turn out that we need to simulate the Kubernetes Pod network. In such a case, when running
|
||||
'pure' Kubemark we may try one of the following:
|
||||
- running overlay network like Flannel or OVS instead of using cloud providers routes,
|
||||
- write a simple network multiplexer to multiplex communications from the Hollow Kubelets/KubeProxies on the machine.
|
||||
|
||||
In the case of Kubemark on Kubernetes it may turn out that we run into a problem with adding yet another layer of network virtualization, but we
|
||||
don't need to solve this problem now.
|
||||
|
||||
## Work plan
|
||||
|
||||
- Teach/make sure that Master can talk to multiple Kubelets on the same Machine [option 1](#option-1):
|
||||
- make sure that Master can talk to a Kubelet on non-default port,
|
||||
- make sure that Master can talk to all Kubelets on different ports,
|
||||
- Write HollowNode library:
|
||||
- new HollowProxy,
|
||||
- new HollowKubelet,
|
||||
- new HollowNode combining the two,
|
||||
- make sure that Master can talk to two HollowKubelets running on the same machine
|
||||
- Make sure that we can run Hollow cluster on top of Kubernetes [option 2](#option-2)
|
||||
- Write a player that will automatically put some predefined load on Master, <- this is the moment when it’s possible to play with it and is useful by itself for
|
||||
scalability tests. Alternatively we can just use current density/load tests,
|
||||
- Benchmark our machines - see how many Watch clients we can have before everything explodes,
|
||||
- See how many HollowNodes we can run on a single machine by attaching them to the real master <- this is the moment it starts to be useful
|
||||
- Update kube-up/kube-down scripts to enable creating “HollowClusters”/write new scripts/something, integrate HollowCluster with Elasticsearch/Heapster equivalents,
|
||||
- Allow passing custom configuration to the Player
|
||||
|
||||
## Future work
|
||||
|
||||
In the future we want to add the following capabilities to the Kubemark system:
|
||||
- replaying real traffic reconstructed from the recorded Events stream,
|
||||
- simulating scraping things running on Nodes through Master proxy.
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,411 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Abstract"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Abstract
|
||||
|
||||
A proposal for refactoring `SecurityContext` to have pod-level and container-level attributes in
|
||||
order to correctly model pod- and container-level security concerns.
|
||||
|
||||
## Motivation
|
||||
|
||||
Currently, containers have a `SecurityContext` attribute which contains information about the
|
||||
security settings the container uses. In practice many of these attributes are uniform across all
|
||||
containers in a pod. Simultaneously, there is also a need to apply the security context pattern
|
||||
at the pod level to correctly model security attributes that apply only at a pod level.
|
||||
|
||||
Users should be able to:
|
||||
|
||||
1. Express security settings that are applicable to the entire pod
|
||||
2. Express base security settings that apply to all containers
|
||||
3. Override only the settings that need to be differentiated from the base in individual
|
||||
containers
|
||||
|
||||
This proposal is a dependency for other changes related to security context:
|
||||
|
||||
1. [Volume ownership management in the Kubelet](https://github.com/kubernetes/kubernetes/pull/12944)
|
||||
2. [Generic SELinux label management in the Kubelet](https://github.com/kubernetes/kubernetes/pull/14192)
|
||||
|
||||
Goals of this design:
|
||||
|
||||
1. Describe the use cases for which a pod-level security context is necessary
|
||||
2. Thoroughly describe the API backward compatibility issues that arise from the introduction of
|
||||
a pod-level security context
|
||||
3. Describe all implementation changes necessary for the feature
|
||||
|
||||
## Constraints and assumptions
|
||||
|
||||
1. We will not design for intra-pod security; we are not currently concerned about isolating
|
||||
containers in the same pod from one another
|
||||
1. We will design for backward compatibility with the current V1 API
|
||||
|
||||
## Use Cases
|
||||
|
||||
1. As a developer, I want to correctly model security attributes which belong to an entire pod
|
||||
2. As a user, I want to be able to specify container attributes that apply to all containers
|
||||
without repeating myself
|
||||
3. As an existing user, I want to be able to use the existing container-level security API
|
||||
|
||||
### Use Case: Pod level security attributes
|
||||
|
||||
Some security attributes make sense only to model at the pod level. For example, it is a
|
||||
fundamental property of pods that all containers in a pod share the same network namespace.
|
||||
Therefore, using the host namespace makes sense to model at the pod level only, and indeed, today
|
||||
it is part of the `PodSpec`. Other host namespace support is currently being added and these will
|
||||
also be pod-level settings; it makes sense to model them as a pod-level collection of security
|
||||
attributes.
|
||||
|
||||
## Use Case: Override pod security context for container
|
||||
|
||||
Some use cases require the containers in a pod to run with different security settings. As an
|
||||
example, a user may want to have a pod with two containers, one of which runs as root with the
|
||||
privileged setting, and one that runs as a non-root UID. To support use cases like this, it should
|
||||
be possible to override appropriate (i.e., not intrinsically pod-level) security settings for
|
||||
individual containers.
|
||||
|
||||
## Proposed Design
|
||||
|
||||
### SecurityContext
|
||||
|
||||
For posterity and ease of reading, note the current state of `SecurityContext`:
|
||||
|
||||
{% highlight go %}
|
||||
{% raw %}
|
||||
package api
|
||||
|
||||
type Container struct {
|
||||
// Other fields omitted
|
||||
|
||||
// Optional: SecurityContext defines the security options the pod should be run with
|
||||
SecurityContext *SecurityContext `json:"securityContext,omitempty"`
|
||||
}
|
||||
|
||||
type SecurityContext struct {
|
||||
// Capabilities are the capabilities to add/drop when running the container
|
||||
Capabilities *Capabilities `json:"capabilities,omitempty"`
|
||||
|
||||
// Run the container in privileged mode
|
||||
Privileged *bool `json:"privileged,omitempty"`
|
||||
|
||||
// SELinuxOptions are the labels to be applied to the container
|
||||
// and volumes
|
||||
SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"`
|
||||
|
||||
// RunAsUser is the UID to run the entrypoint of the container process.
|
||||
RunAsUser *int64 `json:"runAsUser,omitempty"`
|
||||
|
||||
// RunAsNonRoot indicates that the container should be run as a non-root user. If the RunAsUser
|
||||
// field is not explicitly set then the kubelet may check the image for a specified user or
|
||||
// perform defaulting to specify a user.
|
||||
RunAsNonRoot bool `json:"runAsNonRoot,omitempty"`
|
||||
}
|
||||
|
||||
// SELinuxOptions contains the fields that make up the SELinux context of a container.
|
||||
type SELinuxOptions struct {
|
||||
// SELinux user label
|
||||
User string `json:"user,omitempty"`
|
||||
|
||||
// SELinux role label
|
||||
Role string `json:"role,omitempty"`
|
||||
|
||||
// SELinux type label
|
||||
Type string `json:"type,omitempty"`
|
||||
|
||||
// SELinux level label.
|
||||
Level string `json:"level,omitempty"`
|
||||
}
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
### PodSecurityContext
|
||||
|
||||
`PodSecurityContext` specifies two types of security attributes:
|
||||
|
||||
1. Attributes that apply to the pod itself
|
||||
2. Attributes that apply to the containers of the pod
|
||||
|
||||
In the internal API, fields of the `PodSpec` controlling the use of the host PID, IPC, and network
|
||||
namespaces are relocated to this type:
|
||||
|
||||
{% highlight go %}
|
||||
{% raw %}
|
||||
package api
|
||||
|
||||
type PodSpec struct {
|
||||
// Other fields omitted
|
||||
|
||||
// Optional: SecurityContext specifies pod-level attributes and container security attributes
|
||||
// that apply to all containers.
|
||||
SecurityContext *PodSecurityContext `json:"securityContext,omitempty"`
|
||||
}
|
||||
|
||||
// PodSecurityContext specifies security attributes of the pod and container attributes that apply
|
||||
// to all containers of the pod.
|
||||
type PodSecurityContext struct {
|
||||
// Use the host's network namespace. If this option is set, the ports that will be
|
||||
// used must be specified.
|
||||
// Optional: Default to false.
|
||||
HostNetwork bool
|
||||
// Use the host's IPC namespace
|
||||
HostIPC bool
|
||||
|
||||
// Use the host's PID namespace
|
||||
HostPID bool
|
||||
|
||||
// Capabilities are the capabilities to add/drop when running containers
|
||||
Capabilities *Capabilities `json:"capabilities,omitempty"`
|
||||
|
||||
// Run the container in privileged mode
|
||||
Privileged *bool `json:"privileged,omitempty"`
|
||||
|
||||
// SELinuxOptions are the labels to be applied to the container
|
||||
// and volumes
|
||||
SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"`
|
||||
|
||||
// RunAsUser is the UID to run the entrypoint of the container process.
|
||||
RunAsUser *int64 `json:"runAsUser,omitempty"`
|
||||
|
||||
// RunAsNonRoot indicates that the container should be run as a non-root user. If the RunAsUser
|
||||
// field is not explicitly set then the kubelet may check the image for a specified user or
|
||||
// perform defaulting to specify a user.
|
||||
RunAsNonRoot bool
|
||||
}
|
||||
|
||||
// Comments and generated docs will change for the container.SecurityContext field to indicate
|
||||
// the precedence of these fields over the pod-level ones.
|
||||
|
||||
type Container struct {
|
||||
// Other fields omitted
|
||||
|
||||
// Optional: SecurityContext defines the security options the pod should be run with.
|
||||
// Settings specified in this field take precedence over the settings defined in
|
||||
// pod.Spec.SecurityContext.
|
||||
SecurityContext *SecurityContext `json:"securityContext,omitempty"`
|
||||
}
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
In the V1 API, the pod-level security attributes which are currently fields of the `PodSpec` are
|
||||
retained on the `PodSpec` for backward compatibility purposes:
|
||||
|
||||
{% highlight go %}
|
||||
{% raw %}
|
||||
package v1
|
||||
|
||||
type PodSpec struct {
|
||||
// Other fields omitted
|
||||
|
||||
// Use the host's network namespace. If this option is set, the ports that will be
|
||||
// used must be specified.
|
||||
// Optional: Default to false.
|
||||
HostNetwork bool `json:"hostNetwork,omitempty"`
|
||||
// Use the host's pid namespace.
|
||||
// Optional: Default to false.
|
||||
HostPID bool `json:"hostPID,omitempty"`
|
||||
// Use the host's ipc namespace.
|
||||
// Optional: Default to false.
|
||||
HostIPC bool `json:"hostIPC,omitempty"`
|
||||
|
||||
// Optional: SecurityContext specifies pod-level attributes and container security attributes
|
||||
// that apply to all containers.
|
||||
SecurityContext *PodSecurityContext `json:"securityContext,omitempty"`
|
||||
}
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
The `pod.Spec.SecurityContext` specifies the security context of all containers in the pod.
|
||||
The containers' `securityContext` field is overlaid on the base security context to determine the
|
||||
effective security context for the container.
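
As a rough illustration of that overlay (this is a sketch, not the actual Kubelet code), the helper below assumes the `PodSecurityContext` shown above and the container-level `SecurityContext` defined earlier in this proposal; any container-level field that is set takes precedence, otherwise the pod-level value is used.

{% highlight go %}
{% raw %}
package api

// Sketch only: determineEffectiveSecurityContext applies the overlay rule.
// Container-level settings win whenever they are set; otherwise the
// pod-level values apply. It builds on the types defined above.
func determineEffectiveSecurityContext(pod *PodSecurityContext, container *SecurityContext) *SecurityContext {
	effective := &SecurityContext{}
	if pod != nil {
		// Seed with the pod-level attributes that also exist per container.
		effective.Capabilities = pod.Capabilities
		effective.Privileged = pod.Privileged
		effective.SELinuxOptions = pod.SELinuxOptions
		effective.RunAsUser = pod.RunAsUser
	}
	if container == nil {
		return effective
	}
	// Container-level values, when present, take precedence.
	if container.Capabilities != nil {
		effective.Capabilities = container.Capabilities
	}
	if container.Privileged != nil {
		effective.Privileged = container.Privileged
	}
	if container.SELinuxOptions != nil {
		effective.SELinuxOptions = container.SELinuxOptions
	}
	if container.RunAsUser != nil {
		effective.RunAsUser = container.RunAsUser
	}
	return effective
}
{% endraw %}
{% endhighlight %}

Applied to the pod in example 3 below, this overlay yields an effective `runAsUser` of `1002` for container `a` and `1001` for container `b`.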
|
||||
|
||||
The new V1 API should be backward compatible with the existing API. Backward compatibility is
|
||||
defined as:
|
||||
|
||||
> 1. Any API call (e.g. a structure POSTed to a REST endpoint) that worked before your change must
|
||||
> work the same after your change.
|
||||
> 2. Any API call that uses your change must not cause problems (e.g. crash or degrade behavior) when
|
||||
> issued against servers that do not include your change.
|
||||
> 3. It must be possible to round-trip your change (convert to different API versions and back) with
|
||||
> no loss of information.
|
||||
|
||||
Previous versions of this proposal attempted to deal with backward compatibility by defining
|
||||
the effect of setting the pod-level fields on the container-level fields. While trying to find
|
||||
consensus on this design, it became apparent that this approach was going to be extremely complex
|
||||
to implement, explain, and support. Instead, we will approach backward compatibility as follows:
|
||||
|
||||
1. Pod-level and container-level settings will not affect one another
|
||||
2. Old clients will be able to use container-level settings in the exact same way
|
||||
3. Container level settings always override pod-level settings if they are set
|
||||
|
||||
#### Examples
|
||||
|
||||
1. Old client using `pod.Spec.Containers[x].SecurityContext`
|
||||
|
||||
An old client creates a pod:
|
||||
|
||||
{% highlight yaml %}
|
||||
{% raw %}
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: a
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
- name: b
|
||||
securityContext:
|
||||
runAsUser: 1002
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
looks to old clients like:
|
||||
|
||||
{% highlight yaml %}
|
||||
{% raw %}
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: a
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
- name: b
|
||||
securityContext:
|
||||
runAsUser: 1002
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
looks to new clients like:
|
||||
|
||||
{% highlight yaml %}
|
||||
{% raw %}
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: a
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
- name: b
|
||||
securityContext:
|
||||
runAsUser: 1002
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
2. New client using `pod.Spec.SecurityContext`
|
||||
|
||||
A new client creates a pod using a field of `pod.Spec.SecurityContext`:
|
||||
|
||||
{% highlight yaml %}
|
||||
{% raw %}
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test-pod
|
||||
spec:
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
containers:
|
||||
- name: a
|
||||
- name: b
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
appears to new clients as:
|
||||
|
||||
{% highlight yaml %}
|
||||
{% raw %}
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test-pod
|
||||
spec:
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
containers:
|
||||
- name: a
|
||||
- name: b
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
old clients will see:
|
||||
|
||||
{% highlight yaml %}
|
||||
{% raw %}
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: a
|
||||
- name: b
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
3. Pods created using `pod.Spec.SecurityContext` and `pod.Spec.Containers[x].SecurityContext`
|
||||
|
||||
If a field is set in both `pod.Spec.SecurityContext` and
|
||||
`pod.Spec.Containers[x].SecurityContext`, the value in `pod.Spec.Containers[x].SecurityContext`
|
||||
wins. In the following pod:
|
||||
|
||||
{% highlight yaml %}
|
||||
{% raw %}
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test-pod
|
||||
spec:
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
containers:
|
||||
- name: a
|
||||
securityContext:
|
||||
runAsUser: 1002
|
||||
- name: b
|
||||
{% endraw %}
|
||||
{% endhighlight %}
|
||||
|
||||
The effective setting for `runAsUser` for container A is `1002`.
|
||||
|
||||
#### Testing
|
||||
|
||||
A backward compatibility test suite will be established for the v1 API. The test suite will
|
||||
verify compatibility by converting objects into the internal API and back to the version API and
|
||||
examining the results.
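
As an illustration of the round-trip idea only (the real suite will use the API conversion machinery rather than JSON, and the trimmed types here are stand-ins), a self-contained test of this shape encodes a versioned object, decodes it back, and checks that no information was lost:

{% highlight go %}
{% raw %}
package v1_test

import (
	"encoding/json"
	"reflect"
	"testing"
)

// podSpec is a trimmed stand-in for the real v1.PodSpec, used only to
// illustrate the round-trip check.
type podSpec struct {
	HostNetwork     bool             `json:"hostNetwork,omitempty"`
	SecurityContext *securityContext `json:"securityContext,omitempty"`
}

type securityContext struct {
	RunAsUser *int64 `json:"runAsUser,omitempty"`
}

func TestRoundTrip(t *testing.T) {
	uid := int64(1001)
	in := podSpec{HostNetwork: true, SecurityContext: &securityContext{RunAsUser: &uid}}

	data, err := json.Marshal(in)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}
	var out podSpec
	if err := json.Unmarshal(data, &out); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	// Round-tripping must not lose information (backward compatibility rule 3).
	if !reflect.DeepEqual(in, out) {
		t.Errorf("round trip changed object: got %+v, want %+v", out, in)
	}
}
{% endraw %}
{% endhighlight %}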
|
||||
|
||||
All of the examples here will be used as test-cases. As more test cases are added, the proposal will
|
||||
be updated.
|
||||
|
||||
An example of a test like this can be found in the
|
||||
[OpenShift API package](https://github.com/openshift/origin/blob/master/pkg/api/compatibility_test.go).
|
||||
|
||||
E2E test cases will be added to test the correct determination of the security context for containers.
|
||||
|
||||
### Kubelet changes
|
||||
|
||||
1. The Kubelet will use the new fields on the `PodSecurityContext` for host namespace control
|
||||
2. The Kubelet will be modified to correctly implement the backward compatibility and effective
|
||||
security context determination defined here
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,139 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Rescheduler design space"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Rescheduler design space
|
||||
|
||||
@davidopp, @erictune, @briangrant
|
||||
|
||||
July 2015
|
||||
|
||||
## Introduction and definition
|
||||
|
||||
A rescheduler is an agent that proactively causes currently-running
|
||||
Pods to be moved, so as to optimize some objective function for
|
||||
goodness of the layout of Pods in the cluster. (The objective function
|
||||
doesn't have to be expressed mathematically; it may just be a
|
||||
collection of ad-hoc rules, but in principle there is an objective
|
||||
function. Implicitly an objective function is described by the
|
||||
scheduler's predicate and priority functions.) It might be triggered
|
||||
to run every N minutes, or whenever some event happens that is known
|
||||
to make the objective function worse (for example, whenever any Pod goes
|
||||
PENDING for a long time.)
|
||||
|
||||
## Motivation and use cases
|
||||
|
||||
A rescheduler is useful because without a rescheduler, scheduling
|
||||
decisions are only made at the time Pods are created. But later on,
|
||||
the state of the cluster may have changed in some way such that it would
|
||||
be better to move the Pod to another node.
|
||||
|
||||
There are two categories of movements a rescheduler might trigger: coalescing
|
||||
and spreading.
|
||||
|
||||
### Coalesce Pods
|
||||
|
||||
This is the most common use case. Cluster layout changes over time. For
|
||||
example, run-to-completion Pods terminate, producing free space in their wake, but that space
|
||||
is fragmented. This fragmentation might prevent a PENDING Pod from scheduling
|
||||
(there are enough free resources for the Pod in aggregate across the cluster,
|
||||
but not on any single node). A rescheduler can coalesce free space like a
|
||||
disk defragmenter, thereby producing enough free space on a node for a PENDING
|
||||
Pod to schedule. In some cases it can do this just by moving Pods into existing
|
||||
holes, but often it will need to evict (and reschedule) running Pods in order to
|
||||
create a large enough hole.
|
||||
|
||||
A second use case for a rescheduler to coalesce pods is when it becomes possible
|
||||
to support the running Pods on a smaller number of nodes. The rescheduler can
|
||||
gradually move Pods off of some set of nodes to make those nodes empty so
|
||||
that they can then be shut down/removed. More specifically,
|
||||
the system could do a simulation to see whether, after removing a node from the
|
||||
cluster, the Pods that were on that node would be able to reschedule,
|
||||
either directly or with the help of the rescheduler; if the answer is
|
||||
yes, then you can safely auto-scale down (assuming services will still
|
||||
meet their application-level SLOs).
|
||||
|
||||
### Spread Pods
|
||||
|
||||
The main use cases for spreading Pods revolve around relieving congestion on (a) highly
|
||||
utilized node(s). For example, some process might suddenly start receiving a significantly
|
||||
above-normal amount of external requests, leading to starvation of best-effort
|
||||
Pods on the node. We can use the rescheduler to move the best-effort Pods off of the
|
||||
node. (They are likely to have generous eviction SLOs, so are more likely to be movable
|
||||
than the Pod that is experiencing the higher load, but in principle we might move either.)
|
||||
Or even before any node becomes overloaded, we might proactively re-spread Pods from nodes
|
||||
with high-utilization, to give them some buffer against future utilization spikes. In either
|
||||
case, the nodes we move the Pods onto might have been in the system for a long time or might
|
||||
have been added by the cluster auto-scaler specifically to allow the rescheduler to
|
||||
rebalance utilization.
|
||||
|
||||
A second spreading use case is to separate antagonists.
|
||||
Sometimes the processes running in two different Pods on the same node
|
||||
may have unexpected antagonistic
|
||||
behavior towards one another. A system component might monitor for such
|
||||
antagonism and ask the rescheduler to move one of the antagonists to a new node.
|
||||
|
||||
### Ranking the use cases
|
||||
|
||||
The vast majority of users probably only care about rescheduling for three scenarios:
|
||||
|
||||
1. Move Pods around to get a PENDING Pod to schedule
|
||||
1. Redistribute Pods onto new nodes added by a cluster auto-scaler when there are no PENDING Pods
|
||||
1. Move Pods around when CPU starvation is detected on a node
|
||||
|
||||
## Design considerations and design space
|
||||
|
||||
Because rescheduling is disruptive--it causes one or more
|
||||
already-running Pods to die when they otherwise wouldn't--a key
|
||||
constraint on rescheduling is that it must be done subject to
|
||||
disruption SLOs. There are a number of ways to specify these SLOs--a
|
||||
global rate limit across all Pods, a rate limit across a set of Pods
|
||||
defined by some particular label selector, a maximum number of Pods
|
||||
that can be down at any one time among a set defined by some
|
||||
particular label selector, etc. These policies are presumably part of
|
||||
the Rescheduler's configuration.
|
||||
|
||||
There are a lot of design possibilities for a rescheduler. To explain
|
||||
them, it's easiest to start with the description of a baseline
|
||||
rescheduler, and then describe possible modifications. The Baseline
|
||||
rescheduler (a rough code sketch follows the list):
|
||||
* only kicks in when there are one or more PENDING Pods for some period of time; its objective function is binary: completely happy if there are no PENDING Pods, and completely unhappy if there are PENDING Pods; it does not try to optimize for any other aspect of cluster layout
|
||||
* is not a scheduler -- it simply identifies a node where a PENDING Pod could fit if one or more Pods on that node were moved out of the way, and then kills those Pods to make room for the PENDING Pod, which will then be scheduled there by the regular scheduler(s). [obviously this killing operation must be able to specify "don't allow the killed Pod to reschedule back to whence it was killed" otherwise the killing is pointless] Of course it should only do this if it is sure the killed Pods will be able to reschedule into already-free space in the cluster. Note that although it is not a scheduler, the Rescheduler needs to be linked with the predicate functions of the scheduling algorithm(s) so that it can know (1) that the PENDING Pod would actually schedule into the hole it has identified once the hole is created, and (2) that the evicted Pod(s) will be able to schedule somewhere else in the cluster.
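
For concreteness, the control flow of the Baseline rescheduler might look roughly like the sketch below. It is illustrative only: the `Pod`/`Node` types and the `fits`, `canRescheduleElsewhere`, and `evict` helpers are hypothetical stand-ins (a real implementation would reuse the scheduler's predicate functions, pick a minimal victim set, and respect disruption SLOs).

{% highlight go %}
{% raw %}
package main

// Pod and Node are hypothetical, minimal stand-ins used only for this sketch.
type Pod struct{ Name string }

type Node struct {
	Name string
	Pods []Pod
}

// fits reports whether pending would schedule on node if victims were evicted.
// A real rescheduler would call the scheduler's predicate functions here.
func fits(pending Pod, node Node, victims []Pod) bool { return true }

// canRescheduleElsewhere reports whether every victim fits on some other node.
func canRescheduleElsewhere(victims []Pod, nodes []Node) bool { return true }

// evict kills victim and prevents it from rescheduling back onto node.
func evict(victim Pod, node Node) {}

// rescheduleOnce makes a single greedy attempt to free up room for a
// long-PENDING pod.
func rescheduleOnce(pending Pod, nodes []Node) bool {
	for _, node := range nodes {
		// Crude victim choice: consider all pods on the node. A real
		// implementation would pick a minimal set of movable pods.
		victims := node.Pods
		if !fits(pending, node, victims) {
			continue
		}
		// Only act if the evicted pods can reschedule somewhere else.
		if !canRescheduleElsewhere(victims, nodes) {
			continue
		}
		for _, v := range victims {
			evict(v, node)
		}
		// The regular scheduler(s) will place the pending pod into the hole.
		return true
	}
	return false
}

func main() {
	_ = rescheduleOnce(Pod{Name: "pending"}, nil)
}
{% endraw %}
{% endhighlight %}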
|
||||
|
||||
Possible variations on this Baseline rescheduler are
|
||||
|
||||
1. it can kill the Pod(s) whose space it wants **and also schedule the Pod that will take that space and reschedule the Pod(s) that were killed**, rather than just killing the Pod(s) whose space it wants and relying on the regular scheduler(s) to schedule the Pod that will take that space (and to reschedule the Pod(s) that were evicted)
|
||||
1. it can run continuously in the background to optimize general cluster layout instead of just trying to get a PENDING Pod to schedule
|
||||
1. it can try to move groups of Pods instead of using a one-at-a-time / greedy approach
|
||||
1. it can formulate multi-hop plans instead of single-hop
|
||||
|
||||
A key design question for a Rescheduler is how much knowledge it needs about the scheduling policies used by the cluster's scheduler(s).
|
||||
* For the Baseline rescheduler, it needs to know the predicate functions used by the cluster's scheduler(s) else it can't know how to create a hole that the PENDING Pod will fit into, nor be sure that the evicted Pod(s) will be able to reschedule elsewhere.
|
||||
* If it is going to run continuously in the background to optimize cluster layout but is still only going to kill Pods, then it still needs to know the predicate functions for the reason mentioned above. In principle it doesn't need to know the priority functions; it could just randomly kill Pods and rely on the regular scheduler to put them back in better places. However, this is a rather inexact approach. Thus it is useful for the rescheduler to know the priority functions, or at least some subset of them, so it can be sure that an action it takes will actually improve the cluster layout.
|
||||
* If it is going to run continuously in the background to optimize cluster layout and is going to act as a scheduler rather than just killing Pods, then it needs to know the predicate functions and some compatible (but not necessarily identical) priority functions. One example of a case where "compatible but not identical" might be useful is if the main scheduler(s) has a very simple scheduling policy optimized for low scheduling latency, while the Rescheduler has a more sophisticated/optimal scheduling policy that requires more computation time. The main thing to avoid is for the scheduler(s) and rescheduler to have incompatible priority functions, as this will cause them to "fight" (though it still can't lead to an infinite loop, since the scheduler(s) only ever touches a Pod once).
|
||||
|
||||
## Appendix: Integrating rescheduler with cluster auto-scaler (scale up)
|
||||
|
||||
For scaling up the cluster, a reasonable workflow might be:
|
||||
1. pod horizontal auto-scaler decides to add one or more Pods to a service, based on the metrics it is observing
|
||||
1. the Pod goes PENDING due to lack of a suitable node with sufficient resources
|
||||
1. rescheduler notices the PENDING Pod and determines that the Pod cannot schedule just by rearranging existing Pods (while respecting SLOs)
|
||||
1. rescheduler triggers cluster auto-scaler to add a node of the appropriate type for the PENDING Pod
|
||||
1. the PENDING Pod schedules onto the new node (and possibly the rescheduler also moves other Pods onto that node)
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,125 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Resource Quality of Service in Kubernetes"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Resource Quality of Service in Kubernetes
|
||||
|
||||
**Authors**: Ananya Kumar (@AnanyaKumar), Vishnu Kannan (@vishh)
|
||||
|
||||
**Status**: Design & Implementation in progress.
|
||||
|
||||
*This document presents the design of resource quality of service for containers in Kubernetes, and describes use cases and implementation details.*
|
||||
|
||||
**Quality of Service is still under development. Look [here](resource-qos.html#under-development) for more details**
|
||||
|
||||
## Motivation
|
||||
|
||||
Kubernetes allocates resources to containers in a simple way. Users can specify resource limits for containers. For example, a user can specify a 1gb memory limit for a container. The scheduler uses resource limits to schedule containers (technically, the scheduler schedules pods comprised of containers). For example, the scheduler will not place 5 containers with a 1gb memory limit onto a machine with 4gb memory. Currently, Kubernetes does not have robust mechanisms to ensure that containers run reliably on an overcommitted system.
|
||||
|
||||
In the current implementation, **if users specify limits for every container, cluster utilization is poor**. Containers often don’t use all the resources that they request which leads to a lot of wasted resources. For example, we might have 4 containers, each reserving 1GB of memory in a node with 4GB memory but only using 500MB of memory. Theoretically, we could fit more containers on the node, but Kubernetes will not schedule new pods (with specified limits) on the node.
|
||||
|
||||
A possible solution is to launch containers without specified limits - containers that don't ask for any resource guarantees. But **containers with limits specified are not very well protected from containers without limits specified**. If a container without a specified memory limit goes overboard and uses lots of memory, other containers (with specified memory limits) might be killed. This is bad, because users often want a way to launch containers that have resources guarantees, and that stay up reliably.
|
||||
|
||||
This proposal provides mechanisms for oversubscribing nodes while maintaining resource guarantees, by allowing containers to specify levels of resource guarantees. Containers will be able to *request* for a minimum resource guarantee. The *request* is different from the *limit* - containers will not be allowed to exceed resource limits. With this change, users can launch *best-effort* containers with 0 request. Best-effort containers use resources only if not being used by other containers, and can be used for resource-scavenging. Supporting best-effort containers in Borg increased utilization by about 20%, and we hope to see similar improvements in Kubernetes.
|
||||
|
||||
## Requests and Limits
|
||||
|
||||
Note: this section describes the functionality that QoS should eventually provide. Due to implementation issues, providing some of these guarantees, while maintaining our broader goals of efficient cluster utilization, is difficult. Later sections will go into the nuances of how the functionality will be achieved, and limitations of the initial implementation.
|
||||
|
||||
For each resource, containers can specify a resource request and limit, 0 <= request <= limit <= Infinity. If the container is successfully scheduled, the container is guaranteed the amount of resource requested. The container will not be allowed to exceed the specified limit. How the request and limit are enforced depends on whether the resource is [compressible or incompressible](../../docs/design/resources.html).
|
||||
|
||||
### Compressible Resource Guarantees
|
||||
|
||||
- For now, we are only supporting CPU.
|
||||
- Containers are guaranteed to get the amount of CPU they request; they may or may not get additional CPU time (depending on the other jobs running).
|
||||
- Excess CPU resources will be distributed based on the amount of CPU requested. For example, suppose container A requests for 60% of the CPU, and container B requests for 30% of the CPU. Suppose that both containers are trying to use as much CPU as they can. Then the extra 10% of CPU will be distributed to A and B in a 2:1 ratio (implementation discussed in later sections).
|
||||
- Containers will be throttled if they exceed their limit. If the limit is unspecified, the containers can use excess CPU when available. (A sketch of how CPU shares can be derived from requests follows this list.)
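
One plausible way to get the 2:1 behavior described above is to map each container's CPU request onto a proportional cpu.shares value for its cgroup. The constants and rounding below are assumptions used to illustrate the idea, not a committed implementation detail:

{% highlight go %}
{% raw %}
package main

import "fmt"

const sharesPerCPU = 1024 // assumed cpu.shares value for one full CPU

// milliCPUToShares maps a CPU request (in millicores) to a cpu.shares value.
func milliCPUToShares(milliCPU int64) int64 {
	if milliCPU == 0 {
		// Best-effort containers get a minimal share value.
		return 2
	}
	return milliCPU * sharesPerCPU / 1000
}

func main() {
	a := milliCPUToShares(600) // container A requests 60% of a CPU
	b := milliCPUToShares(300) // container B requests 30% of a CPU
	// Under contention, excess CPU is divided in proportion to shares, i.e. 2:1.
	fmt.Printf("A=%d shares, B=%d shares (ratio %.1f:1)\n", a, b, float64(a)/float64(b))
}
{% endraw %}
{% endhighlight %}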
|
||||
|
||||
### Incompressible Resource Guarantees
|
||||
|
||||
- For now, we are only supporting memory.
|
||||
- Containers will get the amount of memory they request; if they exceed their memory request, they could be killed (if some other container needs memory), but if containers consume less memory than requested, they will not be killed (except in cases where system tasks or daemons need more memory).
|
||||
- Containers will be killed if they use more memory than their limit.
|
||||
|
||||
### Kubelet Admission Policy
|
||||
|
||||
- Pods will be admitted by the Kubelet based on the sum of the requests of their containers. The Kubelet will ensure that the sum of the requests of all containers (over all pods) is within the system's resource capacity (for both memory and CPU); a sketch of this check follows.
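
A minimal sketch of that admission check is shown below. The types here are hypothetical stand-ins for the real API objects; the actual Kubelet tracks admitted pods and node capacity through its own data structures.

{% highlight go %}
{% raw %}
package main

// Illustrative admission check: admit a pod only if the sum of requests of
// all containers, over all admitted pods plus the candidate, stays within
// the node's capacity for both memory and CPU.
type container struct {
	memoryRequest int64 // bytes
	cpuRequest    int64 // millicores
}

type pod struct{ containers []container }

type node struct{ memoryCapacity, cpuCapacity int64 }

// requests sums the memory and CPU requests of a pod's containers.
func requests(p pod) (mem, cpu int64) {
	for _, c := range p.containers {
		mem += c.memoryRequest
		cpu += c.cpuRequest
	}
	return mem, cpu
}

func canAdmit(n node, admitted []pod, candidate pod) bool {
	var mem, cpu int64
	for _, p := range admitted {
		m, c := requests(p)
		mem, cpu = mem+m, cpu+c
	}
	m, c := requests(candidate)
	mem, cpu = mem+m, cpu+c
	return mem <= n.memoryCapacity && cpu <= n.cpuCapacity
}

func main() {}
{% endraw %}
{% endhighlight %}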
|
||||
|
||||
## QoS Classes
|
||||
|
||||
In an overcommitted system (where sum of requests > machine capacity) containers might eventually have to be killed, for example if the system runs out of CPU or memory resources. Ideally, we should kill containers that are less important. For each resource, we divide containers into 3 QoS classes: *Guaranteed*, *Burstable*, and *Best-Effort*, in decreasing order of priority.
|
||||
|
||||
The relationship between "Requests and Limits" and "QoS Classes" is subtle. Theoretically, the policy of classifying containers into QoS classes is orthogonal to the requests and limits specified for the container. Hypothetically, users could use a (currently unplanned) API to specify whether a container is guaranteed or best-effort. However, in this proposal, the policy of classifying containers into QoS classes is intimately tied to "Requests and Limits" - in fact, QoS classes are used to implement some of the memory guarantees described in the previous section.
|
||||
|
||||
For each resource, containers will be split into 3 different classes (a code sketch of the classification follows the list):
|
||||
- For now, we will only focus on memory. Containers will not be killed if CPU guarantees cannot be met (for example, if system tasks or daemons take up lots of CPU); instead, they will be temporarily throttled.
|
||||
- Containers with a 0 memory request are classified as memory *Best-Effort*. These containers are not requesting resource guarantees, and will be treated as lowest priority (processes in these containers are the first to get killed if the system runs out of memory).
|
||||
- Containers with the same request and limit and non-zero request are classified as memory *Guaranteed*. These containers ask for a well-defined amount of the resource and are considered top-priority (with respect to memory usage).
|
||||
- All other containers are memory *Burstable* - middle priority containers that have some form of minimal resource guarantee, but can use more resources when available.
|
||||
- In the current policy and implementation, best-effort containers are technically a subset of Burstable containers (where the request is 0), but they are a very important special case. Memory best-effort containers don't ask for any resource guarantees so they can utilize unused resources in a cluster (resource scavenging).
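
The classification rules above can be summarized in a few lines of code. This is a sketch with simplified types (the class names and the per-container request/limit parameters are stand-ins); the real implementation works per resource and per container.

{% highlight go %}
{% raw %}
package main

// QoSClass labels the memory QoS classification described above.
type QoSClass string

const (
	BestEffort QoSClass = "Best-Effort"
	Burstable  QoSClass = "Burstable"
	Guaranteed QoSClass = "Guaranteed"
)

// memoryQoS classifies a container by its memory request and limit (in bytes).
func memoryQoS(request, limit int64) QoSClass {
	switch {
	case request == 0:
		// No guarantee requested: lowest priority, first to be killed.
		return BestEffort
	case request == limit:
		// Well-defined, fully reserved amount: top priority.
		return Guaranteed
	default:
		// Some minimal guarantee, can burst above it when memory is free.
		return Burstable
	}
}

func main() {}
{% endraw %}
{% endhighlight %}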
|
||||
|
||||
### Alternative QoS Class Policy
|
||||
|
||||
An alternative is to have user-specified numerical priorities that guide Kubelet on which tasks to kill (if the node runs out of memory, lower priority tasks will be killed). A strict hierarchy of user-specified numerical priorities is not desirable because:
|
||||
|
||||
1. Achieved behavior would be emergent based on how users assigned priorities to their containers. No particular SLO could be delivered by the system, and usage would be subject to gaming if not restricted administratively
|
||||
2. Changes to desired priority bands would require changes to all user container configurations.
|
||||
|
||||
|
||||
## Under Development
|
||||
|
||||
This feature is still under development.
|
||||
Following are some of the primary issues.
|
||||
|
||||
* Our current design supports QoS per-resource.
|
||||
Given that the unified cgroup hierarchy is on the horizon, a per-resource QoS cannot be supported.
|
||||
[#14943](https://github.com/kubernetes/kubernetes/pull/14943) has more information.
|
||||
|
||||
* Scheduler does not take usage into account.
|
||||
The scheduler can pile up BestEffort tasks on a node and cause resource pressure.
|
||||
[#14081](https://github.com/kubernetes/kubernetes/issues/14081) needs to be resolved for the scheduler to start utilizing the node's usage.
|
||||
|
||||
The semantics of this feature can change in subsequent releases.
|
||||
|
||||
## Implementation Issues and Extensions
|
||||
|
||||
The above implementation provides for basic oversubscription with protection, but there are a number of issues. Below is a list of issues and TODOs for each of them. The first iteration of QoS will not solve these problems, but we aim to solve them in subsequent iterations of QoS. This list is not exhaustive. We expect to add issues to the list, and reference issues and PRs associated with items on this list.
|
||||
|
||||
Supporting other platforms:
|
||||
- **RKT**: The proposal focuses on Docker. TODO: add support for RKT.
|
||||
- **Systemd**: Systemd platforms need to be handled in a different way. Handling distributions of Linux based on systemd is critical, because major Linux distributions like Debian and Ubuntu are moving to systemd. TODO: Add code to handle systemd based operating systems.
|
||||
|
||||
Protecting containers and guarantees:
|
||||
- **Control loops**: The OOM score assignment is not perfect for burstable containers, and system OOM kills are expensive. TODO: Add a control loop to reduce memory pressure, while ensuring guarantees for various containers.
|
||||
- **Kubelet, Kube-proxy, Docker daemon protection**: If a system is overcommitted with memory-guaranteed containers, then all processes will have an OOM_SCORE of 0, so the Docker daemon could be killed instead of a container or pod. TODO: Place all user-pods into a separate cgroup, and set a limit on the memory they can consume. Initially, the limits can be based on estimated memory usage of Kubelet, Kube-proxy, and CPU limits; eventually we can monitor the resources they consume.
|
||||
- **OOM Assignment Races**: We cannot set OOM_SCORE_ADJ of a process until it has launched. This could lead to races. For example, suppose that a memory burstable container is using 70% of the system’s memory, and another burstable container is using 30% of the system’s memory. A best-effort burstable container attempts to launch on the Kubelet. Initially the best-effort container is using 2% of memory, and has an OOM_SCORE_ADJ of 20. So its OOM_SCORE is lower than the burstable pod using 70% of system memory. The burstable pod will be evicted by the best-effort pod. Short-term TODO: Implement a restart policy where best-effort pods are immediately evicted if OOM killed, but burstable pods are given a few retries. Long-term TODO: push support for OOM scores in cgroups to the upstream Linux kernel.
|
||||
- **Swap Memory**: The QoS proposal assumes that swap memory is disabled. If swap is enabled, then resource guarantees (for pods that specify resource requirements) will not hold. For example, suppose 2 guaranteed pods have reached their memory limit. They can start allocating memory on swap space. Eventually, if there isn't enough swap space, processes in the pods might get killed. TODO: ensure that swap space is disabled in our cluster setup scripts.
|
||||
|
||||
Killing and eviction mechanics:
|
||||
- **Killing Containers**: Usually, containers cannot function properly if one of the constituent processes in the container is killed. TODO: When a process in a container is out of resource killed (e.g. OOM killed), kill the entire container.
|
||||
- **Out of Resource Eviction**: If a container in a multi-container pod fails, we might want to restart the entire pod instead of just restarting the container. In some cases (e.g. if a memory best-effort container is out of resource killed), we might change the pod to the "failed" phase and pods might need to be evicted. TODO: Draft a policy for out of resource eviction and implement it.
|
||||
|
||||
Maintaining CPU performance:
|
||||
- **CPU-sharing Issues**: Suppose that a node is running 2 containers: a container A requesting 50% of the CPU (but without a CPU limit), and a container B not requesting any resources. Suppose that both pods try to use as much CPU as possible. After the proposal is implemented, A will get 100% of the CPU, and B will get around 0% of the CPU. However, a fairer scheme would give the Burstable container 75% of the CPU and the Best-Effort container 25% of the CPU (since resources past the Burstable container's request are not guaranteed). TODO: think about whether this issue needs to be solved and, if so, implement a solution.
|
||||
- **CPU kills**: System tasks or daemons like the Kubelet could consume more CPU, and we won't be able to guarantee containers the CPU amount they requested. If the situation persists, we might want to kill the container. TODO: Draft a policy for CPU usage killing and implement it.
|
||||
- **CPU limits**: Enabling CPU limits can be problematic, because processes might be hard capped and might stall for a while. TODO: Enable CPU limits intelligently using CPU quota and core allocation.
|
||||
|
||||
Documentation:
|
||||
- **Documentation**: TODO: add user docs for resource QoS
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,89 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "Background"
|
||||
---
|
||||
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Background
|
||||
|
||||
We have a goal to be able to scale to 1000-node clusters by end of 2015.
|
||||
As a result, we need to be able to run some kind of regression tests and deliver
|
||||
a mechanism so that developers can test their changes with respect to performance.
|
||||
|
||||
Ideally, we would like to run performance tests also on PRs - although it might
|
||||
be impossible to run them on every single PR, we may introduce a possibility for
|
||||
a reviewer to trigger them if the change has a non-obvious impact on performance
|
||||
(something like "k8s-bot run scalability tests please" should be feasible).
|
||||
|
||||
However, running performance tests on 1000-node clusters (or even bigger in the
|
||||
future) is a non-starter. Thus, we need some more sophisticated infrastructure
|
||||
to simulate big clusters on relatively small number of machines and/or cores.
|
||||
|
||||
This document describes two approaches to tackling this problem.
|
||||
Once we have a better understanding of their consequences, we may want to
|
||||
decide to drop one of them, but we are not yet in that position.
|
||||
|
||||
|
||||
## Proposal 1 - Kubemark
|
||||
|
||||
In this proposal we are focusing on scalability testing of master components.
|
||||
We do NOT focus on node-scalability - this issue should be handled separately.
|
||||
|
||||
Since we do not focus on node performance, we don't need a real Kubelet or
|
||||
KubeProxy - in fact we don't even need to start real containers.
|
||||
All we actually need is to have some Kubelet-like and KubeProxy-like components
|
||||
that will simulate the load on the apiserver that their real equivalents are
|
||||
generating (e.g. sending NodeStatus updates, watching for pods, watching for
|
||||
endpoints (KubeProxy), etc.).
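
As a very rough illustration (the `apiClient` interface and helper names below are hypothetical, not the real Kubernetes client library), a KubeletSim could be little more than a loop that posts status and watches pods without ever starting containers:

{% highlight go %}
{% raw %}
package main

import "time"

// apiClient is a hypothetical stand-in for the real apiserver client.
type apiClient interface {
	UpdateNodeStatus(node string) error
	WatchPods(node string, onChange func(podName string)) error
}

// runKubeletSim generates Kubelet-like apiserver traffic for one fake node.
func runKubeletSim(client apiClient, node string, statusPeriod time.Duration) {
	// React to pod changes, but do not start any containers.
	go client.WatchPods(node, func(podName string) {
		// A real Kubelet would start containers here; the sim does nothing.
	})
	for range time.Tick(statusPeriod) {
		// Same write load as a real Kubelet posting NodeStatus.
		client.UpdateNodeStatus(node)
	}
}

func main() {}
{% endraw %}
{% endhighlight %}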
|
||||
|
||||
What needs to be done:
|
||||
|
||||
1. Determine what requests both KubeProxy and Kubelet are sending to apiserver.
|
||||
2. Create a KubeletSim that generates the same load on the apiserver as the
|
||||
real Kubelet, but does not start any containers. In the initial version we
|
||||
can assume that pods never die, so it is enough to just react on the state
|
||||
changes read from apiserver.
|
||||
TBD: Maybe we can reuse a real Kubelet for it by just injecting some "fake"
|
||||
interfaces to it?
|
||||
3. Similarly, create a KubeProxySim that generates the same load on the apiserver
|
||||
as a real KubeProxy. Again, since we are not planning to talk to those
|
||||
containers, it basically doesn't need to do anything apart from that.
|
||||
TBD: Maybe we can reuse a real KubeProxy for it by just injecting some "fake"
|
||||
interfaces to it?
|
||||
4. Refactor kube-up/kube-down scripts (or create new ones) to allow starting
|
||||
a cluster with KubeletSim and KubeProxySim instead of real ones and put
|
||||
a bunch of them on a single machine.
|
||||
5. Create a load generator for it (probably initially it would be enough to
|
||||
reuse tests that we use in gce-scalability suite).
|
||||
|
||||
|
||||
## Proposal 2 - Oversubscribing
|
||||
|
||||
The other method we are proposing is to oversubscribe node resources,
|
||||
or in essence enable a single host to look like many separate nodes even though
|
||||
they all reside on a single host. This is a well-established pattern in many different
|
||||
cluster managers (for more details see
|
||||
http://www.uscms.org/SoftwareComputing/Grid/WMS/glideinWMS/doc.prd/index.html ).
|
||||
There are a couple of different ways to accomplish this, but the most viable method
|
||||
is to run privileged kubelet pods under a host's kubelet process. These pods then
|
||||
register back with the master via the introspective service using modified names
|
||||
so as not to collide.
|
||||
|
||||
Complications may currently exist around container tracking and ownership in Docker.
|
||||
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
kubectl.md
|
||||
kubectl_apply.md
|
||||
kubectl_annotate.md
|
||||
kubectl_api-versions.md
|
||||
kubectl_attach.md
|
||||
kubectl_cluster-info.md
|
||||
kubectl_config.md
|
||||
kubectl_config_set-cluster.md
|
||||
kubectl_config_set-context.md
|
||||
kubectl_config_set-credentials.md
|
||||
kubectl_config_set.md
|
||||
kubectl_config_unset.md
|
||||
kubectl_config_use-context.md
|
||||
kubectl_config_view.md
|
||||
kubectl_create.md
|
||||
kubectl_delete.md
|
||||
kubectl_describe.md
|
||||
kubectl_edit.md
|
||||
kubectl_exec.md
|
||||
kubectl_expose.md
|
||||
kubectl_get.md
|
||||
kubectl_label.md
|
||||
kubectl_logs.md
|
||||
kubectl_namespace.md
|
||||
kubectl_patch.md
|
||||
kubectl_port-forward.md
|
||||
kubectl_proxy.md
|
||||
kubectl_replace.md
|
||||
kubectl_rolling-update.md
|
||||
kubectl_run.md
|
||||
kubectl_scale.md
|
||||
kubectl_stop.md
|
||||
kubectl_version.md
|
|
@ -0,0 +1,97 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl
|
||||
|
||||
kubectl controls the Kubernetes cluster manager
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
kubectl controls the Kubernetes cluster manager.
|
||||
|
||||
Find more information at https://github.com/kubernetes/kubernetes.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl annotate](kubectl_annotate.html) - Update the annotations on a resource
|
||||
* [kubectl api-versions](kubectl_api-versions.html) - Print the supported API versions on the server, in the form of "group/version".
|
||||
* [kubectl apply](kubectl_apply.html) - Apply a configuration to a resource by filename or stdin
|
||||
* [kubectl attach](kubectl_attach.html) - Attach to a running container.
|
||||
* [kubectl autoscale](kubectl_autoscale.html) - Auto-scale a replication controller
|
||||
* [kubectl cluster-info](kubectl_cluster-info.html) - Display cluster info
|
||||
* [kubectl config](kubectl_config.html) - config modifies kubeconfig files
|
||||
* [kubectl create](kubectl_create.html) - Create a resource by filename or stdin
|
||||
* [kubectl delete](kubectl_delete.html) - Delete resources by filenames, stdin, resources and names, or by resources and label selector.
|
||||
* [kubectl describe](kubectl_describe.html) - Show details of a specific resource or group of resources
|
||||
* [kubectl edit](kubectl_edit.html) - Edit a resource on the server
|
||||
* [kubectl exec](kubectl_exec.html) - Execute a command in a container.
|
||||
* [kubectl expose](kubectl_expose.html) - Take a replication controller, service or pod and expose it as a new Kubernetes Service
|
||||
* [kubectl get](kubectl_get.html) - Display one or many resources
|
||||
* [kubectl label](kubectl_label.html) - Update the labels on a resource
|
||||
* [kubectl logs](kubectl_logs.html) - Print the logs for a container in a pod.
|
||||
* [kubectl namespace](kubectl_namespace.html) - SUPERSEDED: Set and view the current Kubernetes namespace
|
||||
* [kubectl patch](kubectl_patch.html) - Update field(s) of a resource by stdin.
|
||||
* [kubectl port-forward](kubectl_port-forward.html) - Forward one or more local ports to a pod.
|
||||
* [kubectl proxy](kubectl_proxy.html) - Run a proxy to the Kubernetes API server
|
||||
* [kubectl replace](kubectl_replace.html) - Replace a resource by filename or stdin.
|
||||
* [kubectl rolling-update](kubectl_rolling-update.html) - Perform a rolling update of the given ReplicationController.
|
||||
* [kubectl run](kubectl_run.html) - Run a particular image on the cluster.
|
||||
* [kubectl scale](kubectl_scale.html) - Set a new size for a Replication Controller.
|
||||
* [kubectl stop](kubectl_stop.html) - Deprecated: Gracefully shut down a resource by name or filename.
|
||||
* [kubectl version](kubectl_version.html) - Print the client and server version information.
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-10-27 18:17:28.82451834 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,118 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl annotate"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl annotate
|
||||
|
||||
Update the annotations on a resource
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Update the annotations on one or more resources.
|
||||
|
||||
An annotation is a key/value pair that can hold larger (compared to a label), and possibly not human-readable, data.
|
||||
It is intended to store non-identifying auxiliary data, especially data manipulated by tools and system extensions.
|
||||
If --overwrite is true, then existing annotations can be overwritten, otherwise attempting to overwrite an annotation will result in an error.
|
||||
If --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.
|
||||
|
||||
Possible resources include (case insensitive): pods (po), services (svc),
|
||||
replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs),
|
||||
limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc),
|
||||
horizontalpodautoscalers (hpa), resourcequotas (quota) or secrets.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl annotate [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--resource-version=version]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Update pod 'foo' with the annotation 'description' and the value 'my frontend'.
|
||||
# If the same annotation is set multiple times, only the last value will be applied
|
||||
$ kubectl annotate pods foo description='my frontend'
|
||||
|
||||
# Update a pod identified by type and name in "pod.json"
|
||||
$ kubectl annotate -f pod.json description='my frontend'
|
||||
|
||||
# Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value.
|
||||
$ kubectl annotate --overwrite pods foo description='my frontend running nginx'
|
||||
|
||||
# Update all pods in the namespace
|
||||
$ kubectl annotate pods --all description='my frontend running nginx'
|
||||
|
||||
# Update pod 'foo' only if the resource is unchanged from version 1.
|
||||
$ kubectl annotate pods foo description='my frontend running nginx' --resource-version=1
|
||||
|
||||
# Update pod 'foo' by removing an annotation named 'description' if it exists.
|
||||
# Does not require the --overwrite flag.
|
||||
$ kubectl annotate pods foo description-
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--all[=false]: select all resources in the namespace of the specified resource types
|
||||
-f, --filename=[]: Filename, directory, or URL to a file identifying the resource to update the annotation
|
||||
--overwrite[=false]: If true, allow annotations to be overwritten, otherwise reject annotation updates that overwrite existing annotations.
|
||||
--resource-version="": If non-empty, the annotation update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-11-27 14:20:04.945134857 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl api-versions"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl api-versions
|
||||
|
||||
Print the supported API versions on the server, in the form of "group/version".
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Print the supported API versions on the server, in the form of "group/version".
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl api-versions
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-10-27 18:17:28.824241345 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,96 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl apply"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl apply
|
||||
|
||||
Apply a configuration to a resource by filename or stdin
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Apply a configuration to a resource by filename or stdin.
|
||||
|
||||
JSON and YAML formats are accepted.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl apply -f FILENAME
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Apply the configuration in pod.json to a pod.
|
||||
$ kubectl apply -f ./pod.json
|
||||
|
||||
# Apply the JSON passed into stdin to a pod.
|
||||
$ cat pod.json | kubectl apply -f -
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
-f, --filename=[]: Filename, directory, or URL to file that contains the configuration to apply
|
||||
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
|
||||
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
|
||||
--validate[=true]: If true, use a schema to validate the input before sending it
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra on 10-Oct-2015
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl attach"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl attach
|
||||
|
||||
Attach to a running container.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Attach to a process that is already running inside an existing container.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl attach POD -c CONTAINER
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Get output from running pod 123456-7890, using the first container by default
|
||||
$ kubectl attach 123456-7890
|
||||
|
||||
# Get output from ruby-container from pod 123456-7890
|
||||
$ kubectl attach 123456-7890 -c ruby-container date
|
||||
|
||||
# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780
|
||||
# and sends stdout/stderr from 'bash' back to the client
|
||||
$ kubectl attach 123456-7890 -c ruby-container -i -t
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
-c, --container="": Container name
|
||||
-i, --stdin[=false]: Pass stdin to the container
|
||||
-t, --tty[=false]: Stdin is a TTY
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.155651469 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,106 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl autoscale"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl autoscale
|
||||
|
||||
Auto-scale a replication controller
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Creates an autoscaler that automatically chooses and sets the number of pods that run in a Kubernetes cluster.
|
||||
|
||||
Looks up a replication controller by name and creates an autoscaler that uses this replication controller as a reference.
|
||||
An autoscaler can automatically increase or decrease the number of pods deployed within the system as needed.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU] [flags]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Auto scale a replication controller "foo", with the number of pods between 2 and 10, and target CPU utilization at a default value that the server applies:
|
||||
$ kubectl autoscale rc foo --min=2 --max=10
|
||||
|
||||
# Auto scale a replication controller "foo", with the number of pods between 1 and 5, and target CPU utilization at 80%:
|
||||
$ kubectl autoscale rc foo --max=5 --cpu-percent=80
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--cpu-percent=-1: The target average CPU utilization (represented as a percent of requested CPU) over all the pods. If it's not specified or negative, the server will apply a default value.
|
||||
--dry-run[=false]: If true, only print the object that would be sent, without creating it.
|
||||
-f, --filename=[]: Filename, directory, or URL to a file identifying the resource to get from a server.
|
||||
--generator="horizontalpodautoscaler/v1beta1": The name of the API generator to use. Currently there is only 1 generator.
|
||||
--max=-1: The upper limit for the number of pods that can be set by the autoscaler. Required.
|
||||
--min=-1: The lower limit for the number of pods that can be set by the autoscaler. If it's not specified or negative, the server will apply a default value.
|
||||
--name="": The name for the newly created object. If not specified, the name of the input resource will be used.
|
||||
--no-headers[=false]: When using the default output, don't print headers.
|
||||
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.1/docs/user-guide/jsonpath.md].
|
||||
--output-version="": Output the formatted object with the given version (default api-version).
|
||||
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
|
||||
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
|
||||
--template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
|
||||
{% endraw %}
|
||||
```
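The --dry-run flag listed above pairs naturally with the earlier examples; the following is a minimal sketch (not one of the generated examples) that previews the autoscaler without creating it:

```
{% raw %}
# Print the autoscaler that would be created for "foo" without sending it to the server
$ kubectl autoscale rc foo --min=2 --max=10 --dry-run
{% endraw %}
```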
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra on 16-Oct-2015
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,71 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl cluster-info"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl cluster-info
|
||||
|
||||
Display cluster info
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Display addresses of the master and services with label kubernetes.io/cluster-service=true
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl cluster-info
|
||||
{% endraw %}
|
||||
```
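This page has no generated examples of its own; as a hedged sketch, the inherited flags listed below can point the command at a different cluster (the context name "e2e" is a placeholder, not a context defined in this reference):

```
{% raw %}
# Show the master and cluster-service addresses for the current context
$ kubectl cluster-info

# Query a specific kubeconfig context instead ("e2e" is an assumed context name)
$ kubectl cluster-info --context=e2e
{% endraw %}
```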
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163962347 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,91 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl config"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl config
|
||||
|
||||
config modifies kubeconfig files
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
config modifies kubeconfig files using subcommands like "kubectl config set current-context my-context"
|
||||
|
||||
The loading order follows these rules:
|
||||
1. If the --kubeconfig flag is set, then only that file is loaded. The flag may only be set once and no merging takes place.
|
||||
2. If the $KUBECONFIG environment variable is set, then it is used as a list of paths (following the normal path-delimiting rules for your system). These paths are merged together. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list (see the sketch after this list).
|
||||
3. Otherwise, ${HOME}/.kube/config is used and no merging takes place.
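As a hedged sketch of rule 2 (the second file path is a placeholder, and ':' is the path delimiter on Linux/macOS-style systems):

```
{% raw %}
# Merge two kubeconfig files for this invocation and display the combined result;
# a set/unset run with the same KUBECONFIG would write to the file that defines the stanza
$ KUBECONFIG=${HOME}/.kube/config:${HOME}/.kube/e2e-config kubectl config view
{% endraw %}
```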
|
||||
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl config SUBCOMMAND
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--kubeconfig="": use a particular kubeconfig file
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
* [kubectl config set](kubectl_config_set.html) - Sets an individual value in a kubeconfig file
|
||||
* [kubectl config set-cluster](kubectl_config_set-cluster.html) - Sets a cluster entry in kubeconfig
|
||||
* [kubectl config set-context](kubectl_config_set-context.html) - Sets a context entry in kubeconfig
|
||||
* [kubectl config set-credentials](kubectl_config_set-credentials.html) - Sets a user entry in kubeconfig
|
||||
* [kubectl config unset](kubectl_config_unset.html) - Unsets an individual value in a kubeconfig file
|
||||
* [kubectl config use-context](kubectl_config_use-context.html) - Sets the current-context in a kubeconfig file
|
||||
* [kubectl config view](kubectl_config_view.html) - Displays merged kubeconfig settings or a specified kubeconfig file.
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-23 08:09:58.253683538 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,95 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl config set-cluster"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl config set-cluster
|
||||
|
||||
Sets a cluster entry in kubeconfig
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Sets a cluster entry in kubeconfig.
|
||||
Specifying a name that already exists will merge new fields on top of existing values for those fields.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl config set-cluster NAME [--server=server] [--certificate-authority=path/to/certificate/authority] [--api-version=apiversion] [--insecure-skip-tls-verify=true]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Set only the server field on the e2e cluster entry without touching other values.
|
||||
$ kubectl config set-cluster e2e --server=https://1.2.3.4
|
||||
|
||||
# Embed certificate authority data for the e2e cluster entry
|
||||
$ kubectl config set-cluster e2e --certificate-authority=~/.kube/e2e/kubernetes.ca.crt
|
||||
|
||||
# Disable cert checking for the e2e cluster entry
|
||||
$ kubectl config set-cluster e2e --insecure-skip-tls-verify=true
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--api-version="": api-version for the cluster entry in kubeconfig
|
||||
--certificate-authority="": path to certificate-authority for the cluster entry in kubeconfig
|
||||
--embed-certs=false: embed-certs for the cluster entry in kubeconfig
|
||||
--insecure-skip-tls-verify=false: insecure-skip-tls-verify for the cluster entry in kubeconfig
|
||||
--server="": server for the cluster entry in kubeconfig
|
||||
{% endraw %}
|
||||
```
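The --embed-certs flag only appears in the list above; as a hedged sketch of how it combines with --certificate-authority (reusing the e2e entry and certificate path from the examples):

```
{% raw %}
# Copy the CA data into the kubeconfig entry instead of referencing the file path
$ kubectl config set-cluster e2e --certificate-authority=~/.kube/e2e/kubernetes.ca.crt --embed-certs=true
{% endraw %}
```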
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--kubeconfig="": use a particular kubeconfig file
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl config](kubectl_config.html) - config modifies kubeconfig files
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.161700827 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,88 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl config set-context"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl config set-context
|
||||
|
||||
Sets a context entry in kubeconfig
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Sets a context entry in kubeconfig
|
||||
Specifying a name that already exists will merge new fields on top of existing values for those fields.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl config set-context NAME [--cluster=cluster_nickname] [--user=user_nickname] [--namespace=namespace]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Set the user field on the gce context entry without touching other values
|
||||
$ kubectl config set-context gce --user=cluster-admin
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--cluster="": cluster for the context entry in kubeconfig
|
||||
--namespace="": namespace for the context entry in kubeconfig
|
||||
--user="": user for the context entry in kubeconfig
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": use a particular kubeconfig file
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl config](kubectl_config.html) - config modifies kubeconfig files
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.162402642 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,108 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl config set-credentials"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl config set-credentials
|
||||
|
||||
Sets a user entry in kubeconfig
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Sets a user entry in kubeconfig
|
||||
Specifying a name that already exists will merge new fields on top of existing values.
|
||||
|
||||
Client-certificate flags:
|
||||
--client-certificate=certfile --client-key=keyfile
|
||||
|
||||
Bearer token flags:
|
||||
--token=bearer_token
|
||||
|
||||
Basic auth flags:
|
||||
--username=basic_user --password=basic_password
|
||||
|
||||
Bearer token and basic auth are mutually exclusive.
|
||||
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl config set-credentials NAME [--client-certificate=path/to/certfile] [--client-key=path/to/keyfile] [--token=bearer_token] [--username=basic_user] [--password=basic_password]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Set only the "client-key" field on the "cluster-admin"
|
||||
# entry, without touching other values:
|
||||
$ kubectl config set-credentials cluster-admin --client-key=~/.kube/admin.key
|
||||
|
||||
# Set basic auth for the "cluster-admin" entry
|
||||
$ kubectl config set-credentials cluster-admin --username=admin --password=uXFGweU9l35qcif
|
||||
|
||||
# Embed client certificate data in the "cluster-admin" entry
|
||||
$ kubectl config set-credentials cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--client-certificate="": path to client-certificate for the user entry in kubeconfig
|
||||
--client-key="": path to client-key for the user entry in kubeconfig
|
||||
--embed-certs=false: embed client cert/key for the user entry in kubeconfig
|
||||
--password="": password for the user entry in kubeconfig
|
||||
--token="": token for the user entry in kubeconfig
|
||||
--username="": username for the user entry in kubeconfig
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": use a particular kubeconfig file
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl config](kubectl_config.html) - config modifies kubeconfig files
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.162045132 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,73 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl config set"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl config set
|
||||
|
||||
Sets an individual value in a kubeconfig file
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Sets an individual value in a kubeconfig file
|
||||
PROPERTY_NAME is a dot-delimited name where each token represents either an attribute name or a map key. Map keys may not contain dots.
|
||||
PROPERTY_VALUE is the new value you wish to set.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl config set PROPERTY_NAME PROPERTY_VALUE
|
||||
{% endraw %}
|
||||
```
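This page has no generated examples; a minimal sketch follows. The first command comes from the kubectl config synopsis earlier in this reference; the second property path is an assumption shown only to illustrate the dotted syntax:

```
{% raw %}
# Point the current-context property at an existing context
$ kubectl config set current-context my-context

# Set a nested value ("contexts.my-context.namespace" is an assumed property path)
$ kubectl config set contexts.my-context.namespace my-namespace
{% endraw %}
```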
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": use a particular kubeconfig file
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl config](kubectl_config.html) - config modifies kubeconfig files
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.162716308 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,72 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl config unset"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl config unset
|
||||
|
||||
Unsets an individual value in a kubeconfig file
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Unsets an individual value in a kubeconfig file
|
||||
PROPERTY_NAME is a dot-delimited name where each token represents either an attribute name or a map key. Map keys may not contain dots.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl config unset PROPERTY_NAME
|
||||
{% endraw %}
|
||||
```
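As with kubectl config set, the generated page carries no examples; a minimal sketch (the nested property path mirrors the assumed one used for kubectl config set):

```
{% raw %}
# Clear the current-context property
$ kubectl config unset current-context

# Remove a nested value ("contexts.my-context.namespace" is an assumed property path)
$ kubectl config unset contexts.my-context.namespace
{% endraw %}
```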
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": use a particular kubeconfig file
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl config](kubectl_config.html) - config modifies kubeconfig files
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163015642 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,71 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl config use-context"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl config use-context
|
||||
|
||||
Sets the current-context in a kubeconfig file
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Sets the current-context in a kubeconfig file
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl config use-context CONTEXT_NAME
|
||||
{% endraw %}
|
||||
```
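A minimal sketch, reusing the "my-context" name from the kubectl config synopsis:

```
{% raw %}
# Make "my-context" the context used by subsequent kubectl commands
$ kubectl config use-context my-context
{% endraw %}
```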
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": use a particular kubeconfig file
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl config](kubectl_config.html) - config modifies kubeconfig files
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163336177 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,102 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl config view"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl config view
|
||||
|
||||
Displays merged kubeconfig settings or a specified kubeconfig file.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Displays merged kubeconfig settings or a specified kubeconfig file.
|
||||
|
||||
You can use --output=template --template=TEMPLATE to extract specific values.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl config view
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Show merged kubeconfig settings.
|
||||
$ kubectl config view
|
||||
|
||||
# Get the password for the e2e user
|
||||
$ kubectl config view -o template --template='{{range .users}}{{ if eq .name "e2e" }}{{ index .user.password }}{{end}}{{end}}'
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--flatten[=false]: flatten the resulting kubeconfig file into self contained output (useful for creating portable kubeconfig files)
|
||||
--merge=true: merge together the full hierarchy of kubeconfig files
|
||||
--minify[=false]: remove all information not used by current-context from the output
|
||||
--no-headers[=false]: When using the default output, don't print headers.
|
||||
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.1/docs/user-guide/jsonpath.md].
|
||||
--output-version="": Output the formatted object with the given version (default api-version).
|
||||
--raw[=false]: display raw byte data
|
||||
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
|
||||
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
|
||||
--template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
|
||||
{% endraw %}
|
||||
```
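As a hedged sketch of combining two of the flags above (this is not one of the generated examples):

```
{% raw %}
# Produce a single, self-contained kubeconfig for the current context only,
# e.g. for copying to another machine
$ kubectl config view --minify --flatten
{% endraw %}
```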
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": use a particular kubeconfig file
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl config](kubectl_config.html) - config modifies kubeconfig files
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-25 23:39:47.896358807 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,96 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl create"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl create
|
||||
|
||||
Create a resource by filename or stdin
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create a resource by filename or stdin.
|
||||
|
||||
JSON and YAML formats are accepted.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl create -f FILENAME
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Create a pod using the data in pod.json.
|
||||
$ kubectl create -f ./pod.json
|
||||
|
||||
# Create a pod based on the JSON passed into stdin.
|
||||
$ cat pod.json | kubectl create -f -
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
-f, --filename=[]: Filename, directory, or URL to file to use to create the resource
|
||||
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
|
||||
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
|
||||
--validate[=true]: If true, use a schema to validate the input before sending it
|
||||
{% endraw %}
|
||||
```
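The -o flag above can shorten the output of the earlier examples; a hedged sketch:

```
{% raw %}
# Create the pod from ./pod.json and print only resource/name, which is
# convenient for piping into a follow-up command
$ kubectl create -f ./pod.json -o name
{% endraw %}
```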
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-11 20:48:33.289761103 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,118 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl delete"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl delete
|
||||
|
||||
Delete resources by filenames, stdin, resources and names, or by resources and label selector.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Delete resources by filenames, stdin, resources and names, or by resources and label selector.
|
||||
|
||||
JSON and YAML formats are accepted.
|
||||
|
||||
Only one type of argument may be specified: filenames, resources and names, or resources and a label selector.
|
||||
|
||||
Note that the delete command does NOT do resource version checks, so if someone
|
||||
submits an update to a resource right when you submit a delete, their update
|
||||
will be lost along with the rest of the resource.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl delete ([-f FILENAME] | TYPE [(NAME | -l label | --all)])
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Delete a pod using the type and name specified in pod.json.
|
||||
$ kubectl delete -f ./pod.json
|
||||
|
||||
# Delete a pod based on the type and name in the JSON passed into stdin.
|
||||
$ cat pod.json | kubectl delete -f -
|
||||
|
||||
# Delete pods and services with the same names "baz" and "foo"
|
||||
$ kubectl delete pod,service baz foo
|
||||
|
||||
# Delete pods and services with label name=myLabel.
|
||||
$ kubectl delete pods,services -l name=myLabel
|
||||
|
||||
# Delete a pod with UID 1234-56-7890-234234-456456.
|
||||
$ kubectl delete pod 1234-56-7890-234234-456456
|
||||
|
||||
# Delete all pods
|
||||
$ kubectl delete pods --all
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--all[=false]: [-all] to select all the specified resources.
|
||||
--cascade[=true]: If true, cascade the deletion of the resources managed by this resource (e.g. Pods created by a ReplicationController). Default true.
|
||||
-f, --filename=[]: Filename, directory, or URL to a file containing the resource to delete.
|
||||
--grace-period=-1: Period of time in seconds given to the resource to terminate gracefully. Ignored if negative.
|
||||
--ignore-not-found[=false]: Treat "resource not found" as a successful delete. Defaults to "true" when --all is specified.
|
||||
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
|
||||
-l, --selector="": Selector (label query) to filter on.
|
||||
--timeout=0: The length of time to wait before giving up on a delete, zero means determine a timeout from the size of the object
|
||||
{% endraw %}
|
||||
```
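Two of the flags above are worth a sketch of their own (the "foo" controller name is a placeholder reused from elsewhere in this reference, not from the examples on this page):

```
{% raw %}
# Delete the replication controller "foo" but leave its pods running
$ kubectl delete rc foo --cascade=false

# Allow 30 seconds for graceful termination before removing the pod
$ kubectl delete pod 1234-56-7890-234234-456456 --grace-period=30
{% endraw %}
```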
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153952299 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,118 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl describe"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl describe
|
||||
|
||||
Show details of a specific resource or group of resources
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Show details of a specific resource or group of resources.
|
||||
|
||||
This command joins many API calls together to form a detailed description of a
|
||||
given resource or group of resources.
|
||||
|
||||
$ kubectl describe TYPE NAME_PREFIX
|
||||
|
||||
will first check for an exact match on TYPE and NAME_PREFIX. If no such resource
|
||||
exists, it will output details for every resource that has a name prefixed with NAME_PREFIX
|
||||
|
||||
Possible resource types include (case insensitive): pods (po), services (svc),
|
||||
replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits),
|
||||
persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota),
|
||||
namespaces (ns), serviceaccounts, horizontalpodautoscalers (hpa), or secrets.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME)
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Describe a node
|
||||
$ kubectl describe nodes kubernetes-minion-emt8.c.myproject.internal
|
||||
|
||||
# Describe a pod
|
||||
$ kubectl describe pods/nginx
|
||||
|
||||
# Describe a pod identified by type and name in "pod.json"
|
||||
$ kubectl describe -f pod.json
|
||||
|
||||
# Describe all pods
|
||||
$ kubectl describe pods
|
||||
|
||||
# Describe pods by label name=myLabel
|
||||
$ kubectl describe po -l name=myLabel
|
||||
|
||||
# Describe all pods managed by the 'frontend' replication controller (rc-created pods
|
||||
# get the name of the rc as a prefix in the pod name).
|
||||
$ kubectl describe pods frontend
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
-f, --filename=[]: Filename, directory, or URL to a file containing the resource to describe
|
||||
-l, --selector="": Selector (label query) to filter on
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-11-27 14:20:04.941871459 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,113 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl edit"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl edit
|
||||
|
||||
Edit a resource on the server
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Edit a resource from the default editor.
|
||||
|
||||
The edit command allows you to directly edit any API resource you can retrieve via the
|
||||
command line tools. It will open the editor defined by your KUBE_EDITOR, GIT_EDITOR,
|
||||
or EDITOR environment variables, or fall back to 'vi'. You can edit multiple objects,
|
||||
although changes are applied one at a time. The command accepts filenames as well as
|
||||
command line arguments, although the files you point to must be previously saved
|
||||
versions of resources.
|
||||
|
||||
The files to edit will be output in the default API version, or a version specified
|
||||
by --output-version. The default format is YAML - if you would like to edit in JSON
|
||||
pass -o json.
|
||||
|
||||
In the event an error occurs while updating, a temporary file will be created on disk
|
||||
that contains your unapplied changes. The most common error when updating a resource
|
||||
is another editor changing the resource on the server. When this occurs, you will have
|
||||
to apply your changes to the newer version of the resource, or update your temporary
|
||||
saved copy to include the latest resource version.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl edit (RESOURCE/NAME | -f FILENAME)
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Edit the service named 'docker-registry':
|
||||
$ kubectl edit svc/docker-registry
|
||||
|
||||
# Use an alternative editor
|
||||
$ KUBE_EDITOR="nano" kubectl edit svc/docker-registry
|
||||
|
||||
# Edit the service 'docker-registry' in JSON using the v1 API format:
|
||||
$ kubectl edit svc/docker-registry --output-version=v1 -o json
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
-f, --filename=[]: Filename, directory, or URL to file to use to edit the resource
|
||||
-o, --output="yaml": Output format. One of: yaml|json.
|
||||
--output-version="": Output the formatted object with the given version (default api-version).
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-16 00:43:02.024642139 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
@@ -0,0 +1,98 @@
---
|
||||
layout: docwithnav
|
||||
title: "kubectl exec"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl exec
|
||||
|
||||
Execute a command in a container.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Execute a command in a container.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl exec POD [-c CONTAINER] -- COMMAND [args...]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Get output from running 'date' from pod 123456-7890, using the first container by default
|
||||
$ kubectl exec 123456-7890 date
|
||||
|
||||
# Get output from running 'date' in ruby-container from pod 123456-7890
|
||||
$ kubectl exec 123456-7890 -c ruby-container date
|
||||
|
||||
# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-7890
|
||||
# and sends stdout/stderr from 'bash' back to the client
|
||||
$ kubectl exec 123456-7890 -c ruby-container -i -t -- bash -il
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
-c, --container="": Container name. If omitted, the first container in the pod will be chosen
|
||||
-p, --pod="": Pod name
|
||||
-i, --stdin[=false]: Pass stdin to the container
|
||||
-t, --tty[=false]: Stdin is a TTY
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.156052759 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl expose"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl expose
|
||||
|
||||
Take a replication controller, service or pod and expose it as a new Kubernetes Service
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Take a replication controller, service or pod and expose it as a new Kubernetes Service.

Looks up a replication controller, service or pod by name and uses the selector for that resource as the
selector for a new Service on the specified port. If no labels are specified, the new service will
re-use the labels from the resource it exposes.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000.
|
||||
$ kubectl expose rc nginx --port=80 --target-port=8000
|
||||
|
||||
# Create a service for a replication controller identified by type and name specified in "nginx-controller.yaml", which serves on port 80 and connects to the containers on port 8000.
|
||||
$ kubectl expose -f nginx-controller.yaml --port=80 --target-port=8000
|
||||
|
||||
# Create a service for a pod valid-pod, which serves on port 444 with the name "frontend"
|
||||
$ kubectl expose pod valid-pod --port=444 --name=frontend
|
||||
|
||||
# Create a second service based on the above service, exposing the container port 8443 as port 443 with the name "nginx-https"
|
||||
$ kubectl expose service nginx --port=443 --target-port=8443 --name=nginx-https
|
||||
|
||||
# Create a service for a replicated streaming application on port 4100 balancing UDP traffic and named 'video-stream'.
|
||||
$ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
|
||||
{% endraw %}
|
||||
```
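
Because the selector and labels are copied from the resource being exposed, a quick way to see exactly what `kubectl expose` will create is a dry run. A minimal sketch, assuming a replication controller named 'nginx' already exists (the name is illustrative):

```
{% raw %}
# Print the Service that would be created, without sending it to the server.
# The selector and labels in the output are inherited from the 'nginx' controller.
$ kubectl expose rc nginx --port=80 --target-port=8000 --dry-run -o yaml
{% endraw %}
```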
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--container-port="": Synonym for --target-port
|
||||
--dry-run[=false]: If true, only print the object that would be sent, without creating it.
|
||||
--external-ip="": External IP address to set for the service. The service can be accessed by this IP in addition to its generated service IP.
|
||||
-f, --filename=[]: Filename, directory, or URL to a file identifying the resource to expose a service
|
||||
--generator="service/v2": The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'.
|
||||
-l, --labels="": Labels to apply to the service created by this call.
|
||||
      --load-balancer-ip="": IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).
|
||||
--name="": The name for the newly created object.
|
||||
--no-headers[=false]: When using the default output, don't print headers.
|
||||
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.1/docs/user-guide/jsonpath.md].
|
||||
--output-version="": Output the formatted object with the given version (default api-version).
|
||||
--overrides="": An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.
|
||||
--port=-1: The port that the service should serve on. Copied from the resource being exposed, if unspecified
|
||||
--protocol="TCP": The network protocol for the service to be created. Default is 'tcp'.
|
||||
--selector="": A label selector to use for this service. If empty (the default) infer the selector from the replication controller.
|
||||
--session-affinity="": If non-empty, set the session affinity for the service to this; legal values: 'None', 'ClientIP'
|
||||
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
|
||||
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
|
||||
--target-port="": Name or number for the port on the container that the service should direct traffic to. Optional.
|
||||
--template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
|
||||
--type="": Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is 'ClusterIP'.
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-25 23:39:47.895322301 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl get"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl get
|
||||
|
||||
Display one or many resources
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Display one or many resources.

Possible resource types include (case insensitive): pods (po), services (svc),
replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs),
limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc),
resourcequotas (quota), namespaces (ns), endpoints (ep),
horizontalpodautoscalers (hpa), serviceaccounts or secrets.

By specifying the output as 'template' and providing a Go template as the value
of the --template flag, you can filter the attributes of the fetched resource(s).
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl get [(-o|--output=)json|yaml|wide|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE [NAME | -l label] | TYPE/NAME ...) [flags]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# List all pods in ps output format.
|
||||
$ kubectl get pods
|
||||
|
||||
# List all pods in ps output format with more information (such as node name).
|
||||
$ kubectl get pods -o wide
|
||||
|
||||
# List all pods in resource/name format (such as pod/nginx).
|
||||
$ kubectl get pods -o name
|
||||
|
||||
# List a single replication controller with specified NAME in ps output format.
|
||||
$ kubectl get replicationcontroller web
|
||||
|
||||
# List a single pod in JSON output format.
|
||||
$ kubectl get -o json pod web-pod-13je7
|
||||
|
||||
# List a pod identified by type and name specified in "pod.yaml" in JSON output format.
|
||||
$ kubectl get -f pod.yaml -o json
|
||||
|
||||
# Return only the phase value of the specified pod.
|
||||
$ kubectl get -o template pod/web-pod-13je7 --template={{.status.phase}} --api-version=v1
|
||||
|
||||
# List all replication controllers and services together in ps output format.
|
||||
$ kubectl get rc,services
|
||||
|
||||
# List one or more resources by their type and names.
|
||||
$ kubectl get rc/web service/frontend pods/web-pod-13je7
|
||||
{% endraw %}
|
||||
```
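
The `-o jsonpath` output listed under Options can be used in the same way as a Go template to pull out individual fields. A small sketch (the field path is standard JSONPath; the pods themselves are whatever is in your namespace):

```
{% raw %}
# Print only the names of all pods, space separated, using a JSONPath template.
$ kubectl get pods -o jsonpath='{.items[*].metadata.name}'
{% endraw %}
```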
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--all-namespaces[=false]: If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.
|
||||
-f, --filename=[]: Filename, directory, or URL to a file identifying the resource to get from a server.
|
||||
-L, --label-columns=[]: Accepts a comma separated list of labels that are going to be presented as columns. Names are case-sensitive. You can also use multiple flag statements like -L label1 -L label2...
|
||||
--no-headers[=false]: When using the default output, don't print headers.
|
||||
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.1/docs/user-guide/jsonpath.md].
|
||||
--output-version="": Output the formatted object with the given version (default api-version).
|
||||
-l, --selector="": Selector (label query) to filter on
|
||||
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
|
||||
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
|
||||
--template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
|
||||
-w, --watch[=false]: After listing/getting the requested object, watch for changes.
|
||||
--watch-only[=false]: Watch for changes to the requested object(s), without listing/getting first.
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-11-27 14:20:04.941698058 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl label"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl label
|
||||
|
||||
Update the labels on a resource
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Update the labels on a resource.

A label must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to 63 characters.
If --overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error.
If --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl label [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--resource-version=version]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Update pod 'foo' with the label 'unhealthy' and the value 'true'.
|
||||
$ kubectl label pods foo unhealthy=true
|
||||
|
||||
# Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value.
|
||||
$ kubectl label --overwrite pods foo status=unhealthy
|
||||
|
||||
# Update all pods in the namespace
|
||||
$ kubectl label pods --all status=unhealthy
|
||||
|
||||
# Update a pod identified by the type and name in "pod.json"
|
||||
$ kubectl label -f pod.json status=unhealthy
|
||||
|
||||
# Update pod 'foo' only if the resource is unchanged from version 1.
|
||||
$ kubectl label pods foo status=unhealthy --resource-version=1
|
||||
|
||||
# Update pod 'foo' by removing a label named 'bar' if it exists.
|
||||
# Does not require the --overwrite flag.
|
||||
$ kubectl label pods foo bar-
|
||||
{% endraw %}
|
||||
```
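
The --overwrite behaviour described above can be checked directly; a short sketch, reusing the 'foo' pod and 'status' label from the examples:

```
{% raw %}
# The first command is rejected because the 'status' label already exists on the pod;
# adding --overwrite lets the update through.
$ kubectl label pods foo status=healthy
$ kubectl label --overwrite pods foo status=healthy
{% endraw %}
```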
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--all[=false]: select all resources in the namespace of the specified resource types
|
||||
--dry-run[=false]: If true, only print the object that would be sent, without sending it.
|
||||
-f, --filename=[]: Filename, directory, or URL to a file identifying the resource to update the labels
|
||||
--no-headers[=false]: When using the default output, don't print headers.
|
||||
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.1/docs/user-guide/jsonpath.md].
|
||||
--output-version="": Output the formatted object with the given version (default api-version).
|
||||
--overwrite[=false]: If true, allow labels to be overwritten, otherwise reject label updates that overwrite existing labels.
|
||||
--resource-version="": If non-empty, the labels update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.
|
||||
-l, --selector="": Selector (label query) to filter on
|
||||
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
|
||||
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
|
||||
--template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-25 23:39:47.89548479 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl logs"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl logs
|
||||
|
||||
Print the logs for a container in a pod.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Print the logs for a container in a pod. If the pod has only one container, the container name is optional.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl logs [-f] [-p] POD [-c CONTAINER]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Return snapshot logs from pod nginx with only one container
|
||||
$ kubectl logs nginx
|
||||
|
||||
# Return snapshot of previous terminated ruby container logs from pod web-1
|
||||
$ kubectl logs -p -c ruby web-1
|
||||
|
||||
# Begin streaming the logs of the ruby container in pod web-1
|
||||
$ kubectl logs -f -c ruby web-1
|
||||
|
||||
# Display only the most recent 20 lines of output in pod nginx
|
||||
$ kubectl logs --tail=20 nginx
|
||||
|
||||
# Show all logs from pod nginx written in the last hour
|
||||
$ kubectl logs --since=1h nginx
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
-c, --container="": Container name
|
||||
-f, --follow[=false]: Specify if the logs should be streamed.
|
||||
--interactive[=true]: If true, prompt the user for input when required. Default true.
|
||||
--limit-bytes=0: Maximum bytes of logs to return. Defaults to no limit.
|
||||
-p, --previous[=false]: If true, print the logs for the previous instance of the container in a pod if it exists.
|
||||
--since=0: Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of since-time / since may be used.
|
||||
--since-time="": Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of since-time / since may be used.
|
||||
--tail=-1: Lines of recent log file to display. Defaults to -1, showing all log lines.
|
||||
--timestamps[=false]: Include timestamps on each line in the log output
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-16 18:54:52.319210951 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl namespace"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl namespace
|
||||
|
||||
SUPERSEDED: Set and view the current Kubernetes namespace
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
SUPERSEDED: Set and view the current Kubernetes namespace scope for command line requests.

namespace has been superseded by the context.namespace field of .kubeconfig files. See 'kubectl config set-context --help' for more details.
|
||||
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl namespace [namespace]
|
||||
{% endraw %}
|
||||
```
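
Since this command is superseded, the equivalent kubeconfig-based workflow is to set the namespace on a context. A minimal sketch, assuming a context named 'dev' already exists in your kubeconfig (both the context and namespace names are illustrative):

```
{% raw %}
# Record the default namespace for the 'dev' context, then switch to that context.
$ kubectl config set-context dev --namespace=staging
$ kubectl config use-context dev
{% endraw %}
```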
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154262869 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl patch"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl patch
|
||||
|
||||
Update field(s) of a resource using strategic merge patch.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Update field(s) of a resource using strategic merge patch

JSON and YAML formats are accepted.

Please refer to the models in http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html to find if a field is mutable.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl patch (-f FILENAME | TYPE NAME) -p PATCH
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
|
||||
# Partially update a node using strategic merge patch
|
||||
kubectl patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'
|
||||
|
||||
# Partially update a node identified by the type and name specified in "node.json" using strategic merge patch
|
||||
kubectl patch -f node.json -p '{"spec":{"unschedulable":true}}'
|
||||
|
||||
# Update a container's image; spec.containers[*].name is required because it's a merge key
|
||||
kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}'
|
||||
{% endraw %}
|
||||
```
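
Because a strategic merge patch only touches the fields it names, you can verify the result by reading back just the patched field. A sketch, reusing the node from the first example and the template output style shown elsewhere in these docs:

```
{% raw %}
# Confirm that spec.unschedulable was changed on the patched node.
$ kubectl get node k8s-node-1 -o template --template={{.spec.unschedulable}} --api-version=v1
{% endraw %}
```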
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
-f, --filename=[]: Filename, directory, or URL to a file identifying the resource to update
|
||||
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
|
||||
-p, --patch="": The patch to be applied to the resource JSON file.
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-11-27 14:20:04.942303171 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl port-forward"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl port-forward
|
||||
|
||||
Forward one or more local ports to a pod.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Forward one or more local ports to a pod.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl port-forward POD [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
|
||||
# Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod
|
||||
$ kubectl port-forward mypod 5000 6000
|
||||
|
||||
# Listen on port 8888 locally, forwarding to 5000 in the pod
|
||||
$ kubectl port-forward mypod 8888:5000
|
||||
|
||||
# Listen on a random port locally, forwarding to 5000 in the pod
|
||||
$ kubectl port-forward mypod :5000
|
||||
|
||||
# Listen on a random port locally, forwarding to 5000 in the pod
|
||||
$ kubectl port-forward mypod 0:5000
|
||||
{% endraw %}
|
||||
```
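
To check that a forward is working, run it in the background and hit the local port. A minimal sketch, assuming the pod serves plain HTTP on its port 5000:

```
{% raw %}
# Forward local port 8888 to port 5000 in the pod, then test it from the same shell.
$ kubectl port-forward mypod 8888:5000 &
$ curl http://localhost:8888/
{% endraw %}
```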
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
-p, --pod="": Pod name
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.156433376 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl proxy"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl proxy
|
||||
|
||||
Run a proxy to the Kubernetes API server
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
To proxy all of the kubernetes api and nothing else, use:

kubectl proxy --api-prefix=/

To proxy only part of the kubernetes api and also some static files:

kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/

The above lets you 'curl localhost:8001/api/v1/pods'.

To proxy the entire kubernetes api at a different root, use:

kubectl proxy --api-prefix=/custom/

The above lets you 'curl localhost:8001/custom/api/v1/pods'
|
||||
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Run a proxy to kubernetes apiserver on port 8011, serving static content from ./local/www/
|
||||
$ kubectl proxy --port=8011 --www=./local/www/
|
||||
|
||||
# Run a proxy to kubernetes apiserver on an arbitrary local port.
|
||||
# The chosen port for the server will be output to stdout.
|
||||
$ kubectl proxy --port=0
|
||||
|
||||
# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api
|
||||
# This makes e.g. the pods api available at localhost:8011/k8s-api/v1/pods/
|
||||
$ kubectl proxy --api-prefix=/k8s-api
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--accept-hosts="^localhost$,^127\\.0\\.0\\.1$,^\\[::1\\]$": Regular expression for hosts that the proxy should accept.
|
||||
--accept-paths="^/.*": Regular expression for paths that the proxy should accept.
|
||||
--api-prefix="/": Prefix to serve the proxied API under.
|
||||
--disable-filter[=false]: If true, disable request filtering in the proxy. This is dangerous, and can leave you vulnerable to XSRF attacks, when used with an accessible port.
|
||||
-p, --port=8001: The port on which to run the proxy. Set to 0 to pick a random port.
|
||||
--reject-methods="POST,PUT,PATCH": Regular expression for HTTP methods that the proxy should reject.
|
||||
--reject-paths="^/api/.*/exec,^/api/.*/run": Regular expression for paths that the proxy should reject.
|
||||
-u, --unix-socket="": Unix socket on which to run the proxy.
|
||||
-w, --www="": Also serve static files from the given directory under the specified prefix.
|
||||
-P, --www-prefix="/static/": Prefix to serve static files under, if static file directory is specified.
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.156927042 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl replace"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl replace
|
||||
|
||||
Replace a resource by filename or stdin.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Replace a resource by filename or stdin.

JSON and YAML formats are accepted. If replacing an existing resource, the
complete resource spec must be provided. This can be obtained by
$ kubectl get TYPE NAME -o yaml

Please refer to the models in http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html to find if a field is mutable.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl replace -f FILENAME
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Replace a pod using the data in pod.json.
|
||||
$ kubectl replace -f ./pod.json
|
||||
|
||||
# Replace a pod based on the JSON passed into stdin.
|
||||
$ cat pod.json | kubectl replace -f -
|
||||
|
||||
# Update a single-container pod's image version (tag) to v4
|
||||
kubectl get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | kubectl replace -f -
|
||||
|
||||
# Force replace, delete and then re-create the resource
|
||||
kubectl replace --force -f ./pod.json
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--cascade[=false]: Only relevant during a force replace. If true, cascade the deletion of the resources managed by this resource (e.g. Pods created by a ReplicationController).
|
||||
-f, --filename=[]: Filename, directory, or URL to file to use to replace the resource.
|
||||
--force[=false]: Delete and re-create the specified resource
|
||||
--grace-period=-1: Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative.
|
||||
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
|
||||
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
|
||||
--timeout=0: Only relevant during a force replace. The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object
|
||||
--validate[=true]: If true, use a schema to validate the input before sending it
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-25 23:39:47.893270992 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl rolling-update"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl rolling-update
|
||||
|
||||
Perform a rolling update of the given ReplicationController.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Perform a rolling update of the given ReplicationController.

Replaces the specified replication controller with a new replication controller by updating one pod at a time to use the
new PodTemplate. The new-controller.json must specify the same namespace as the
existing replication controller and overwrite at least one (common) label in its replicaSelector.
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl rolling-update OLD_CONTROLLER_NAME ([NEW_CONTROLLER_NAME] --image=NEW_CONTAINER_IMAGE | -f NEW_CONTROLLER_SPEC)
|
||||
{% endraw %}
|
||||
```
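
A sketch of what a valid NEW_CONTROLLER_SPEC looks like given the constraints above: same namespace as the old controller, and at least one selector label changed so the two controllers do not overlap. All names, labels, and image tags here are illustrative:

```
{% raw %}
# frontend-v2.json: same namespace as frontend-v1, but with a different 'version' label
# in both the selector and the pod template.
$ cat > frontend-v2.json <<EOF
{
  "apiVersion": "v1",
  "kind": "ReplicationController",
  "metadata": { "name": "frontend-v2", "namespace": "default" },
  "spec": {
    "replicas": 3,
    "selector": { "app": "frontend", "version": "v2" },
    "template": {
      "metadata": { "labels": { "app": "frontend", "version": "v2" } },
      "spec": { "containers": [ { "name": "frontend", "image": "frontend:v2" } ] }
    }
  }
}
EOF
$ kubectl rolling-update frontend-v1 -f frontend-v2.json
{% endraw %}
```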
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Update pods of frontend-v1 using new replication controller data in frontend-v2.json.
|
||||
$ kubectl rolling-update frontend-v1 -f frontend-v2.json
|
||||
|
||||
# Update pods of frontend-v1 using JSON data passed into stdin.
|
||||
$ cat frontend-v2.json | kubectl rolling-update frontend-v1 -f -
|
||||
|
||||
# Update the pods of frontend-v1 to frontend-v2 by just changing the image, and switching the
|
||||
# name of the replication controller.
|
||||
$ kubectl rolling-update frontend-v1 frontend-v2 --image=image:v2
|
||||
|
||||
# Update the pods of frontend by just changing the image, and keeping the old name
|
||||
$ kubectl rolling-update frontend --image=image:v2
|
||||
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--deployment-label-key="deployment": The key to use to differentiate between two different controllers, default 'deployment'. Only relevant when --image is specified, ignored otherwise
|
||||
--dry-run[=false]: If true, print out the changes that would be made, but don't actually make them.
|
||||
-f, --filename=[]: Filename or URL to file to use to create the new replication controller.
|
||||
--image="": Image to use for upgrading the replication controller. Can not be used with --filename/-f
|
||||
--no-headers[=false]: When using the default output, don't print headers.
|
||||
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.1/docs/user-guide/jsonpath.md].
|
||||
--output-version="": Output the formatted object with the given version (default api-version).
|
||||
--poll-interval=3s: Time delay between polling for replication controller status after the update. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
--rollback[=false]: If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout
|
||||
--schema-cache-dir="/tmp/kubectl.schema": If non-empty, load/store cached API schemas in this directory, default is '/tmp/kubectl.schema'
|
||||
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
|
||||
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
|
||||
--template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
|
||||
--timeout=5m0s: Max time to wait for a replication controller to update before giving up. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
--update-period=1m0s: Time to wait between updating pods. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
--validate[=true]: If true, use a schema to validate the input before sending it
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--alsologtostderr[=false]: log to standard error as well as files
|
||||
--api-version="": The API version to use when talking to the server
|
||||
--certificate-authority="": Path to a cert. file for the certificate authority.
|
||||
--client-certificate="": Path to a client key file for TLS.
|
||||
--client-key="": Path to a client key file for TLS.
|
||||
--cluster="": The name of the kubeconfig cluster to use
|
||||
--context="": The name of the kubeconfig context to use
|
||||
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
|
||||
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
|
||||
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
|
||||
--log-dir="": If non-empty, write log files in this directory
|
||||
--log-flush-frequency=5s: Maximum number of seconds between log flushes
|
||||
--logtostderr[=true]: log to standard error instead of files
|
||||
--match-server-version[=false]: Require server version to match client version
|
||||
--namespace="": If present, the namespace scope for this CLI request.
|
||||
--password="": Password for basic authentication to the API server.
|
||||
-s, --server="": The address and port of the Kubernetes API server
|
||||
--stderrthreshold=2: logs at or above this threshold go to stderr
|
||||
--token="": Bearer token for authentication to the API server.
|
||||
--user="": The name of the kubeconfig user to use
|
||||
--username="": Username for basic authentication to the API server.
|
||||
--v=0: log level for V logs
|
||||
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
|
||||
|
||||
###### Auto generated by spf13/cobra at 2015-09-25 23:39:47.894124392 +0000 UTC
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: IS_VERSIONED -->
|
||||
<!-- TAG IS_VERSIONED -->
|
||||
<!-- END MUNGE: IS_VERSIONED -->
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
---
|
||||
layout: docwithnav
|
||||
title: "kubectl run"
|
||||
---
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## kubectl run
|
||||
|
||||
Run a particular image on the cluster.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create and run a particular image, possibly replicated.
Creates a replication controller to manage the created container(s).
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
kubectl run NAME --image=image [--env="key=value"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
# Start a single instance of nginx.
|
||||
$ kubectl run nginx --image=nginx
|
||||
|
||||
# Start a single instance of hazelcast and let the container expose port 5701.
|
||||
$ kubectl run hazelcast --image=hazelcast --port=5701
|
||||
|
||||
# Start a single instance of hazelcast and set environment variables "DNS_DOMAIN=local" and "POD_NAMESPACE=default" in the container.
|
||||
$ kubectl run hazelcast --image=hazelcast --env="DNS_DOMAIN=local" --env="POD_NAMESPACE=default"
|
||||
|
||||
# Start a replicated instance of nginx.
|
||||
$ kubectl run nginx --image=nginx --replicas=5
|
||||
|
||||
# Dry run. Print the corresponding API objects without creating them.
|
||||
$ kubectl run nginx --image=nginx --dry-run
|
||||
|
||||
# Start a single instance of nginx, but overload the spec of the replication controller with a partial set of values parsed from JSON.
|
||||
$ kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }'
|
||||
|
||||
# Start a single instance of nginx and keep it in the foreground, don't restart it if it exits.
|
||||
$ kubectl run -i -tty nginx --image=nginx --restart=Never
|
||||
|
||||
# Start the nginx container using the default command, but use custom arguments (arg1 .. argN) for that command.
|
||||
$ kubectl run nginx --image=nginx -- <arg1> <arg2> ... <argN>
|
||||
|
||||
# Start the nginx container using a different command and custom arguments
|
||||
$ kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
--attach[=false]: If true, wait for the Pod to start running, and then attach to the Pod as if 'kubectl attach ...' were called. Default false, unless '-i/--interactive' is set, in which case the default is true.
|
||||
--command[=false]: If true and extra arguments are present, use them as the 'command' field in the container, rather than the 'args' field which is the default.
|
||||
--dry-run[=false]: If true, only print the object that would be sent, without sending it.
|
||||
--env=[]: Environment variables to set in the container
|
||||
--generator="": The name of the API generator to use. Default is 'run/v1' if --restart=Always, otherwise the default is 'run-pod/v1'.
|
||||
--hostport=-1: The host port mapping for the container port. To demonstrate a single-machine container.
|
||||
--image="": The image for the container to run.
|
||||
-l, --labels="": Labels to apply to the pod(s).
|
||||
--leave-stdin-open[=false]: If the pod is started in interactive mode or with stdin, leave stdin open after the first attach completes. By default, stdin will be closed after the first attach completes.
|
||||
--limits="": The resource requirement limits for this container. For example, 'cpu=200m,memory=512Mi'
|
||||
--no-headers[=false]: When using the default output, don't print headers.
|
||||
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.1/docs/user-guide/jsonpath.md].
|
||||
--output-version="": Output the formatted object with the given version (default api-version).
|
||||
--overrides="": An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.
|
||||
--port=-1: The port that this container exposes.
|
||||
-r, --replicas=1: Number of replicas to create for this container. Default is 1.
|
||||
--requests="": The resource requirement requests for this container. For example, 'cpu=100m,memory=256Mi'
|
||||
--restart="Always": The restart policy for this Pod. Legal values [Always, OnFailure, Never]. If set to 'Always' a replication controller is created for this pod, if set to OnFailure or Never, only the Pod is created and --replicas must be 1. Default 'Always'
|
||||
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
|
||||
--sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string.
|
||||
-i, --stdin[=false]: Keep stdin open on the container(s) in the pod, even if nothing is attached.
|
||||
--template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].
|
||||
--tty[=false]: Allocated a TTY for each container in the pod. Because -t is currently shorthand for --template, -t is not supported for --tty. This shorthand is deprecated and we expect to adopt -t for --tty soon.
|
||||
{% endraw %}
|
||||
```
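
The resource and output flags listed above compose naturally; the following sketch previews a replicated deployment without creating anything (the CPU and memory values are arbitrary examples, not defaults from this reference):

```
{% raw %}
# Preview the replication controller that would be created, with explicit
# resource requests and limits on the container; the values are only examples.
$ kubectl run nginx --image=nginx --replicas=2 \
    --requests='cpu=100m,memory=256Mi' --limits='cpu=200m,memory=512Mi' \
    --dry-run -o yaml
{% endraw %}
```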

### Options inherited from parent commands

```
{% raw %}
--alsologtostderr[=false]: log to standard error as well as files
--api-version="": The API version to use when talking to the server
--certificate-authority="": Path to a cert. file for the certificate authority.
--client-certificate="": Path to a client certificate file for TLS.
--client-key="": Path to a client key file for TLS.
--cluster="": The name of the kubeconfig cluster to use
--context="": The name of the kubeconfig context to use
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
--log-dir="": If non-empty, write log files in this directory
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--logtostderr[=true]: log to standard error instead of files
--match-server-version[=false]: Require server version to match client version
--namespace="": If present, the namespace scope for this CLI request.
--password="": Password for basic authentication to the API server.
-s, --server="": The address and port of the Kubernetes API server
--stderrthreshold=2: logs at or above this threshold go to stderr
--token="": Bearer token for authentication to the API server.
--user="": The name of the kubeconfig user to use
--username="": Username for basic authentication to the API server.
--v=0: log level for V logs
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
{% endraw %}
```

### SEE ALSO

* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-25 23:39:47.894967733 +0000 UTC


<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->


@ -0,0 +1,107 @@

---
layout: docwithnav
title: "kubectl scale"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->


<!-- END MUNGE: UNVERSIONED_WARNING -->

## kubectl scale

Set a new size for a Replication Controller.

### Synopsis


Set a new size for a Replication Controller.

Scale also allows users to specify one or more preconditions for the scale action.
If --current-replicas or --resource-version is specified, it is validated before the
scale is attempted, and it is guaranteed that the precondition holds true when the
scale is sent to the server.

```
{% raw %}
kubectl scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT (-f FILENAME | TYPE NAME)
{% endraw %}
```

### Examples

```
{% raw %}
# Scale the replication controller named 'foo' to 3.
$ kubectl scale --replicas=3 replicationcontrollers foo

# Scale a replication controller identified by type and name specified in "foo-controller.yaml" to 3.
$ kubectl scale --replicas=3 -f foo-controller.yaml

# If the replication controller named foo's current size is 2, scale foo to 3.
$ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo

# Scale multiple replication controllers.
$ kubectl scale --replicas=5 rc/foo rc/bar
{% endraw %}
```
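
The --resource-version precondition described in the synopsis is used the same way as --current-replicas; a hedged sketch follows (the version string below is a placeholder, not a real resourceVersion):

```
{% raw %}
# Scale 'foo' to 3 only if its resourceVersion is still the one observed
# earlier; the value shown here is a placeholder.
$ kubectl scale --resource-version=12345 --replicas=3 replicationcontrollers foo
{% endraw %}
```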

### Options

```
{% raw %}
--current-replicas=-1: Precondition for current size. Requires that the current size of the replication controller match this value in order to scale.
-f, --filename=[]: Filename, directory, or URL to a file identifying the replication controller to set a new size
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
--replicas=-1: The new desired number of replicas. Required.
--resource-version="": Precondition for resource version. Requires that the current resource version match this value in order to scale.
--timeout=0: The length of time to wait before giving up on a scale operation, zero means don't wait.
{% endraw %}
```

### Options inherited from parent commands

```
{% raw %}
--alsologtostderr[=false]: log to standard error as well as files
--api-version="": The API version to use when talking to the server
--certificate-authority="": Path to a cert. file for the certificate authority.
--client-certificate="": Path to a client certificate file for TLS.
--client-key="": Path to a client key file for TLS.
--cluster="": The name of the kubeconfig cluster to use
--context="": The name of the kubeconfig context to use
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
--log-dir="": If non-empty, write log files in this directory
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--logtostderr[=true]: log to standard error instead of files
--match-server-version[=false]: Require server version to match client version
--namespace="": If present, the namespace scope for this CLI request.
--password="": Password for basic authentication to the API server.
-s, --server="": The address and port of the Kubernetes API server
--stderrthreshold=2: logs at or above this threshold go to stderr
--token="": Bearer token for authentication to the API server.
--user="": The name of the kubeconfig user to use
--username="": Username for basic authentication to the API server.
--v=0: log level for V logs
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
{% endraw %}
```

### SEE ALSO

* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.155304524 +0000 UTC


<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->


@ -0,0 +1,109 @@

---
layout: docwithnav
title: "kubectl stop"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->


<!-- END MUNGE: UNVERSIONED_WARNING -->

## kubectl stop

Deprecated: Gracefully shut down a resource by name or filename.

### Synopsis


Deprecated: Gracefully shut down a resource by name or filename.

The stop command is deprecated; all of its functionality is covered by the delete command.
See 'kubectl delete --help' for more details.

Attempts to shut down and delete a resource that supports graceful termination.
If the resource is scalable it will be scaled to 0 before deletion.

```
{% raw %}
kubectl stop (-f FILENAME | TYPE (NAME | -l label | --all))
{% endraw %}
```

### Examples

```
{% raw %}
# Shut down foo.
$ kubectl stop replicationcontroller foo

# Stop pods and services with label name=myLabel.
$ kubectl stop pods,services -l name=myLabel

# Shut down the service defined in service.json.
$ kubectl stop -f service.json

# Shut down all resources in the path/to/resources directory.
$ kubectl stop -f path/to/resources
{% endraw %}
```
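
Because stop is deprecated in favour of delete, each example above has a direct delete equivalent; a hedged illustration of the first one:

```
{% raw %}
# Roughly equivalent to 'kubectl stop replicationcontroller foo': the resource
# is scaled down where applicable and then gracefully deleted.
$ kubectl delete replicationcontroller foo
{% endraw %}
```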

### Options

```
{% raw %}
--all[=false]: [-all] to select all the specified resources.
-f, --filename=[]: Filename, directory, or URL to file of resource(s) to be stopped.
--grace-period=-1: Period of time in seconds given to the resource to terminate gracefully. Ignored if negative.
--ignore-not-found[=false]: Treat "resource not found" as a successful stop.
-o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
-l, --selector="": Selector (label query) to filter on.
--timeout=0: The length of time to wait before giving up on a delete, zero means determine a timeout from the size of the object
{% endraw %}
```

### Options inherited from parent commands

```
{% raw %}
--alsologtostderr[=false]: log to standard error as well as files
--api-version="": The API version to use when talking to the server
--certificate-authority="": Path to a cert. file for the certificate authority.
--client-certificate="": Path to a client certificate file for TLS.
--client-key="": Path to a client key file for TLS.
--cluster="": The name of the kubeconfig cluster to use
--context="": The name of the kubeconfig context to use
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
--log-dir="": If non-empty, write log files in this directory
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--logtostderr[=true]: log to standard error instead of files
--match-server-version[=false]: Require server version to match client version
--namespace="": If present, the namespace scope for this CLI request.
--password="": Password for basic authentication to the API server.
-s, --server="": The address and port of the Kubernetes API server
--stderrthreshold=2: logs at or above this threshold go to stderr
--token="": Bearer token for authentication to the API server.
--user="": The name of the kubeconfig user to use
--username="": Username for basic authentication to the API server.
--v=0: log level for V logs
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
{% endraw %}
```

### SEE ALSO

* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.158360787 +0000 UTC


<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->


@ -0,0 +1,79 @@

---
layout: docwithnav
title: "kubectl version"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->


<!-- END MUNGE: UNVERSIONED_WARNING -->

## kubectl version

Print the client and server version information.

### Synopsis


Print the client and server version information.

```
{% raw %}
kubectl version
{% endraw %}
```

### Options

```
{% raw %}
-c, --client[=false]: Client version only (no server required).
{% endraw %}
```
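
A short usage note for the -c/--client flag listed above (the command's output is omitted here rather than guessed):

```
{% raw %}
# Print only the client version; no connection to the API server is needed.
$ kubectl version --client
{% endraw %}
```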

### Options inherited from parent commands

```
{% raw %}
--alsologtostderr[=false]: log to standard error as well as files
--api-version="": The API version to use when talking to the server
--certificate-authority="": Path to a cert. file for the certificate authority.
--client-certificate="": Path to a client certificate file for TLS.
--client-key="": Path to a client key file for TLS.
--cluster="": The name of the kubeconfig cluster to use
--context="": The name of the kubeconfig context to use
--insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
--log-dir="": If non-empty, write log files in this directory
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--logtostderr[=true]: log to standard error instead of files
--match-server-version[=false]: Require server version to match client version
--namespace="": If present, the namespace scope for this CLI request.
--password="": Password for basic authentication to the API server.
-s, --server="": The address and port of the Kubernetes API server
--stderrthreshold=2: logs at or above this threshold go to stderr
--token="": Bearer token for authentication to the API server.
--user="": The name of the kubeconfig user to use
--username="": Username for basic authentication to the API server.
--v=0: log level for V logs
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
{% endraw %}
```

### SEE ALSO

* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.164581808 +0000 UTC


<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->