Merge branch 'master' of https://github.com/kubernetes/kubernetes.github.io into release-1.6
* 'master' of https://github.com/kubernetes/kubernetes.github.io: (58 commits)
  fix unsupported parameter of wget command
  Update Weave Net's title.
  Update garbage-collection.md
  Correct ASM logo filename
  fixed ASM blurb
  Repair Spotinst logo
  Doc formatting update
  fixing typo
  Minor spelling correction -- "rtk" to "rkt"
  update init-containers.md
  Update local instructions to new method.
  Update ubuntu install instructions.
  Highlighted command for `kubectl proxy`.
  Update Tasks landing page. (#2697)
  Deprecate Guide topics: quick-start ... (#2696)
  Deprecate Guide topic: Secrets Walkthrough. (#2695)
  Update multiple-schedulers doc (#2063)
  remove extra space
  Reimplement PR #2525
  Deprecate Guide topic: Persistent Volume Walkthrough. (#2692)
  ...pull/2618/head
@@ -33,6 +33,10 @@ toc:
section:
- docs/concepts/workloads/pods/pod-lifecycle.md

- title: Clusters
section:
- docs/concepts/clusters/logging.md

- title: Configuration
section:
- docs/concepts/configuration/container-command-args.md

@@ -18,14 +18,11 @@ toc:

- title: Workload Deployment and Management
section:
- docs/user-guide/quick-start.md
- docs/user-guide/deploying-applications.md
- docs/user-guide/managing-deployments.md
- docs/user-guide/replication-controller/operations.md
- docs/user-guide/resizing-a-replication-controller.md
- docs/user-guide/rolling-updates.md
- docs/user-guide/update-demo/index.md
- docs/user-guide/secrets/walkthrough.md
- docs/user-guide/configmap/index.md
- docs/user-guide/projected-volume/index.md
- docs/user-guide/horizontal-pod-autoscaling/walkthrough.md

@@ -57,13 +54,9 @@ toc:

- title: Containers and Pods
section:
- docs/user-guide/pods/single-container.md
- docs/user-guide/pods/multi-container.md
- docs/user-guide/pods/init-container.md
- docs/user-guide/configuring-containers.md
- docs/user-guide/pod-templates.md
- docs/user-guide/production-pods.md
- docs/user-guide/containers.md
- docs/user-guide/environment-guide/index.md
- docs/user-guide/compute-resources.md
- docs/user-guide/pod-states.md

@@ -72,7 +65,6 @@ toc:
- docs/user-guide/node-selection/index.md
- docs/user-guide/downward-api/index.md
- docs/user-guide/downward-api/volume/index.md
- docs/user-guide/persistent-volumes/walkthrough.md
- docs/user-guide/petset/bootstrapping/index.md

- title: Monitoring, Logging, and Debugging Containers

@@ -29,9 +29,12 @@ toc:
- docs/tasks/access-application-cluster/port-forward-access-application-cluster.md
- docs/tasks/access-application-cluster/load-balance-access-application-cluster.md

- title: Debugging Applications in a Cluster
- title: Monitoring, Logging, and Debugging
section:
- docs/tasks/debug-application-cluster/determine-reason-pod-failure.md
- docs/tasks/debug-application-cluster/debug-init-containers.md
- docs/tasks/debug-application-cluster/logging-stackdriver.md
- docs/tasks/debug-application-cluster/logging-elasticsearch-kibana.md

- title: Accessing the Kubernetes API
section:

@@ -51,8 +54,3 @@ toc:
- docs/tasks/manage-stateful-set/deleting-a-statefulset.md
- docs/tasks/manage-stateful-set/debugging-a-statefulset.md
- docs/tasks/manage-stateful-set/delete-pods.md

- title: Troubleshooting
section:
- docs/tasks/troubleshoot/debug-init-containers.md
- docs/tasks/administer-cluster/access-control-identity-management/
|
|
@ -279,14 +279,120 @@
|
|||
logo: 'harbur',
|
||||
link: 'https://harbur.io/',
|
||||
blurb: 'Based in Barcelona, Harbur is a consulting firm that helps companies deploy self-healing solutions empowered by Container technologies'
|
||||
},
|
||||
},
|
||||
{
|
||||
type: 1,
|
||||
name: 'Endocode',
|
||||
logo: 'endocode',
|
||||
link: 'https://endocode.com/kubernetes/',
|
||||
blurb: 'Endocode practices and teaches the open source way. Kernel to cluster - Dev to Ops. We offer Kubernetes trainings, services and support.'
|
||||
}
|
||||
},
|
||||
{
|
||||
type: 0,
|
||||
name: 'Spotinst',
|
||||
logo: 'spotinst',
|
||||
link: 'http://blog.spotinst.com/2016/08/04/elastigroup-kubernetes-minions-steroids/',
|
||||
blurb: 'Spotinst uses a prediction algorithm in the Amazon EC2 Spot allowing k8s clusters to increase performance and lower the infrastructure costs'
|
||||
},
|
||||
{
|
||||
type: 1,
|
||||
name: 'inwinSTACK',
|
||||
logo: 'inwinstack',
|
||||
link: 'http://www.inwinstack.com/index.php/en/solutions-en/',
|
||||
blurb: 'Our container service leverages OpenStack-based infrastructure and its container orchestration engine Magnum to manage Kubernetes clusters.'
|
||||
},
|
||||
{
|
||||
type: 1,
|
||||
name: 'Semantix',
|
||||
logo: 'semantix',
|
||||
link: 'http://www.semantix.com.br/',
|
||||
blurb: 'Semantix is a company that works with data analytics and distributed systems. Kubernetes is used to orchestrate services for our customers.'
|
||||
},
|
||||
{
|
||||
type: 0,
|
||||
name: 'ASM Technologies Limited',
|
||||
logo: 'asm',
|
||||
link: 'http://www.asmtech.com/',
|
||||
blurb: 'Our technology supply chain portfolio enables your software products to be accessible, viable and available more effectively.'
|
||||
},
|
||||
{
|
||||
type: 1,
|
||||
name: 'InfraCloud Technologies',
|
||||
logo: 'infracloud',
|
||||
link: 'http://blog.infracloud.io/state-of-kubernetes/',
|
||||
blurb: 'InfraCloud Technologies is software consultancy which provides services in Containers, Cloud and DevOps.'
|
||||
},
|
||||
{
|
||||
type: 0,
|
||||
name: 'SignalFx',
|
||||
logo: 'signalfx',
|
||||
link: 'https://github.com/signalfx/integrations/tree/master/kubernetes',
|
||||
blurb: 'Gain real-time visibility across metrics & the most intelligent alerts for todays architectures, including deep integration with Kubernetes'
|
||||
},
|
||||
{
|
||||
type: 0,
|
||||
name: 'NATS',
|
||||
logo: 'nats',
|
||||
link: 'https://github.com/pires/kubernetes-nats-cluster',
|
||||
blurb: 'NATS is a simple, secure, and scalable cloud native messaging system.'
|
||||
},
|
||||
{
|
||||
type: 1,
|
||||
name: 'RX-M',
|
||||
logo: 'rxm',
|
||||
link: 'http://rx-m.com/training/kubernetes-training/',
|
||||
blurb: 'Market neutral Kubernetes Dev, DevOps and Production training and consulting services'
|
||||
},
|
||||
{
|
||||
type: 1,
|
||||
name: 'Emerging Technology Advisors',
|
||||
logo: 'eta',
|
||||
link: 'https://www.emergingtechnologyadvisors.com/services/kubernetes.html',
|
||||
blurb: 'ETA helps companies architect, implement, and manage scalable applications using Kubernetes on public or private cloud.'
|
||||
},
|
||||
{
|
||||
type: 0,
|
||||
name: 'CloudPlex.io',
|
||||
logo: 'cloudplex',
|
||||
link: 'http://www.cloudplex.io',
|
||||
blurb: 'CloudPlex enables operations teams to visually deploy, orchestrate, manage, and monitor infrastructure, applications, and services in public or private cloud.'
|
||||
},
|
||||
{
|
||||
type: 1,
|
||||
name: 'Kumina',
|
||||
logo: 'kumina',
|
||||
link: 'https://www.kumina.nl/managed_kubernetes',
|
||||
blurb: 'Kumina creates Kubernetes solutions on your choice of infrastructure with around-the-clock management and unlimited support.'
|
||||
},
|
||||
{
|
||||
type: 0,
|
||||
name: 'CA Technologies',
|
||||
logo: 'ca',
|
||||
link: 'https://www.ca.com/us/products/application-deployment.html',
|
||||
blurb: 'The RA CDE Kubernetes plugin enables an automated process for pushing changes to production by applying standard Kubernetes YAML files'
|
||||
},
|
||||
{
|
||||
type: 0,
|
||||
name: 'CoScale',
|
||||
logo: 'coscale',
|
||||
link: 'http://www.coscale.com/blog/how-to-monitor-your-kubernetes-cluster',
|
||||
blurb: 'Full stack monitoring of containers and microservices orchestrated by Kubernetes. Powered by anomaly detection to find problems faster.'
|
||||
},
|
||||
{
|
||||
type: 0,
|
||||
name: 'Supergiant.io',
|
||||
logo: 'supergiant',
|
||||
link: 'https://supergiant.io/blog/supergiant-packing-algorithm-unique-save-money',
|
||||
blurb: 'Supergiant autoscales hardware for Kubernetes. Open-source, it makes HA, distributed, stateful apps easy to deploy, manage, and scale.'
|
||||
},
|
||||
{
|
||||
type: 0,
|
||||
name: 'Avi Networks',
|
||||
logo: 'avinetworks',
|
||||
link: 'https://kb.avinetworks.com/avi-vantage-openshift-installation-guide/',
|
||||
blurb: 'Avis elastic application services fabric provides scalable, feature rich & integrated L4-7 networking for K8S environments.'
|
||||
}
|
||||
|
||||
]
|
||||
|
||||
var isvContainer = document.getElementById('isvContainer')
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
<li><a href="/docs/troubleshooting/" {% if toc.bigheader == "Support" %}class="YAH"{% endif %}>SUPPORT</a></li>
|
||||
</ul>
|
||||
<div id="searchBox">
|
||||
<input type="text" id="search" placeholder="Search" onkeydown="if (event.keyCode==13) window.location.replace('/docs/search/?q=' + this.value)">
|
||||
<input type="text" id="search" placeholder="Search" onkeydown="if (event.keyCode==13) window.location.replace('/docs/search/?q=' + this.value)" autofocus="autofocus">
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
|
|
@ -94,6 +94,6 @@ cid: caseStudies
|
|||
|
||||
<div id="videoPlayer">
|
||||
<!--<iframe data-url="https://www.youtube.com/watch?v=B0_5Nms8sD0" frameborder="0" allowfullscreen></iframe>-->
|
||||
<iframe data-url="https://www.youtube.com/embed/4gyeixJLabo?autoplay=1" frameborder="0" allowfullscreen></iframe>
|
||||
<iframe data-url="https://www.youtube.com/embed/4gyeixJLabo?autoplay=1" frameborder="0" allowfullscreen="true"></iframe>
|
||||
<button id="closeButton"></button>
|
||||
</div>
|
||||
|
|
|
@@ -87,7 +87,7 @@ The ImagePolicyWebhook plug-in allows a backend webhook to make admission decisi
```

#### Configuration File Format
ImagePolicyWebhook uses the admission controller config file (`--admission-controller-config-file`) to set configuration options for the behavior of the backend. This file may be json or yaml and has the following format:
ImagePolicyWebhook uses the admission config file `--admission-controller-config-file` to set configuration options for the behavior of the backend. This file may be json or yaml and has the following format:

```javascript
{
@@ -85,15 +85,25 @@ properties:
- Subject-matching properties:
  - `user`, type string; the user-string from `--token-auth-file`. If you specify `user`, it must match the username of the authenticated user.
  - `group`, type string; if you specify `group`, it must match one of the groups of the authenticated user. `system:authenticated` matches all authenticated requests. `system:unauthenticated` matches all unauthenticated requests.
  - `readonly`, type boolean, when true, means that the policy only applies to get, list, and watch operations.
- Resource-matching properties:
  - `apiGroup`, type string; an API group, such as `extensions`. `*` matches all API groups.
  - `namespace`, type string; a namespace string. `*` matches all resource requests.
  - `resource`, type string; a resource, such as `pods`. `*` matches all resource requests.
  - `apiGroup`, type string; an API group.
    - Ex: `extensions`
    - Wildcard: `*` matches all API groups.
  - `namespace`, type string; a namespace.
    - Ex: `kube-system`
    - Wildcard: `*` matches all resource requests.
  - `resource`, type string; a resource type.
    - Ex: `pods`
    - Wildcard: `*` matches all resource requests.
- Non-resource-matching properties:
  - `nonResourcePath`, type string; matches the non-resource request paths (like `/version` and `/apis`). `*` matches all non-resource requests. `/foo/*` matches `/foo/` and all of its subpaths.
  - `nonResourcePath`, type string; non-resource request paths.
    - Ex: `/version` or `/apis`
    - Wildcard:
      - `*` matches all non-resource requests.
      - `/foo/*` matches `/foo/` and all of its subpaths.
- `readonly`, type boolean, when true, means that the policy only applies to get, list, and watch operations.

An unset property is the same as a property set to the zero value for its type
**NOTES:** An unset property is the same as a property set to the zero value for its type
(e.g. empty string, 0, false). However, unset should be preferred for
readability.
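For illustration, here is a hedged sketch of one policy line that combines several of the properties described above (the policy file is one JSON object per line; the user and namespace values are hypothetical, and the `apiVersion`/`kind`/`spec` wrapper assumes the v1beta1 ABAC format):

```json
{"apiVersion": "abac.authorization.kubernetes.io/v1beta1", "kind": "Policy", "spec": {"user": "bob", "namespace": "projectCaribou", "resource": "pods", "apiGroup": "", "readonly": true}}
```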
@@ -15,7 +15,7 @@ cluster (e.g., scheduling), and detecting and responding to cluster events
(e.g., starting up a new pod when a replication controller's 'replicas' field is
unsatisfied).

Master components could in theory be run on any node in the cluster. However,
In theory, Master components can be run on any node in the cluster. However,
for simplicity, current setup scripts typically start all master components on
the same VM, and do not run user containers on this VM. See
[high-availability.md](/docs/admin/high-availability) for an example multi-master-VM setup.

@@ -23,7 +23,7 @@ threshold has been met.

### Container Collection

The policy for garbage collecting containers considers three user-defined variables. `MinAge` is the minimum age at which a container can be garbage collected. `MaxPerPodContainer` is the maximum number of dead containers any single
The policy for garbage collecting containers considers three user-defined variables. `MinAge` is the minimum age at which a container can be garbage collected. `MaxPerPodContainer` is the maximum number of dead containers every single
pod (UID, container name) pair is allowed to have. `MaxContainers` is the maximum number of total dead containers. These variables can be individually disabled by setting `MinAge` to zero and setting `MaxPerPodContainer` and `MaxContainers` respectively to less than zero.

Kubelet will act on containers that are unidentified, deleted, or outside of the boundaries set by the previously mentioned flags. The oldest containers will generally be removed first. `MaxPerPodContainer` and `MaxContainer` may potentially conflict with each other in situations where retaining the maximum number of containers per pod (`MaxPerPodContainer`) would go outside the allowable range of global dead containers (`MaxContainers`). `MaxPerPodContainer` would be adjusted in this situation: A worst case scenario would be to downgrade `MaxPerPodContainer` to 1 and evict the oldest containers. Additionally, containers owned by pods that have been deleted are removed once they are older than `MinAge`.

@@ -42,7 +42,7 @@ to free. Default is 80%.
We also allow users to customize garbage collection policy through the following kubelet flags:

1. `minimum-container-ttl-duration`, minimum age for a finished container before it is
garbage collected. Default is 0 minute, which means any finished container will be garbage collected.
garbage collected. Default is 0 minute, which means every finished container will be garbage collected.
2. `maximum-dead-containers-per-container`, maximum number of old instances to retain
per container. Default is 1.
3. `maximum-dead-containers`, maximum number of old instances of containers to retain globally.
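For illustration, the three flags above might be combined in a kubelet invocation like this (a sketch; the values are arbitrary):

```shell
# Keep finished containers for at least one hour, retain at most 2 old instances
# per container, and cap the total number of dead containers at 100
kubelet --minimum-container-ttl-duration=1h \
  --maximum-dead-containers-per-container=2 \
  --maximum-dead-containers=100
```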
@@ -72,4 +72,4 @@ Including:
| `--low-diskspace-threshold-mb` | `--eviction-hard` or `eviction-soft` | eviction generalizes disk thresholds to other resources |
| `--outofdisk-transition-frequency` | `--eviction-pressure-transition-period` | eviction generalizes disk pressure transition to other resources |

See [kubelet eviction design doc](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/kubelet-eviction.md) for more details.
See [kubelet eviction design doc](https://github.com/kubernetes/kubernetes.github.io/blob/master/docs/admin/out-of-resource.md) for more details.

@@ -107,7 +107,7 @@ This operation may be sped up by migrating etcd data directory, as described [he

## Implementation notes

![](ha-master-gce.png)
![ha-master-gce](/images/docs/ha-master-gce.png)

### Overview

@@ -34,7 +34,7 @@ resources to create on startup. These are the core components of Kubernetes, and
once they are up and running we can use `kubectl` to set up or manage any
additional components.

1. kubeadm installs any add-on components, such as DNS or discovery, via the API
1. kubeadm installs some add-on components, such as DNS or discovery, via the API
server.

Running `kubeadm join` on each node in the cluster consists of the following steps:
@@ -273,6 +273,22 @@ These environment variables are a short-term solution, eventually they will be i

If you want to use kubeadm with an http proxy, you may need to configure it to support http_proxy, https_proxy, or no_proxy.

For example, if your kube master node IP address is 10.18.17.16 and your proxy supports both http and https on 10.18.17.16 port 8080, you can use the following commands:

```bash
export PROXY_PORT=8080
export PROXY_IP=10.18.17.16
export http_proxy=http://$PROXY_IP:$PROXY_PORT
export HTTP_PROXY=$http_proxy
export https_proxy=$http_proxy
export HTTPS_PROXY=$http_proxy
export no_proxy="localhost,127.0.0.1,localaddress,.localdomain.com,example.com,10.18.17.16"
```

Remember to change `PROXY_IP` and add your kube master node IP address to `no_proxy`.

## Releases and release notes

If you already have kubeadm installed and want to upgrade, run `apt-get update && apt-get upgrade` or `yum update` to get the latest version of kubeadm.
|
@ -34,7 +34,7 @@ to build the image:
|
|||
|
||||
```docker
|
||||
FROM busybox
|
||||
ADD _output/local/go/bin/kube-scheduler /usr/local/bin/kube-scheduler
|
||||
ADD ./_output/dockerized/bin/linux/amd64/kube-scheduler /usr/local/bin/kube-scheduler
|
||||
```
|
||||
|
||||
Save the file as `Dockerfile`, build the image and push it to a registry. This example
|
||||
|
@ -45,7 +45,7 @@ For more details, please read the GCR
|
|||
|
||||
```shell
|
||||
docker build -t my-kube-scheduler:1.0 .
|
||||
gcloud docker push gcr.io/my-gcp-project/my-kube-scheduler:1.0
|
||||
gcloud docker -- push gcr.io/my-gcp-project/my-kube-scheduler:1.0
|
||||
```
|
||||
|
||||
### 2. Define a Kubernetes Deployment for the scheduler
|
||||
|
@ -131,15 +131,15 @@ scheduler in that pod spec. Let's look at three examples.
|
|||
|
||||
Save this file as `pod3.yaml` and submit it to the Kubernetes cluster.
|
||||
|
||||
```shell
|
||||
kubectl create -f pod3.yaml
|
||||
```
|
||||
```shell
|
||||
kubectl create -f pod3.yaml
|
||||
```
|
||||
|
||||
Verify that all three pods are running.
|
||||
|
||||
```shell
|
||||
kubectl get pods
|
||||
```
|
||||
```shell
|
||||
kubectl get pods
|
||||
```
|
||||
|
||||
### Verifying that the pods were scheduled using the desired schedulers
|
||||
|
||||
|
|
|
@ -16,8 +16,11 @@ spec:
|
|||
version: second
|
||||
spec:
|
||||
containers:
|
||||
- command: [/usr/local/bin/kube-scheduler, --address=0.0.0.0,
|
||||
--scheduler-name=my-scheduler]
|
||||
- command:
|
||||
- /usr/local/bin/kube-scheduler
|
||||
- --address=0.0.0.0
|
||||
- --leader-elect=false
|
||||
- --scheduler-name=my-scheduler
|
||||
image: gcr.io/my-gcp-project/my-kube-scheduler:1.0
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
|
@ -37,4 +40,4 @@ spec:
|
|||
volumeMounts: []
|
||||
hostNetwork: false
|
||||
hostPID: false
|
||||
volumes: []
|
||||
volumes: []
|
||||
|
|
|
@ -67,7 +67,7 @@ have some advantages for start-up related code:
|
|||
`sed`, `awk`, `python`, or `dig` during setup.
|
||||
* The application image builder and deployer roles can work independently without
|
||||
the need to jointly build a single app image.
|
||||
* They use Linux namespaces so they have a different filesystem view from app Containers.
|
||||
* They use Linux namespaces so that they have a different filesystem view from app Containers.
|
||||
Consequently, they can be given access to Secrets that app Containers are not able to
|
||||
access.
|
||||
* They run to completion before any app Containers start, whereas app
|
||||
|
|
|
@@ -0,0 +1,223 @@
---
assignees:
- crassirostris
- piosz
title: Logging and Monitoring Cluster Activity
---

Application and systems logs can help you understand what is happening inside your cluster. The logs are particularly useful for debugging problems and monitoring cluster activity. Most modern applications have some kind of logging mechanism; as such, most container engines are likewise designed to support some kind of logging. The easiest and most embraced logging method for containerized applications is to write to the standard output and standard error streams.

However, the native functionality provided by a container engine or runtime is usually not enough for a complete logging solution. For example, if a container crashes, a pod is evicted, or a node dies, you'll usually still want to access your application's logs. As such, logs should have a separate storage and lifecycle independent of nodes, pods, or containers. This concept is called _cluster-level-logging_. Cluster-level logging requires a separate backend to store, analyze, and query logs. Kubernetes provides no native storage solution for log data, but you can integrate many existing logging solutions into your Kubernetes cluster.

This document includes:

* A basic demonstration of logging in Kubernetes using the standard output stream
* A detailed description of the node logging architecture in Kubernetes
* Guidance for implementing cluster-level logging in Kubernetes

The guidance for cluster-level logging assumes that a logging backend is present inside or outside of your cluster. If you're not interested in having cluster-level logging, you might still find the description of how logs are stored and handled on the node to be useful.

## Basic logging in Kubernetes

In this section, you can see an example of basic logging in Kubernetes that
outputs data to the standard output stream. This demonstration uses
a [pod specification](/docs/concepts/clusters/counter-pod.yaml) with
a container that writes some text to standard output once per second.

{% include code.html language="yaml" file="counter-pod.yaml" ghlink="/docs/tasks/debug-application-cluster/counter-pod.yaml" %}

To run this pod, use the following command:

```shell
$ kubectl create -f http://k8s.io/docs/tasks/debug-application-cluster/counter-pod.yaml
pod "counter" created
```

To fetch the logs, use the `kubectl logs` command, as follows:

```shell
$ kubectl logs counter
0: Mon Jan 1 00:00:00 UTC 2001
1: Mon Jan 1 00:00:01 UTC 2001
2: Mon Jan 1 00:00:02 UTC 2001
...
```

You can use `kubectl logs` to retrieve logs from a previous instantiation of a container with the `--previous` flag, in case the container has crashed. If your pod has multiple containers, you should specify which container's logs you want to access by appending a container name to the command. See the [`kubectl logs` documentation](/docs/user-guide/kubectl/kubectl_logs/) for more details.
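For instance (a sketch; `count` is the container name used by the counter pod above, and `-c` selects a container by name):

```shell
# Logs from the previous, crashed instantiation of the container
kubectl logs --previous counter
# Logs from a specific container in a multi-container pod
kubectl logs counter -c count
```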
## Logging at the node level

![Node level logging](/images/docs/user-guide/logging/logging-node-level.png)

Everything a containerized application writes to `stdout` and `stderr` is handled and redirected somewhere by a container engine. For example, the Docker container engine redirects those two streams to [a logging driver](https://docs.docker.com/engine/admin/logging/overview), which is configured in Kubernetes to write to a file in json format.

**Note:** The Docker json logging driver treats each line as a separate message. When using the Docker logging driver, there is no direct support for multi-line messages. You need to handle multi-line messages at the logging agent level or higher.

By default, if a container restarts, the kubelet keeps one terminated container with its logs. If a pod is evicted from the node, all corresponding containers are also evicted, along with their logs.

An important consideration in node-level logging is implementing log rotation, so that logs don't consume all available storage on the node. Kubernetes uses the [`logrotate`](http://www.linuxcommand.org/man_pages/logrotate8.html) tool to implement log rotation.

Kubernetes performs log rotation daily, or if the log file grows beyond 10MB in size. Each rotation belongs to a single container; if the container repeatedly fails or the pod is evicted, all previous rotations for the container are lost. By default, Kubernetes keeps up to five logging rotations per container.

The Kubernetes logging configuration differs depending on the node type. For example, you can find detailed information for GCI in the corresponding [configure helper](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/cluster/gce/gci/configure-helper.sh#L96).

When you run [`kubectl logs`](/docs/user-guide/kubectl/kubectl_logs), as in the basic logging example, the kubelet on the node handles the request and reads directly from the log file, returning the contents in the response. Note that `kubectl logs` **only returns the last rotation**; if cluster-level logging is not enabled and you need older rotations, you must extract them manually.

### System component logs

There are two types of system components: those that run in a container and those
that do not run in a container. For example:

* The Kubernetes scheduler and kube-proxy run in a container.
* The kubelet and container runtime, for example Docker, do not run in containers.

On machines with systemd, the kubelet and container runtime write to journald. If
systemd is not present, they write to `.log` files in the `/var/log` directory.
System components inside containers always write to the `/var/log` directory,
bypassing the default logging mechanism. They use the [glog](https://godoc.org/github.com/golang/glog)
logging library. You can find the conventions for logging severity for those
components in the [development docs on logging](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md).
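For example, on a systemd-based node you could read the kubelet's logs with `journalctl` (a sketch; the unit name can differ between installations):

```shell
# Follow the kubelet's journald logs on a node that runs systemd
journalctl -u kubelet -f
```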
Similarly to the container logs, system component logs in the `/var/log`
directory are rotated daily and based on the log size. However,
system component logs have a higher size retention: by default,
they can store up to 100MB.

## Cluster-level logging architectures

While Kubernetes does not provide a native solution for cluster-level logging, there are several common approaches you can consider. Here are some options:

* Use a node-level logging agent that runs on every node.
* Include a dedicated sidecar container for logging in an application pod.
* Push logs directly to a backend from within an application.

### Using a node logging agent

![Using a node level logging agent](/images/docs/user-guide/logging/logging-with-node-agent.png)

You can implement cluster-level logging by including a _node-level logging agent_ on each node. The logging agent is a dedicated tool that exposes logs or pushes logs to a backend. Commonly, the logging agent is a container that has access to a directory with log files from all of the application containers on that node.

Because the logging agent must run on every node, it's common to implement it as either a DaemonSet replica, a manifest pod, or a dedicated native process on the node. However, the latter two approaches are deprecated and highly discouraged.

Using a node-level logging agent is the most common and encouraged approach for a Kubernetes cluster, because it creates only one agent per node, and it doesn't require any changes to the applications running on the node. However, node-level logging _only works for applications' standard output and standard error_.

Kubernetes doesn't specify a logging agent, but two optional logging agents are packaged with the Kubernetes release: [Stackdriver Logging](/docs/user-guide/logging/stackdriver) for use with Google Cloud Platform, and [Elasticsearch](/docs/user-guide/logging/elasticsearch). You can find more information and instructions in the dedicated documents. Both use [fluentd](http://www.fluentd.org/) with custom configuration as an agent on the node.
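To make the DaemonSet option concrete, here is a minimal hedged sketch of a node-level logging agent; the image name is hypothetical and would be replaced by your agent of choice (for example, a fluentd image), and `extensions/v1beta1` is the DaemonSet API group used at the time of this release:

```yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: logging-agent
  namespace: kube-system
spec:
  template:
    metadata:
      labels:
        app: logging-agent
    spec:
      containers:
      - name: logging-agent
        image: example.com/logging-agent:latest  # hypothetical agent image
        volumeMounts:
        - name: varlog
          mountPath: /var/log
          readOnly: true
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
```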
### Using a sidecar container with the logging agent

You can use a sidecar container in one of the following ways:

* The sidecar container streams application logs to its own `stdout`.
* The sidecar container runs a logging agent, which is configured to pick up logs from an application container.

#### Streaming sidecar container

![Sidecar container with a streaming container](/images/docs/user-guide/logging/logging-with-streaming-sidecar.png)

By having your sidecar containers stream to their own `stdout` and `stderr`
streams, you can take advantage of the kubelet and the logging agent that
already run on each node. The sidecar containers read logs from a file, a socket,
or journald. Each individual sidecar container prints the log to its own `stdout`
or `stderr` stream.

This approach allows you to separate several log streams from different
parts of your application, some of which can lack support
for writing to `stdout` or `stderr`. The logic behind redirecting logs
is minimal, so it's hardly a significant overhead. Additionally, because
`stdout` and `stderr` are handled by the kubelet, you can use built-in tools
like `kubectl logs`.

Consider the following example. A pod runs a single container, and the container
writes to two different log files, using two different formats. Here's a
configuration file for the Pod:

{% include code.html language="yaml" file="two-files-counter-pod.yaml" ghlink="/docs/concepts/clusters/two-files-counter-pod.yaml" %}

It would be a mess to have log entries of different formats in the same log
stream, even if you managed to redirect both components to the `stdout` stream of
the container. Instead, you could introduce two sidecar containers. Each sidecar
container could tail a particular log file from a shared volume and then redirect
the logs to its own `stdout` stream.

Here's a configuration file for a pod that has two sidecar containers:

{% include code.html language="yaml" file="two-files-counter-pod-streaming-sidecar.yaml" ghlink="/docs/concepts/clusters/two-files-counter-pod-streaming-sidecar.yaml" %}
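The included file isn't rendered in this diff, so here is a rough sketch of what such a pod looks like: one app container writing the two files, plus one tailing sidecar per file (names match the example; the exact volume name and file paths are assumptions):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: busybox
    args: [/bin/sh, -c,
           'i=0; while true; do echo "$i: $(date)" >> /var/log/1.log;
            echo "$(date) INFO $i" >> /var/log/2.log; i=$((i+1)); sleep 1; done']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: count-log-1   # streams 1.log to its own stdout
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/1.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  - name: count-log-2   # streams 2.log to its own stdout
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/2.log']
    volumeMounts:
    - name: varlog
      mountPath: /var/log
  volumes:
  - name: varlog
    emptyDir: {}
```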
Now when you run this pod, you can access each log stream separately by
running the following commands:

```shell
$ kubectl logs counter count-log-1
0: Mon Jan 1 00:00:00 UTC 2001
1: Mon Jan 1 00:00:01 UTC 2001
2: Mon Jan 1 00:00:02 UTC 2001
...
```

```shell
$ kubectl logs counter count-log-2
Mon Jan 1 00:00:00 UTC 2001 INFO 0
Mon Jan 1 00:00:01 UTC 2001 INFO 1
Mon Jan 1 00:00:02 UTC 2001 INFO 2
...
```

The node-level agent installed in your cluster picks up those log streams
automatically without any further configuration. If you like, you can configure
the agent to parse log lines depending on the source container.

Note that despite low CPU and memory usage (on the order of a couple of millicores
for CPU and a few megabytes of memory), writing logs to a file and
then streaming them to `stdout` can double disk usage. If you have
an application that writes to a single file, it's generally better to set
`/dev/stdout` as destination rather than implementing the streaming sidecar
container approach.

Sidecar containers can also be used to rotate log files that cannot be
rotated by the application itself. [An example](https://github.com/samsung-cnct/logrotate)
of this approach is a small container running logrotate periodically.
However, it's recommended to use `stdout` and `stderr` directly and leave rotation
and retention policies to the kubelet.

#### Sidecar container with a logging agent

![Sidecar container with a logging agent](/images/docs/user-guide/logging/logging-with-sidecar-agent.png)

If the node-level logging agent is not flexible enough for your situation, you
can create a sidecar container with a separate logging agent that you have
configured specifically to run with your application.

**Note**: Using a logging agent in a sidecar container can lead
to significant resource consumption. Moreover, you won't be able to access
those logs using the `kubectl logs` command, because they are not controlled
by the kubelet.

As an example, you could use [Stackdriver](/docs/user-guide/logging/stackdriver/),
which uses fluentd as a logging agent. Here are two configuration files that
you can use to implement this approach. The first file contains
a [ConfigMap](/docs/user-guide/configmap/) to configure fluentd.

{% include code.html language="yaml" file="fluentd-sidecar-config.yaml" ghlink="/docs/concepts/clusters/fluentd-sidecar-config.yaml" %}

**Note**: The configuration of fluentd is beyond the scope of this article. For
information about configuring fluentd, see the
[official fluentd documentation](http://docs.fluentd.org/).

The second file describes a pod that has a sidecar container running fluentd.
The pod mounts a volume where fluentd can pick up its configuration data.

{% include code.html language="yaml" file="two-files-counter-pod-agent-sidecar.yaml" ghlink="/docs/concepts/clusters/two-files-counter-pod-agent-sidecar.yaml" %}

After some time you can find log messages in the Stackdriver interface.

Remember that this is just an example and you can actually replace fluentd
with any logging agent, reading from any source inside an application
container.

### Exposing logs directly from the application

![Exposing logs directly from the application](/images/docs/user-guide/logging/logging-from-application.png)

You can implement cluster-level logging by exposing or pushing logs directly from
every application; however, the implementation for such a logging mechanism
is outside the scope of Kubernetes.
|
@ -1,5 +1,8 @@
|
|||
---
|
||||
title: Container Command and Arguments
|
||||
redirect_from:
|
||||
- "/docs/user-guide/containers/"
|
||||
- "/docs/user-guide/containers.html"
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
|
|
@ -103,7 +103,15 @@ Note: Disabling SELinux by running `setenforce 0` is required in order to allow
|
|||
### (2/4) Initializing your master
|
||||
|
||||
The master is the machine where the "control plane" components run, including `etcd` (the cluster database) and the API server (which the `kubectl` CLI communicates with).
|
||||
All of these components run in pods started by `kubelet`.
|
||||
All of these components run in pods started by `kubelet`. The following images are required and are pulled automatically by `kubelet` if they are absent while `kubeadm init` is initializing your master:
|
||||
|
||||
gcr.io/google_containers/kube-proxy-amd64 v1.5.3
|
||||
gcr.io/google_containers/kube-controller-manager-amd64 v1.5.3
|
||||
gcr.io/google_containers/kube-scheduler-amd64 v1.5.3
|
||||
gcr.io/google_containers/kube-apiserver-amd64 v1.5.3
|
||||
gcr.io/google_containers/etcd-amd64 3.0.14-kubeadm
|
||||
gcr.io/google_containers/kube-discovery-amd64 1.0
|
||||
gcr.io/google_containers/pause-amd64 3.0
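If you want those images available before running `kubeadm init` (for example on hosts behind a restrictive proxy), pre-pulling them should work; a sketch, assuming Docker is the container runtime:

```shell
# Pre-pull one of the control-plane images listed above; repeat for the others
docker pull gcr.io/google_containers/kube-apiserver-amd64:v1.5.3
```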
|
||||
|
||||
Right now you can't run `kubeadm init` twice without tearing down the cluster in between, see [Tear down](#tear-down).
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@ Waiting for pod default/busybox-472357175-y0m47 to be running, status is Pending
|
|||
|
||||
Hit enter for command prompt
|
||||
|
||||
/ # wget -s --timeout=1 nginx
|
||||
/ # wget --spider --timeout=1 nginx
|
||||
Connecting to nginx (10.100.0.16:80)
|
||||
/ #
|
||||
```
|
||||
|
@ -96,7 +96,7 @@ Waiting for pod default/busybox-472357175-y0m47 to be running, status is Pending
|
|||
|
||||
Hit enter for command prompt
|
||||
|
||||
/ # wget -s --timeout=1 nginx
|
||||
/ # wget --spider --timeout=1 nginx
|
||||
Connecting to nginx (10.100.0.16:80)
|
||||
wget: download timed out
|
||||
/ #
|
||||
|
@ -110,7 +110,7 @@ Waiting for pod default/busybox-472357175-y0m47 to be running, status is Pending
|
|||
|
||||
Hit enter for command prompt
|
||||
|
||||
/ # wget -s --timeout=1 nginx
|
||||
/ # wget --spider --timeout=1 nginx
|
||||
Connecting to nginx (10.100.0.16:80)
|
||||
/ #
|
||||
```
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
assignees:
|
||||
- bboreham
|
||||
title: Weave Net Addon
|
||||
title: Using Weave Net for NetworkPolicy
|
||||
---
|
||||
|
||||
The [Weave Net Addon](https://www.weave.works/docs/net/latest/kube-addon/) for Kubernetes comes with a Network Policy Controller.
|
||||
|
|
|
@ -10,6 +10,7 @@ This page shows you how to backup and restore data from the different deployed s
|
|||
This page assumes you have a working Juju deployed cluster.
|
||||
{% endcapture %}
|
||||
|
||||
{% capture steps %}
|
||||
## Exporting cluster data
|
||||
|
||||
Exporting of cluster data is not supported at this time.
|
||||
|
@ -18,7 +19,6 @@ Exporting of cluster data is not supported at this time.
|
|||
|
||||
Importing of cluster data is not supported at this time.
|
||||
|
||||
{% capture steps %}
|
||||
## Exporting etcd data
|
||||
|
||||
Migrating etcd is a fairly easy task.
|
||||
|
|
|
@ -80,7 +80,7 @@ The master requires the root CA public key, `ca.pem`; the apiserver certificate,
|
|||
|
||||
Calico needs its own etcd cluster to store its state. In this guide we install a single-node cluster on the master server.
|
||||
|
||||
> Note: In a production deployment we recommend running a distributed etcd cluster for redundancy. In this guide, we use a single etcd for simplicitly.
|
||||
> Note: In a production deployment we recommend running a distributed etcd cluster for redundancy. In this guide, we use a single etcd for simplicity.
|
||||
|
||||
1. Download the template manifest file:
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ This page assumes you have a working Juju deployed cluster.
|
|||
{% endcapture %}
|
||||
|
||||
{% capture steps %}
|
||||
It is recommended to deploy individual Kubernetes clusters in their own models, so that there is a clean seperation between environments. To remove a cluster first find out which model it's in with `juju list-models`. The controller reserves an `admin` model for itself. If you have chosen to not name your model it might show up as `default`.
|
||||
It is recommended to deploy individual Kubernetes clusters in their own models, so that there is a clean separation between environments. To remove a cluster first find out which model it's in with `juju list-models`. The controller reserves an `admin` model for itself. If you have chosen to not name your model it might show up as `default`.
|
||||
|
||||
```
|
||||
$ juju list-models
|
||||
|
|
|
@ -16,7 +16,7 @@ This page assumes you have a working Juju deployed cluster.
|
|||
|
||||
controller - The management node of a cloud environment. Typically you have one controller per cloud region, or more in HA environments. The controller is responsible for managing all subsequent models in a given environment. It contains the Juju API server and its underlying database.
|
||||
|
||||
model - A collection of charms and their relationships that define a deployment. This includes machines and units. A controller can host multiple models. It is recommended to seperate Kubernetes clusters into individual models for management and isolation reasons.
|
||||
model - A collection of charms and their relationships that define a deployment. This includes machines and units. A controller can host multiple models. It is recommended to separate Kubernetes clusters into individual models for management and isolation reasons.
|
||||
|
||||
charm - The definition of a service, including its metadata, dependencies with other services, required packages, and application management logic. It contains all the operational knowledge of deploying a Kubernetes cluster. Included charm examples are `kubernetes-core`, `easy-rsa`, `kibana`, and `etcd`.
|
||||
|
||||
|
@ -25,4 +25,4 @@ unit - A given instance of a service. These may or may not use up a whole machin
|
|||
machine - A physical node, these can either be bare metal nodes, or virtual machines provided by a cloud.
|
||||
{% endcapture %}
|
||||
|
||||
{% include templates/task.md %}
|
||||
{% include templates/task.md %}
|
||||
|
|
|
@ -15,16 +15,13 @@ Supports AWS, GCE, Azure, Joyent, OpenStack, Bare Metal and local workstation de
|
|||
|
||||
### Quick Start
|
||||
|
||||
[conjure-up](http://conjure-up.io/) provides quick wasy to deploy Kubernetes on multiple clouds and bare metal. It provides a user-friendly UI that prompts you for cloud credentials and configuration options:
|
||||
[conjure-up](http://conjure-up.io/) provides a quick way to deploy Kubernetes on multiple clouds and bare metal. It provides a user-friendly UI that prompts you for cloud credentials and configuration options:
|
||||
|
||||
Available for Ubuntu 16.04 and newer:
|
||||
|
||||
```
|
||||
sudo apt-add-repository ppa:juju/stable
|
||||
sudo apt-add-repository ppa:conjure-up/next
|
||||
sudo apt update
|
||||
sudo apt install conjure-up
|
||||
conjure-up
|
||||
sudo snap install conjure-up --classic
|
||||
conjure-up kubernetes
|
||||
```
|
||||
|
||||
### Operational Guides
|
||||
|
|
|
@ -42,14 +42,11 @@ Next, apply those kernel parameters (you should see the above options echoed bac
|
|||
Now you're ready to install conjure-up and deploy Kubernetes.
|
||||
|
||||
```
|
||||
sudo apt-add-repository ppa:juju/stable
|
||||
sudo apt-add-repository ppa:conjure-up/next
|
||||
sudo apt update
|
||||
sudo apt install conjure-up
|
||||
|
||||
sudo snap install conjure-up --classic
|
||||
conjure-up kubernetes
|
||||
```
|
||||
|
||||
Note: During this set up phase cojure-up will ask you to "Setup an ipv6 subnet" with LXD, ensure you answer NO. ipv6 with Juju/LXD is currently unsupported.
|
||||
Note: During this set up phase conjure-up will ask you to "Setup an ipv6 subnet" with LXD, ensure you answer NO. ipv6 with Juju/LXD is currently unsupported.
|
||||
|
||||
### Walkthrough
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ Configure Datadog with your api-key, found in the [Datadog dashboard](). Replace
|
|||
juju configure datadog api-key=XXXX
|
||||
```
|
||||
|
||||
Finally, attach `datadog` to all applications you wish to montior. For example, kubernetes-master, kubernetes-worker, and etcd:
|
||||
Finally, attach `datadog` to all applications you wish to monitor. For example, kubernetes-master, kubernetes-worker, and etcd:
|
||||
|
||||
```
|
||||
juju add-relation datadog kubernetes-worker
|
||||
|
@ -74,7 +74,7 @@ juju add-relation kubernetes-worker filebeat
|
|||
|
||||
### Existing ElasticSearch cluster
|
||||
|
||||
In the event an ElasticSearch cluster already exists, the following can be used to connect and leverage it instead of creating a new, seprate, cluster. First deploy the two beats, filebeat and topbeat
|
||||
In the event an ElasticSearch cluster already exists, the following can be used to connect and leverage it instead of creating a new, separate, cluster. First deploy the two beats, filebeat and topbeat
|
||||
|
||||
```
|
||||
juju deploy filebeat
|
||||
|
@ -122,7 +122,7 @@ juju add-relation nrpe kubeapi-load-balancer
|
|||
|
||||
### Existing install of Nagios
|
||||
|
||||
If you already have an exisiting Nagios installation, the `nrpe-external-master` charm can be used instead. This will allow you to supply configuration options that map your exisiting external Nagios installation to NRPE. Replace `255.255.255.255` with the IP address of the nagios instance.
|
||||
If you already have an existing Nagios installation, the `nrpe-external-master` charm can be used instead. This will allow you to supply configuration options that map your existing external Nagios installation to NRPE. Replace `255.255.255.255` with the IP address of the nagios instance.
|
||||
|
||||
```
|
||||
juju deploy nrpe-external-master
|
||||
|
|
|
@ -45,7 +45,7 @@ $ route | grep default | head -n 1 | awk {'print $8'}
|
|||
establishing networking setup with etcd. Ensure this network range is not active
|
||||
on layers 2/3 you're deploying to, as it will cause collisions and odd behavior
|
||||
if care is not taken when selecting a good CIDR range to assign to flannel. It's
|
||||
also good practice to ensure you alot yourself a large enough IP range to support
|
||||
also good practice to ensure you allot yourself a large enough IP range to support
|
||||
how large your cluster will potentially scale. Class A IP ranges with /24 are
|
||||
a good option.
|
||||
{% endcapture %}
|
||||
|
|
|
@ -83,8 +83,8 @@ test 50M RWO Available 10s
|
|||
```
|
||||
|
||||
To consume these Persistent Volumes, your pods will need an associated
|
||||
Persistant Volume Claim with them, and is outside the scope of this README. See the
|
||||
[Persistant Volumes](http://kubernetes.io/docs/user-guide/persistent-volumes/)
|
||||
Persistent Volume Claim with them, and is outside the scope of this README. See the
|
||||
[Persistent Volumes](http://kubernetes.io/docs/user-guide/persistent-volumes/)
|
||||
documentation for more information.
|
||||
{% endcapture %}
|
||||
|
||||
|
|
|
@ -42,9 +42,9 @@ Machine State DNS Inst id Series AZ
|
|||
|
||||
In this example we can glean some information. The `Workload` column will show the status of a given service. The `Message` section will show you the health of a given service in the cluster. During deployment and maintenance these workload statuses will update to reflect what a given node is doing. For example the workload my say `maintenance` while message will describe this maintenance as `Installing docker`.
|
||||
|
||||
During normal oprtation the Workload should read `active`, the Agent column (which reflects what the Juju agent is doing) should read `idle`, and the messages will either say `Ready` or another descriptive term. `juju status --color` will also return all green results when a cluster's deployment is healthy.
|
||||
During normal operation the Workload should read `active`, the Agent column (which reflects what the Juju agent is doing) should read `idle`, and the messages will either say `Ready` or another descriptive term. `juju status --color` will also return all green results when a cluster's deployment is healthy.
|
||||
|
||||
Status can become unweildly for large clusters, it is then recommended to check status on individual services, for example to check the status on the workers only:
|
||||
Status can become unwieldy for large clusters, it is then recommended to check status on individual services, for example to check the status on the workers only:
|
||||
|
||||
juju status kubernetes-workers
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ users do, when unit and integration tests are insufficient.
|
|||
To deploy the end-to-end test suite, you need to relate the `kubernetes-e2e` charm to your existing kubernetes-master nodes and easyrsa:
|
||||
|
||||
```
|
||||
juju deploy kubernetes-e2e
|
||||
juju deploy cs:~containers/kubernetes-e2e
|
||||
juju add-relation kubernetes-e2e kubernetes-master
|
||||
juju add-relation kubernetes-e2e easyrsa
|
||||
```
|
||||
|
@ -83,7 +83,7 @@ As an example, you can run a more limited set of tests for rapid validation of
|
|||
a deployed cluster. The following example will skip the `Flaky`, `Slow`, and
|
||||
`Feature` labeled tests:
|
||||
|
||||
juju run-action kubernetes-e2e/0 skip='\[(Flaky|Slow|Feature:.*)\]'
|
||||
juju run-action kubernetes-e2e/0 test skip='\[(Flaky|Slow|Feature:.*)\]'
|
||||
|
||||
> Note: the escaping of the regex due to how bash handles brackets.
|
||||
|
||||
|
@ -120,13 +120,13 @@ Output:
|
|||
|
||||
Action queued with id: 4ceed33a-d96d-465a-8f31-20d63442e51b
|
||||
|
||||
Copy output to your local machine
|
||||
Copy output to your local machine:
|
||||
|
||||
juju scp kubernetes-e2e/0:4ceed33a-d96d-465a-8f31-20d63442e51b.log .
|
||||
|
||||
##### Action result output
|
||||
|
||||
Or you can just show the output inline::
|
||||
Or you can just show the output inline:
|
||||
|
||||
juju run-action kubernetes-e2e/0 test
|
||||
|
||||
|
|
|
@ -1,5 +1,8 @@
|
|||
---
|
||||
title: Configuring a Pod to Use a PersistentVolume for Storage
|
||||
redirect_from:
|
||||
- "/docs/user-guide/persistent-volumes/walkthrough/"
|
||||
- "/docs/user-guide/persistent-volumes/walkthrough.html"
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
|
|
@ -1,5 +1,8 @@
|
|||
---
|
||||
title: Distributing Credentials Securely
|
||||
redirect_from:
|
||||
- "/docs/user-guide/secrets/walkthrough/"
|
||||
- "/docs/user-guide/secrets/walkthrough.html"
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
@ -22,11 +25,11 @@ Suppose you want to have two pieces of secret data: a username `my-app` and a pa
|
|||
convert your username and password to a base-64 representation. Here's a Linux
|
||||
example:
|
||||
|
||||
echo 'my-app' | base64
|
||||
echo '39528$vdg7Jb' | base64
|
||||
echo -n 'my-app' | base64
|
||||
echo -n '39528$vdg7Jb' | base64
|
||||
|
||||
The output shows that the base-64 representation of your username is `bXktYXBwCg==`,
|
||||
and the base-64 representation of your password is `Mzk1MjgkdmRnN0piCg==`.
|
||||
The output shows that the base-64 representation of your username is `bXktYXBw`,
|
||||
and the base-64 representation of your password is `Mzk1MjgkdmRnN0pi`.
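For reference, here is a hedged sketch of what the `secret.yaml` file referenced in the next section plausibly contains, using the base-64 values above (the file shipped with the docs may differ in detail):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: test-secret
data:
  username: bXktYXBw
  password: Mzk1MjgkdmRnN0pi
```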
|
||||
|
||||
## Creating a Secret
|
||||
|
||||
|
@ -37,12 +40,12 @@ username and password:
|
|||
|
||||
1. Create the Secret
|
||||
|
||||
kubectl create -f http://k8s.io/docs/tasks/administer-cluster/secret.yaml
|
||||
kubectl create -f secret.yaml
|
||||
|
||||
**Note:** If you want to skip the Base64 encoding step, you can create a Secret
|
||||
by using the `kubectl create secret` command:
|
||||
|
||||
kubectl create secret generic test-secret --from-literal=username="my-app",password="39528$vdg7Jb"
|
||||
kubectl create secret generic test-secret --from-literal=username='my-app',password='39528$vdg7Jb'
|
||||
|
||||
1. View information about the Secret:
|
||||
|
||||
|
@ -69,8 +72,8 @@ username and password:
|
|||
|
||||
Data
|
||||
====
|
||||
password: 13 bytes
|
||||
username: 7 bytes
|
||||
password: 12 bytes
|
||||
username: 6 bytes
|
||||
|
||||
## Creating a Pod that has access to the secret data through a Volume
|
||||
|
||||
|
@ -80,7 +83,7 @@ Here is a configuration file you can use to create a Pod:
|
|||
|
||||
1. Create the Pod:
|
||||
|
||||
kubectl create -f http://k8s.io/docs/tasks/administer-cluster/secret-pod.yaml
|
||||
kubectl create -f secret-pod.yaml
|
||||
|
||||
1. Verify that your Pod is running:
|
||||
|
||||
|
@ -112,7 +115,7 @@ is exposed:
|
|||
|
||||
1. In your shell, display the contents of the `username` and `password` files:
|
||||
|
||||
root@secret-test-pod:/etc/secret-volume# cat username password
|
||||
root@secret-test-pod:/etc/secret-volume# cat username; echo; cat password; echo
|
||||
|
||||
The output is your username and password:
|
||||
|
||||
|
@ -127,7 +130,7 @@ Here is a configuration file you can use to create a Pod:
|
|||
|
||||
1. Create the Pod:
|
||||
|
||||
kubectl create -f http://k8s.io/docs/tasks/administer-cluster/secret-envars-pod.yaml
|
||||
kubectl create -f secret-envars-pod.yaml
|
||||
|
||||
1. Verify that your Pod is running:
|
||||
|
||||
|
|
|
@ -0,0 +1,10 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: counter
|
||||
spec:
|
||||
containers:
|
||||
- name: count
|
||||
image: busybox
|
||||
args: [/bin/sh, -c,
|
||||
'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done']
|
|
@ -8,6 +8,9 @@ assignees:
|
|||
- kow3ns
|
||||
- smarterclayton
|
||||
title: Debugging Init Containers
|
||||
redirect_from:
|
||||
- "/docs/tasks/troubleshoot/debug-init-containers/"
|
||||
- "/docs/tasks/troubleshoot/debug-init-containers.html"
|
||||
---
|
||||
|
||||
{% capture overview %}
|
|
@ -0,0 +1,104 @@
|
|||
---
|
||||
assignees:
|
||||
- crassirostris
|
||||
- piosz
|
||||
title: Logging Using Elasticsearch and Kibana
|
||||
---
|
||||
|
||||
On the Google Compute Engine (GCE) platform, the default logging support targets
|
||||
[Stackdriver Logging](https://cloud.google.com/logging/), which is described in detail
|
||||
in the [Logging With Stackdriver Logging](/docs/user-guide/logging/stackdriver).
|
||||
|
||||
This article describes how to set up a cluster to ingest logs into
|
||||
[Elasticsearch](https://www.elastic.co/products/elasticsearch), and view
|
||||
them using [Kibana](https://www.elastic.co/products/kibana), as an alternative to
|
||||
Stackdriver Logging when running on GCE. Note that Elasticsearch and Kibana do not work with Kubernetes clusters hosted on Google Container Engine.
|
||||
|
||||
To use Elasticsearch and Kibana for cluster logging, you should set the
|
||||
following environment variable as shown below when creating your cluster with
|
||||
kube-up.sh:
|
||||
|
||||
```shell
|
||||
KUBE_LOGGING_DESTINATION=elasticsearch
|
||||
```
|
||||
|
||||
You should also ensure that `KUBE_ENABLE_NODE_LOGGING=true` (which is the default for the GCE platform).
|
||||
|
||||
Now, when you create a cluster, a message will indicate that the Fluentd log
|
||||
collection daemons that run on each node will target Elasticsearch:
|
||||
|
||||
```shell
|
||||
$ cluster/kube-up.sh
|
||||
...
|
||||
Project: kubernetes-satnam
|
||||
Zone: us-central1-b
|
||||
... calling kube-up
|
||||
Project: kubernetes-satnam
|
||||
Zone: us-central1-b
|
||||
+++ Staging server tars to Google Storage: gs://kubernetes-staging-e6d0e81793/devel
|
||||
+++ kubernetes-server-linux-amd64.tar.gz uploaded (sha1 = 6987c098277871b6d69623141276924ab687f89d)
|
||||
+++ kubernetes-salt.tar.gz uploaded (sha1 = bdfc83ed6b60fa9e3bff9004b542cfc643464cd0)
|
||||
Looking for already existing resources
|
||||
Starting master and configuring firewalls
|
||||
Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/zones/us-central1-b/disks/kubernetes-master-pd].
|
||||
NAME ZONE SIZE_GB TYPE STATUS
|
||||
kubernetes-master-pd us-central1-b 20 pd-ssd READY
|
||||
Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/regions/us-central1/addresses/kubernetes-master-ip].
|
||||
+++ Logging using Fluentd to elasticsearch
|
||||
```
|
||||
|
||||
The per-node Fluentd pods, the Elasticsearch pods, and the Kibana pods should
|
||||
all be running in the kube-system namespace soon after the cluster comes to
|
||||
life.
|
||||
|
||||
```shell
|
||||
$ kubectl get pods --namespace=kube-system
|
||||
NAME READY REASON RESTARTS AGE
|
||||
elasticsearch-logging-v1-78nog 1/1 Running 0 2h
|
||||
elasticsearch-logging-v1-nj2nb 1/1 Running 0 2h
|
||||
fluentd-elasticsearch-kubernetes-node-5oq0 1/1 Running 0 2h
|
||||
fluentd-elasticsearch-kubernetes-node-6896 1/1 Running 0 2h
|
||||
fluentd-elasticsearch-kubernetes-node-l1ds 1/1 Running 0 2h
|
||||
fluentd-elasticsearch-kubernetes-node-lz9j 1/1 Running 0 2h
|
||||
kibana-logging-v1-bhpo8 1/1 Running 0 2h
|
||||
kube-dns-v3-7r1l9 3/3 Running 0 2h
|
||||
monitoring-heapster-v4-yl332 1/1 Running 1 2h
|
||||
monitoring-influx-grafana-v1-o79xf 2/2 Running 0 2h
|
||||
```
|
||||
|
||||
The `fluentd-elasticsearch` pods gather logs from each node and send them to
|
||||
the `elasticsearch-logging` pods, which are part of a
|
||||
[service](/docs/user-guide/services/) named `elasticsearch-logging`. These
|
||||
Elasticsearch pods store the logs and expose them via a REST API.
|
||||
The `kibana-logging` pod provides a web UI for reading the logs stored in
|
||||
Elasticsearch, and is part of a service named `kibana-logging`.
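You can confirm that both services exist with a plain listing of the namespace (a quick sanity check, not required for the setup):

```shell
$ kubectl get services --namespace=kube-system
# elasticsearch-logging and kibana-logging should appear among the results.
```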
|
||||
|
||||
The Elasticsearch and Kibana services are both in the `kube-system` namespace
|
||||
and are not directly exposed via a publicly reachable IP address. To reach them,
|
||||
follow the instructions for [Accessing services running in a cluster](/docs/user-guide/accessing-the-cluster/#accessing-services-running-on-the-cluster).
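One common route is `kubectl proxy` plus the service proxy path; a minimal sketch, assuming the default proxy port and noting that the exact proxy path format depends on your cluster version:

```shell
$ kubectl proxy --port=8001 &
$ curl http://localhost:8001/api/v1/proxy/namespaces/kube-system/services/elasticsearch-logging/
```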
|
||||
|
||||
If you try accessing the `elasticsearch-logging` service in your browser, you'll
|
||||
see a status page that looks something like this:
|
||||
|
||||
![Elasticsearch Status](/images/docs/es-browser.png)
|
||||
|
||||
You can now type Elasticsearch queries directly into the browser, if you'd
|
||||
like. See [Elasticsearch's documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-uri-request.html)
|
||||
for more details on how to do so.
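Through the same proxy you can also issue URI searches with `curl`; the query below is purely illustrative:

```shell
$ curl 'http://localhost:8001/api/v1/proxy/namespaces/kube-system/services/elasticsearch-logging/_search?q=kubernetes&size=5'
```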
|
||||
|
||||
Alternatively, you can view your cluster's logs using Kibana (again using the
|
||||
[instructions for accessing a service running in the cluster](/docs/user-guide/accessing-the-cluster/#accessing-services-running-on-the-cluster)).
|
||||
The first time you visit the Kibana URL you will be presented with a page that
|
||||
asks you to configure your view of the ingested logs. Select the option for
|
||||
timeseries values and select `@timestamp`. On the following page select the
|
||||
`Discover` tab and then you should be able to see the ingested logs.
|
||||
You can set the refresh interval to 5 seconds to have the logs
|
||||
regularly refreshed.
|
||||
|
||||
Here is a typical view of ingested logs from the Kibana viewer:
|
||||
|
||||
![Kibana logs](/images/docs/kibana-logs.png)
|
||||
|
||||
Kibana opens up all sorts of powerful options for exploring your logs! For some
|
||||
ideas on how to dig into it, check out [Kibana's documentation](https://www.elastic.co/guide/en/kibana/current/discover.html).
|
||||
|
|
@ -0,0 +1,148 @@
|
|||
---
|
||||
assignees:
|
||||
- crassirostris
|
||||
- piosz
|
||||
title: Logging Using Stackdriver
|
||||
---
|
||||
|
||||
Before reading this page, it's highly recommended to familiarize yourself with the [overview of logging in Kubernetes](/docs/user-guide/logging/overview).
|
||||
|
||||
This article assumes that you have created a Kubernetes cluster with cluster-level logging support for sending logs to Stackdriver Logging. You can do this either by selecting the **Enable Stackdriver Logging** checkbox in the create cluster dialogue in [GKE](https://cloud.google.com/container-engine/), or by setting the `KUBE_LOGGING_DESTINATION` flag to `gcp` when manually starting a cluster using `kube-up.sh`.
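If you bring the cluster up yourself, that flag is simply an environment variable passed to `kube-up.sh`; a minimal sketch (variable handling may differ between releases):

```shell
# Enable node-level logging and send logs to Stackdriver Logging (gcp),
# then start the cluster.
export KUBE_ENABLE_NODE_LOGGING=true
export KUBE_LOGGING_DESTINATION=gcp
cluster/kube-up.sh
```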
|
||||
|
||||
The following guide describes gathering a container's standard output and standard error. To gather logs written by an application to a file, you can use [a sidecar approach](https://github.com/kubernetes/contrib/blob/master/logging/fluentd-sidecar-gcp/README.md).
|
||||
|
||||
## Overview
|
||||
|
||||
After creation, you can discover logging agent pods in the `kube-system` namespace,
|
||||
one per node, by running the following command:
|
||||
|
||||
```shell
|
||||
$ kubectl get pods --namespace=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
...
|
||||
fluentd-gcp-v1.30-50gnc 1/1 Running 0 5d
|
||||
fluentd-gcp-v1.30-v255c 1/1 Running 0 5d
|
||||
fluentd-gcp-v1.30-f02l5 1/1 Running 0 5d
|
||||
...
|
||||
```
|
||||
|
||||
To understand how logging with Stackdriver works, consider the following
|
||||
synthetic log generator pod specification [counter-pod.yaml](/docs/tasks/debug-application-cluster/counter-pod.yaml):
|
||||
|
||||
{% include code.html language="yaml" file="counter-pod.yaml" ghlink="/docs/tasks/debug-application-cluster/counter-pod.yaml" %}
|
||||
|
||||
This pod specification has one container that runs a bash script
|
||||
that writes out the value of a counter and the date once per
|
||||
second, and runs indefinitely. Let's create this pod in the default namespace.
|
||||
|
||||
```shell
|
||||
$ kubectl create -f http://k8s.io/docs/user-guide/logging/examples/counter-pod.yaml
|
||||
pod "counter" created
|
||||
```
|
||||
|
||||
You can observe the running pod:
|
||||
|
||||
```shell
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
counter 1/1 Running 0 5m
|
||||
```
|
||||
|
||||
For a short period of time, you might observe the `Pending` pod status, because the kubelet
|
||||
has to download the container image first. When the pod status changes to `Running`
|
||||
you can use the `kubectl logs` command to view the output of this counter pod.
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter
|
||||
0: Mon Jan 1 00:00:00 UTC 2001
|
||||
1: Mon Jan 1 00:00:01 UTC 2001
|
||||
2: Mon Jan 1 00:00:02 UTC 2001
|
||||
...
|
||||
```
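As the next paragraph notes, logs survive a container restart. If the counter container had crashed and been restarted, the previous instance's output would typically still be readable with the `--previous` flag:

```shell
$ kubectl logs counter --previous
```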
|
||||
|
||||
As described in the logging overview, this command fetches log entries
|
||||
from the container log file. If the container is killed and then restarted by
|
||||
Kubernetes, you can still access logs from the previous container. However,
|
||||
if the pod is evicted from the node, log files are lost. Let's demonstrate this
|
||||
by deleting the currently running counter container:
|
||||
|
||||
```shell
|
||||
$ kubectl delete pod counter
|
||||
pod "counter" deleted
|
||||
```
|
||||
|
||||
and then recreating it:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f http://k8s.io/docs/user-guide/logging/examples/counter-pod.yaml
|
||||
pod "counter" created
|
||||
```
|
||||
|
||||
After some time, you can access logs from the counter pod again:
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter
|
||||
0: Mon Jan 1 00:01:00 UTC 2001
|
||||
1: Mon Jan 1 00:01:01 UTC 2001
|
||||
2: Mon Jan 1 00:01:02 UTC 2001
|
||||
...
|
||||
```
|
||||
|
||||
As expected, only recent log lines are present. However, for a real-world
|
||||
application you will likely want to be able to access logs from all containers,
|
||||
especially for debugging purposes. This is where the previously enabled
|
||||
Stackdriver Logging can help.
|
||||
|
||||
## Viewing logs
|
||||
|
||||
The Stackdriver Logging agent attaches metadata to each log entry, which you can use later
|
||||
in queries to select only the messages you're interested in: for example,
|
||||
the messages from a particular pod.
|
||||
|
||||
The most important pieces of metadata are the resource type and log name.
|
||||
The resource type of a container log is `container`, which is named
|
||||
`GKE Containers` in the UI (even if the Kubernetes cluster is not on GKE).
|
||||
The log name is the name of the container, so if you have a pod with
|
||||
two containers, named `container_1` and `container_2` in the spec, their logs
|
||||
will have log names `container_1` and `container_2` respectively.
|
||||
|
||||
System components have resource type `compute`, which is named
|
||||
`GCE VM Instance` in the interface. Log names for system components are fixed.
|
||||
For a GKE node, every log entry from a system component has one of the following
|
||||
log names:
|
||||
|
||||
* docker
|
||||
* kubelet
|
||||
* kube-proxy
|
||||
|
||||
You can learn more about viewing logs on [the dedicated Stackdriver page](https://cloud.google.com/logging/docs/view/logs_viewer).
|
||||
|
||||
One of the possible ways to view logs is using the
|
||||
[`gcloud logging`](https://cloud.google.com/logging/docs/api/gcloud-logging)
|
||||
command line interface from the [Google Cloud SDK](https://cloud.google.com/sdk/).
|
||||
It uses Stackdriver Logging [filtering syntax](https://cloud.google.com/logging/docs/view/advanced_filters)
|
||||
to query specific logs. For example, you can run the following command:
|
||||
|
||||
```shell
|
||||
$ gcloud beta logging read 'logName="projects/$YOUR_PROJECT_ID/logs/count"' --format json | jq '.[].textPayload'
|
||||
...
|
||||
"2: Mon Jan 1 00:01:02 UTC 2001\n"
|
||||
"1: Mon Jan 1 00:01:01 UTC 2001\n"
|
||||
"0: Mon Jan 1 00:01:00 UTC 2001\n"
|
||||
...
|
||||
"2: Mon Jan 1 00:00:02 UTC 2001\n"
|
||||
"1: Mon Jan 1 00:00:01 UTC 2001\n"
|
||||
"0: Mon Jan 1 00:00:00 UTC 2001\n"
|
||||
```
|
||||
|
||||
As you can see, it outputs messages for the count container from both
|
||||
the first and second runs, despite the fact that the kubelet already deleted
|
||||
the logs for the first container.
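The same filter syntax works for the system component log names listed above; for example, a sketch that reads recent kubelet entries (the project ID is a placeholder):

```shell
$ gcloud beta logging read 'logName="projects/$YOUR_PROJECT_ID/logs/kubelet"' --limit 10 --format json | jq '.[].textPayload'
```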
|
||||
|
||||
### Exporting logs
|
||||
|
||||
You can export logs to [Google Cloud Storage](https://cloud.google.com/storage/)
|
||||
or to [BigQuery](https://cloud.google.com/bigquery/) to run further
|
||||
analysis. Stackdriver Logging offers the concept of sinks, where you can
|
||||
specify the destination of log entries. More information is available on
|
||||
the Stackdriver [Exporting Logs page](https://cloud.google.com/logging/docs/export/configure_export_v2).
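As a rough sketch, creating a sink that exports the counter's entries to a Cloud Storage bucket might look like the following; the bucket name is illustrative and the exact flags may differ between SDK versions, so check the Exporting Logs page for the current syntax:

```shell
$ gcloud beta logging sinks create counter-sink \
    storage.googleapis.com/my-log-export-bucket \
    --log-filter='logName="projects/$YOUR_PROJECT_ID/logs/count"'
```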
|
|
@ -1,5 +1,10 @@
|
|||
---
|
||||
title: Tasks
|
||||
redirect_from:
|
||||
- "/docs/user-guide/configuring-containers/"
|
||||
- "/docs/user-guide/configuring-containers.html"
|
||||
- "/docs/user-guide/production-pods/"
|
||||
- "/docs/user-guide/production-pods.html"
|
||||
---
|
||||
|
||||
This section of the Kubernetes documentation contains pages that
|
||||
|
@ -32,9 +37,12 @@ single thing, typically by giving a short sequence of steps.
|
|||
* [Using Port Forwarding to Access Applications in a Cluster](/docs/tasks/access-application-cluster/port-forward-access-application-cluster/)
|
||||
* [Providing Load-Balanced Access to an Application in a Cluster](/docs/tasks/access-application-cluster/load-balance-access-application-cluster/)
|
||||
|
||||
#### Debugging Applications in a Cluster
|
||||
#### Monitoring, Logging, and Debugging
|
||||
|
||||
* [Determining the Reason for Pod Failure](/docs/tasks/debug-application-cluster/determine-reason-pod-failure/)
|
||||
* [Debugging Init Containers](/docs/tasks/debug-application-cluster/debug-init-containers/)
|
||||
* [Logging Using Stackdriver](/docs/tasks/debug-application-cluster/logging-stackdriver/)
|
||||
* [Logging Using ElasticSearch and Kibana](/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana/)
|
||||
|
||||
#### Accessing the Kubernetes API
|
||||
|
||||
|
@ -55,10 +63,6 @@ single thing, typically by giving a short sequence of steps.
|
|||
* [Debugging a StatefulSet](/docs/tasks/manage-stateful-set/debugging-a-statefulset/)
|
||||
* [Force Deleting StatefulSet Pods](/docs/tasks/manage-stateful-set/delete-pods/)
|
||||
|
||||
#### Troubleshooting
|
||||
|
||||
* [Debugging Init Containers](/docs/tasks/troubleshoot/debug-init-containers/)
|
||||
|
||||
### What's next
|
||||
|
||||
If you would like to write a task page, see
|
||||
|
|
|
@ -74,7 +74,7 @@ title: Using Minikube to Create a Cluster
|
|||
<div class="row">
|
||||
<div class="col-md-8">
|
||||
<p><b>The Master is responsible for managing the cluster.</b> The master coordinates all activities in your cluster, such as scheduling applications, maintaining applications' desired state, scaling applications, and rolling out new updates.</p>
|
||||
<p><b>A node is a VM or a physical computer that serves as a worker machine in a Kubernetes cluster.</b> Each node has a Kubelet, which is an agent for managing the node and communicating with the Kubernetes master. The node should also have tools for handling container operations, such as Docker or rkt. A Kubernetes cluster that handles production traffic should have a minimum of three nodes.</p>
|
||||
<p><b>A node is a VM or a physical computer that serves as a worker machine in a Kubernetes cluster.</b> Each node has a Kubelet, which is an agent for managing the node and communicating with the Kubernetes master. The node should also have tools for handling container operations, such as <a href="https://www.docker.com/">Docker</a> or <a href="https://coreos.com/rkt/">rkt</a>. A Kubernetes cluster that handles production traffic should have a minimum of three nodes.</p>
|
||||
|
||||
</div>
|
||||
<div class="col-md-4">
|
||||
|
|
|
@ -174,7 +174,7 @@ service "nodeport" annotated
|
|||
Now, re-run the test:
|
||||
|
||||
```console
|
||||
$ for node in $NODES; do curl --connect-timeout 1 -s $node:$NODEPORT | grep -i client_address; do
|
||||
$ for node in $NODES; do curl --connect-timeout 1 -s $node:$NODEPORT | grep -i client_address; done
|
||||
client_address=104.132.1.79
|
||||
```
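For context, `$NODES` and `$NODEPORT` are assumed to have been populated earlier in the tutorial; one hedged way to set them looks like this:

```shell
NODES=$(kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}')
NODEPORT=$(kubectl get -o jsonpath='{.spec.ports[0].nodePort}' services nodeport)
```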
|
||||
|
||||
|
|
|
@ -1,5 +1,8 @@
|
|||
---
|
||||
title: Using a Service to Access an Application in a Cluster
|
||||
redirect_from:
|
||||
- "/docs/user-guide/quick-start/"
|
||||
- "/docs/user-guide/quick-start.html"
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
|
|
@ -3,6 +3,10 @@ title: Running a Stateless Application Using a Deployment
|
|||
redirect_from:
|
||||
- "/docs/user-guide/simple-nginx/"
|
||||
- "/docs/user-guide/simple-nginx.html"
|
||||
- "/docs/user-guide/pods/single-container/"
|
||||
- "/docs/user-guide/pods/single-container.html"
|
||||
- "/docs/user-guide/deploying-applications/"
|
||||
- "/docs/user-guide/deploying-applications.html"
|
||||
---
|
||||
|
||||
{% capture overview %}
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
---
|
||||
title: Configuring Containers
|
||||
---
|
||||
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
[Tasks](/docs/tasks/)
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
assignees:
|
||||
- mikedanese
|
||||
title: Commands and Capabilities
|
||||
---
|
||||
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
* [Container Command and Arguments](/docs/concepts/configuration/container-command-args/)
|
||||
* [Container Capabilities](/docs/concepts/policy/container-capabilities/)
|
|
@ -32,7 +32,7 @@ A typical use case is:
|
|||
|
||||
### Prerequisites
|
||||
|
||||
You need a working Kubernetes cluster at version >= 1.4 (for ScheduledJob), >= 1.5 (for CronJobs),
|
||||
You need a working Kubernetes cluster at version >= 1.4 (for ScheduledJob), >= 1.5 (for CronJob),
|
||||
with batch/v2alpha1 API turned on by passing `--runtime-config=batch/v2alpha1` while bringing up
|
||||
the API server (see [Turn on or off an API version for your cluster](/docs/admin/cluster-management/#turn-on-or-off-an-api-version-for-your-cluster)
|
||||
for more). You cannot use Cron Jobs on a hosted Kubernetes provider that has disabled alpha resources.
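A quick, hedged way to confirm the API group is actually enabled on your cluster:

```shell
$ kubectl api-versions | grep batch/v2alpha1
batch/v2alpha1
```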
|
||||
|
|
|
@ -1,11 +0,0 @@
|
|||
---
|
||||
assignees:
|
||||
- bgrant0607
|
||||
- caesarxuchao
|
||||
- thockin
|
||||
title: Deploying Applications
|
||||
---
|
||||
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
[Running a Stateless Application Using a Deployment](/docs/tutorials/stateless-application/run-stateless-application-deployment/)
|
|
@ -622,7 +622,7 @@ See the [Kubernetes API conventions](https://github.com/kubernetes/community/blo
|
|||
Note that in version 1.5, Kubernetes will take no action on a stalled Deployment other than to report a status condition with
|
||||
`Reason=ProgressDeadlineExceeded`.
|
||||
|
||||
**Note:** If you pause a Deployment, Kubernetes does not check progress against your specified deadline. You can safely pause a Deployment in the middle of a rollout and resume without triggering a the condition for exceeding the deadline.
|
||||
**Note:** If you pause a Deployment, Kubernetes does not check progress against your specified deadline. You can safely pause a Deployment in the middle of a rollout and resume without triggering the condition for exceeding the deadline.
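For instance, pausing and later resuming a rollout might look like this; the Deployment name is a placeholder:

```shell
$ kubectl rollout pause deployment/nginx-deployment
deployment "nginx-deployment" paused
$ kubectl rollout resume deployment/nginx-deployment
deployment "nginx-deployment" resumed
```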
|
||||
|
||||
You may experience transient errors with your Deployments, either due to a low timeout that you have set or due to any other kind
|
||||
of error that can be treated as transient. For example, let's suppose you have insufficient quota. If you describe the Deployment
|
||||
|
|
|
@ -8,7 +8,7 @@ The Kubernetes **Guides** can help you work with various aspects of the Kubernet
|
|||
|
||||
* The Kubernetes [User Guide](#user-guide-internal) can help you run programs and services on an existing Kubernetes cluster.
|
||||
* The [Cluster Admin Guide](/docs/admin/) can help you set up and administrate your own Kubernetes cluster.
|
||||
* The [Developer Guide](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/docs/devel) can help you either write code to directly access the Kubernetes API, or to contribute directly to the Kubernetes project.
|
||||
* The [Developer Guide] can help you either write code to directly access the Kubernetes API, or to contribute directly to the Kubernetes project.
|
||||
|
||||
## <a name="user-guide-internal"></a>Kubernetes User Guide
|
||||
|
||||
|
@ -86,3 +86,5 @@ Pods and containers
|
|||
* [Configuration Best Practices and Tips](/docs/user-guide/config-best-practices/)
|
||||
* [Assign pods to selected nodes](/docs/user-guide/node-selection/)
|
||||
* [Perform a rolling update on a running group of pods](/docs/user-guide/update-demo/)
|
||||
|
||||
[Developer Guide]: https://github.com/kubernetes/community/blob/master/contributors/devel/README.md
|
||||
|
|
|
@ -71,7 +71,7 @@ job "process-item-cherry" created
|
|||
Now, check on the jobs:
|
||||
|
||||
```shell
|
||||
$ kubectl get jobs -l app=jobexample
|
||||
$ kubectl get jobs -l jobgroup=jobexample
|
||||
JOB CONTAINER(S) IMAGE(S) SELECTOR SUCCESSFUL
|
||||
process-item-apple c busybox app in (jobexample),item in (apple) 1
|
||||
process-item-banana c busybox app in (jobexample),item in (banana) 1
|
||||
|
@ -85,7 +85,7 @@ do not care to see.)
|
|||
We can check on the pods as well using the same label selector:
|
||||
|
||||
```shell
|
||||
$ kubectl get pods -l app=jobexample
|
||||
$ kubectl get pods -l jobgroup=jobexample --show-all
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
process-item-apple-kixwv 0/1 Completed 0 4m
|
||||
process-item-banana-wrsf7 0/1 Completed 0 4m
|
||||
|
@ -96,7 +96,7 @@ There is not a single command to check on the output of all jobs at once,
|
|||
but looping over all the pods is pretty easy:
|
||||
|
||||
```shell
|
||||
$ for p in $(kubectl get pods -l app=jobexample -o name)
|
||||
$ for p in $(kubectl get pods -l jobgroup=jobexample -o name)
|
||||
do
|
||||
kubectl logs $p
|
||||
done
|
||||
|
|
|
@ -11,19 +11,26 @@ Run a proxy to the Kubernetes API server
|
|||
|
||||
To proxy all of the Kubernetes API and nothing else, use:
|
||||
|
||||
```
|
||||
$ kubectl proxy --api-prefix=/
|
||||
```
|
||||
|
||||
To proxy only part of the Kubernetes API and also serve some static files:
|
||||
|
||||
```
|
||||
$ kubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/
|
||||
```
|
||||
|
||||
The above lets you 'curl localhost:8001/api/v1/pods'.
|
||||
|
||||
To proxy the entire Kubernetes API at a different root, use:
|
||||
|
||||
```
|
||||
$ kubectl proxy --api-prefix=/custom/
|
||||
```
|
||||
|
||||
The above lets you `curl localhost:8001/custom/api/v1/pods`
|
||||
|
||||
The above lets you 'curl localhost:8001/custom/api/v1/pods'
|
||||
|
||||
```
|
||||
kubectl proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix]
|
||||
|
|
|
@ -5,99 +5,6 @@ assignees:
|
|||
title: Logging with Elasticsearch and Kibana
|
||||
---
|
||||
|
||||
On the Google Compute Engine (GCE) platform, the default logging support targets
|
||||
[Stackdriver Logging](https://cloud.google.com/logging/), which is described in detail
|
||||
in the [Logging With Stackdriver Logging](/docs/user-guide/logging/stackdriver).
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
This article describes how to set up a cluster to ingest logs into
|
||||
[Elasticsearch](https://www.elastic.co/products/elasticsearch), and view
|
||||
them using [Kibana](https://www.elastic.co/products/kibana), as an alternative to
|
||||
Stackdriver Logging when running on GCE. Note that Elasticsearch and Kibana do not work with Kubernetes clusters hosted on Google Container Engine.
|
||||
|
||||
To use Elasticsearch and Kibana for cluster logging, you should set the
|
||||
following environment variable as shown below when creating your cluster with
|
||||
kube-up.sh:
|
||||
|
||||
```shell
|
||||
KUBE_LOGGING_DESTINATION=elasticsearch
|
||||
```
|
||||
|
||||
You should also ensure that `KUBE_ENABLE_NODE_LOGGING=true` (which is the default for the GCE platform).
|
||||
|
||||
Now, when you create a cluster, a message will indicate that the Fluentd log
|
||||
collection daemons that run on each node will target Elasticsearch:
|
||||
|
||||
```shell
|
||||
$ cluster/kube-up.sh
|
||||
...
|
||||
Project: kubernetes-satnam
|
||||
Zone: us-central1-b
|
||||
... calling kube-up
|
||||
Project: kubernetes-satnam
|
||||
Zone: us-central1-b
|
||||
+++ Staging server tars to Google Storage: gs://kubernetes-staging-e6d0e81793/devel
|
||||
+++ kubernetes-server-linux-amd64.tar.gz uploaded (sha1 = 6987c098277871b6d69623141276924ab687f89d)
|
||||
+++ kubernetes-salt.tar.gz uploaded (sha1 = bdfc83ed6b60fa9e3bff9004b542cfc643464cd0)
|
||||
Looking for already existing resources
|
||||
Starting master and configuring firewalls
|
||||
Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/zones/us-central1-b/disks/kubernetes-master-pd].
|
||||
NAME ZONE SIZE_GB TYPE STATUS
|
||||
kubernetes-master-pd us-central1-b 20 pd-ssd READY
|
||||
Created [https://www.googleapis.com/compute/v1/projects/kubernetes-satnam/regions/us-central1/addresses/kubernetes-master-ip].
|
||||
+++ Logging using Fluentd to elasticsearch
|
||||
```
|
||||
|
||||
The per-node Fluentd pods, the Elasticsearch pods, and the Kibana pods should
|
||||
all be running in the kube-system namespace soon after the cluster comes to
|
||||
life.
|
||||
|
||||
```shell
|
||||
$ kubectl get pods --namespace=kube-system
|
||||
NAME READY REASON RESTARTS AGE
|
||||
elasticsearch-logging-v1-78nog 1/1 Running 0 2h
|
||||
elasticsearch-logging-v1-nj2nb 1/1 Running 0 2h
|
||||
fluentd-elasticsearch-kubernetes-node-5oq0 1/1 Running 0 2h
|
||||
fluentd-elasticsearch-kubernetes-node-6896 1/1 Running 0 2h
|
||||
fluentd-elasticsearch-kubernetes-node-l1ds 1/1 Running 0 2h
|
||||
fluentd-elasticsearch-kubernetes-node-lz9j 1/1 Running 0 2h
|
||||
kibana-logging-v1-bhpo8 1/1 Running 0 2h
|
||||
kube-dns-v3-7r1l9 3/3 Running 0 2h
|
||||
monitoring-heapster-v4-yl332 1/1 Running 1 2h
|
||||
monitoring-influx-grafana-v1-o79xf 2/2 Running 0 2h
|
||||
```
|
||||
|
||||
The `fluentd-elasticsearch` pods gather logs from each node and send them to
|
||||
the `elasticsearch-logging` pods, which are part of a
|
||||
[service](/docs/user-guide/services/) named `elasticsearch-logging`. These
|
||||
Elasticsearch pods store the logs and expose them via a REST API.
|
||||
The `kibana-logging` pod provides a web UI for reading the logs stored in
|
||||
Elasticsearch, and is part of a service named `kibana-logging`.
|
||||
|
||||
The Elasticsearch and Kibana services are both in the `kube-system` namespace
|
||||
and are not directly exposed via a publicly reachable IP address. To reach them,
|
||||
follow the instructions for [Accessing services running in a cluster](/docs/user-guide/accessing-the-cluster/#accessing-services-running-on-the-cluster).
|
||||
|
||||
If you try accessing the `elasticsearch-logging` service in your browser, you'll
|
||||
see a status page that looks something like this:
|
||||
|
||||
![Elasticsearch Status](/images/docs/es-browser.png)
|
||||
|
||||
You can now type Elasticsearch queries directly into the browser, if you'd
|
||||
like. See [Elasticsearch's documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-uri-request.html)
|
||||
for more details on how to do so.
|
||||
|
||||
Alternatively, you can view your cluster's logs using Kibana (again using the
|
||||
[instructions for accessing a service running in the cluster](/docs/user-guide/accessing-the-cluster/#accessing-services-running-on-the-cluster)).
|
||||
The first time you visit the Kibana URL you will be presented with a page that
|
||||
asks you to configure your view of the ingested logs. Select the option for
|
||||
timeseries values and select `@timestamp`. On the following page select the
|
||||
`Discover` tab and then you should be able to see the ingested logs.
|
||||
You can set the refresh interval to 5 seconds to have the logs
|
||||
regularly refreshed.
|
||||
|
||||
Here is a typical view of ingested logs from the Kibana viewer:
|
||||
|
||||
![Kibana logs](/images/docs/kibana-logs.png)
|
||||
|
||||
Kibana opens up all sorts of powerful options for exploring your logs! For some
|
||||
ideas on how to dig into it, check out [Kibana's documentation](https://www.elastic.co/guide/en/kibana/current/discover.html).
|
||||
[Logging Using ElasticSearch and Kibana](/docs/tasks/debug-application-cluster/logging-elasticsearch-kibana/)
|
||||
|
|
|
@ -5,219 +5,6 @@ assignees:
|
|||
title: Logging Overview
|
||||
---
|
||||
|
||||
Application and systems logs can help you understand what is happening inside your cluster. The logs are particularly useful for debugging problems and monitoring cluster activity. Most modern applications have some kind of logging mechanism; as such, most container engines are likewise designed to support some kind of logging. The easiest and most embraced logging method for containerized applications is to write to the standard output and standard error streams.
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
However, the native functionality provided by a container engine or runtime is usually not enough for a complete logging solution. For example, if a container crashes, a pod is evicted, or a node dies, you'll usually still want to access your application's logs. As such, logs should have a separate storage and lifecycle independent of nodes, pods, or containers. This concept is called _cluster-level-logging_. Cluster-level logging requires a separate backend to store, analyze, and query logs. Kubernetes provides no native storage solution for log data, but you can integrate many existing logging solutions into your Kubernetes cluster.
|
||||
|
||||
This document includes:
|
||||
|
||||
* A basic demonstration of logging in Kubernetes using the standard output stream
|
||||
* A detailed description of the node logging architecture in Kubernetes
|
||||
* Guidance for implementing cluster-level logging in Kubernetes
|
||||
|
||||
The guidance for cluster-level logging assumes that a logging backend is present inside or outside of your cluster. If you're not interested in having cluster-level logging, you might still find the description of how logs are stored and handled on the node to be useful.
|
||||
|
||||
## Basic logging in Kubernetes
|
||||
|
||||
In this section, you can see an example of basic logging in Kubernetes that
|
||||
outputs data to the standard output stream. This demonstration uses
|
||||
a [pod specification](/docs/user-guide/logging/examples/counter-pod.yaml) with
|
||||
a container that writes some text to standard output once per second.
|
||||
|
||||
{% include code.html language="yaml" file="examples/counter-pod.yaml" ghlink="/docs/user-guide/logging/examples/counter-pod.yaml" %}
|
||||
|
||||
To run this pod, use the following command:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f http://k8s.io/docs/user-guide/logging/examples/counter-pod.yaml
|
||||
pod "counter" created
|
||||
```
|
||||
|
||||
To fetch the logs, use the `kubectl logs` command, as follows
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter
|
||||
0: Mon Jan 1 00:00:00 UTC 2001
|
||||
1: Mon Jan 1 00:00:01 UTC 2001
|
||||
2: Mon Jan 1 00:00:02 UTC 2001
|
||||
...
|
||||
```
|
||||
|
||||
You can use `kubectl logs` to retrieve logs from a previous instantiation of a container with `--previous` flag, in case the container has crashed. If your pod has multiple containers, you should specify which container's logs you want to access by appending a container name to the command. See the [`kubectl logs` documentation](/docs/user-guide/kubectl/kubectl_logs/) for more details.
|
||||
|
||||
## Logging at the node level
|
||||
|
||||
![Node level logging](/images/docs/user-guide/logging/logging-node-level.png)
|
||||
|
||||
Everything a containerized application writes to `stdout` and `stderr` is handled and redirected somewhere by a container engine. For example, the Docker container engine redirects those two streams to [a logging driver](https://docs.docker.com/engine/admin/logging/overview), which is configured in Kubernetes to write to a file in json format.
|
||||
|
||||
**Note:** The Docker json logging driver treats each line as a separate message. When using the Docker logging driver, there is no direct support for multi-line messages. You need to handle multi-line messages at the logging agent level or higher.
|
||||
|
||||
By default, if a container restarts, the kubelet keeps one terminated container with its logs. If a pod is evicted from the node, all corresponding containers are also evicted, along with their logs.
|
||||
|
||||
An important consideration in node-level logging is implementing log rotation, so that logs don't consume all available storage on the node. Kubernetes uses the [`logrotate`](http://www.linuxcommand.org/man_pages/logrotate8.html) tool to implement log rotation.
|
||||
|
||||
Kubernetes performs log rotation daily, or if the log file grows beyond 10MB in size. Each rotation belongs to a single container; if the container repeatedly fails or the pod is evicted, all previous rotations for the container are lost. By default, Kubernetes keeps up to five logging rotations per container.
|
||||
|
||||
The Kubernetes logging configuration differs depending on the node type. For example, you can find detailed information for GCI in the corresponding [configure helper](https://github.com/kubernetes/kubernetes/blob/{{page.githubbranch}}/cluster/gce/gci/configure-helper.sh#L96).
|
||||
|
||||
When you run [`kubectl logs`](/docs/user-guide/kubectl/kubectl_logs), as in the basic logging example, the kubelet on the node handles the request and reads directly from the log file, returning the contents in the response. Note that `kubectl logs` **only returns the last rotation**; you must manually extract prior rotations, if desired and cluster-level logging is not enabled.
|
||||
|
||||
### System component logs
|
||||
|
||||
There are two types of system components: those that run in a container and those
|
||||
that do not run in a container. For example:
|
||||
|
||||
* The Kubernetes scheduler and kube-proxy run in a container.
|
||||
* The kubelet and container runtime, for example Docker, do not run in containers.
|
||||
|
||||
On machines with systemd, the kubelet and container runtime write to journald. If
|
||||
systemd is not present, they write to `.log` files in the `/var/log` directory.
|
||||
System components inside containers always write to the `/var/log` directory,
|
||||
bypassing the default logging mechanism. They use the [glog](https://godoc.org/github.com/golang/glog)
|
||||
logging library. You can find the conventions for logging severity for those
|
||||
components in the [development docs on logging](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md).
|
||||
|
||||
Similarly to the container logs, system component logs in the `/var/log`
|
||||
directory are rotated daily and based on the log size. However,
|
||||
system component logs have a higher size retention: by default,
|
||||
they can store up to 100MB.
|
||||
|
||||
## Cluster-level logging architectures
|
||||
|
||||
While Kubernetes does not provide a native solution for cluster-level logging, there are several common approaches you can consider. Here are some options:
|
||||
|
||||
* Use a node-level logging agent that runs on every node.
|
||||
* Include a dedicated sidecar container for logging in an application pod.
|
||||
* Push logs directly to a backend from within an application.
|
||||
|
||||
### Using a node logging agent
|
||||
|
||||
![Using a node level logging agent](/images/docs/user-guide/logging/logging-with-node-agent.png)
|
||||
|
||||
You can implement cluster-level logging by including a _node-level logging agent_ on each node. The logging agent is a dedicated tool that exposes logs or pushes logs to a backend. Commonly, the logging agent is a container that has access to a directory with log files from all of the application containers on that node.
|
||||
|
||||
Because the logging agent must run on every node, it's common to implement it as either a DaemonSet replica, a manifest pod, or a dedicated native process on the node. However the latter two approaches are deprecated and highly discouraged.
|
||||
|
||||
Using a node-level logging agent is the most common and encouraged approach for a Kubernetes cluster, because it creates only one agent per node, and it doesn't require any changes to the applications running on the node. However, node-level logging _only works for applications' standard output and standard error_.
|
||||
|
||||
Kubernetes doesn't specify a logging agent, but two optional logging agents are packaged with the Kubernetes release: [Stackdriver Logging](/docs/user-guide/logging/stackdriver) for use with Google Cloud Platform, and [Elasticsearch](/docs/user-guide/logging/elasticsearch). You can find more information and instructions in the dedicated documents. Both use [fluentd](http://www.fluentd.org/) with custom configuration as an agent on the node.
|
||||
|
||||
### Using a sidecar container with the logging agent
|
||||
|
||||
You can use a sidecar container in one of the following ways:
|
||||
|
||||
* The sidecar container streams application logs to its own `stdout`.
|
||||
* The sidecar container runs a logging agent, which is configured to pick up logs from an application container.
|
||||
|
||||
#### Streaming sidecar container
|
||||
|
||||
![Sidecar container with a streaming container](/images/docs/user-guide/logging/logging-with-streaming-sidecar.png)
|
||||
|
||||
By having your sidecar containers stream to their own `stdout` and `stderr`
|
||||
streams, you can take advantage of the kubelet and the logging agent that
|
||||
already run on each node. The sidecar containers read logs from a file, a socket,
|
||||
or the journald. Each individual sidecar container prints log to its own `stdout`
|
||||
or `stderr` stream.
|
||||
|
||||
This approach allows you to separate several log streams from different
|
||||
parts of your application, some of which can lack support
|
||||
for writing to `stdout` or `stderr`. The logic behind redirecting logs
|
||||
is minimal, so it's hardly a significant overhead. Additionally, because
|
||||
`stdout` and `stderr` are handled by the kubelet, you can use built-in tools
|
||||
like `kubectl logs`.
|
||||
|
||||
Consider the following example. A pod runs a single container, and the container
|
||||
writes to two different log files, using two different formats. Here's a
|
||||
configuration file for the Pod:
|
||||
|
||||
{% include code.html language="yaml" file="examples/two-files-counter-pod.yaml" ghlink="/docs/user-guide/logging/examples/two-files-counter-pod.yaml" %}
|
||||
|
||||
It would be a mess to have log entries of different formats in the same log
|
||||
stream, even if you managed to redirect both components to the `stdout` stream of
|
||||
the container. Instead, you could introduce two sidecar containers. Each sidecar
|
||||
container could tail a particular log file from a shared volume and then redirect
|
||||
the logs to its own `stdout` stream.
|
||||
|
||||
Here's a configuration file for a pod that has two sidecar containers:
|
||||
|
||||
{% include code.html language="yaml" file="examples/two-files-counter-pod-streaming-sidecar.yaml" ghlink="/docs/user-guide/logging/examples/two-files-counter-pod-streaming-sidecar.yaml" %}
|
||||
|
||||
Now when you run this pod, you can access each log stream separately by
|
||||
running the following commands:
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter count-log-1
|
||||
0: Mon Jan 1 00:00:00 UTC 2001
|
||||
1: Mon Jan 1 00:00:01 UTC 2001
|
||||
2: Mon Jan 1 00:00:02 UTC 2001
|
||||
...
|
||||
```
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter count-log-2
|
||||
Mon Jan 1 00:00:00 UTC 2001 INFO 0
|
||||
Mon Jan 1 00:00:01 UTC 2001 INFO 1
|
||||
Mon Jan 1 00:00:02 UTC 2001 INFO 2
|
||||
...
|
||||
```
|
||||
|
||||
The node-level agent installed in your cluster picks up those log streams
|
||||
automatically without any further configuration. If you like, you can configure
|
||||
the agent to parse log lines depending on the source container.
|
||||
|
||||
Note, that despite low CPU and memory usage (order of couple of millicores
|
||||
for cpu and order of several megabytes for memory), writing logs to a file and
|
||||
then streaming them to `stdout` can double disk usage. If you have
|
||||
an application that writes to a single file, it's generally better to set
|
||||
`/dev/stdout` as destination rather than implementing the streaming sidecar
|
||||
container approach.
|
||||
|
||||
Sidecar containers can also be used to rotate log files that cannot be
|
||||
rotated by the application itself. [An example](https://github.com/samsung-cnct/logrotate)
|
||||
of this approach is a small container running logrotate periodically.
|
||||
However, it's recommended to use `stdout` and `stderr` directly and leave rotation
|
||||
and retention policies to the kubelet.
|
||||
|
||||
#### Sidecar container with a logging agent
|
||||
|
||||
![Sidecar container with a logging agent](/images/docs/user-guide/logging/logging-with-sidecar-agent.png)
|
||||
|
||||
If the node-level logging agent is not flexible enough for your situation, you
|
||||
can create a sidecar container with a separate logging agent that you have
|
||||
configured specifically to run with your application.
|
||||
|
||||
**Note**: Using a logging agent in a sidecar container can lead
|
||||
to significant resource consumption. Moreover, you won't be able to access
|
||||
those logs using `kubectl logs` command, because they are not controlled
|
||||
by the kubelet.
|
||||
|
||||
As an example, you could use [Stackdriver](/docs/user-guide/logging/stackdriver/),
|
||||
which uses fluentd as a logging agent. Here are two configuration files that
|
||||
you can use to implement this approach. The first file contains
|
||||
a [ConfigMap](/docs/user-guide/configmap/) to configure fluentd.
|
||||
|
||||
{% include code.html language="yaml" file="examples/fluentd-sidecar-config.yaml" ghlink="/docs/user-guide/logging/examples/fluentd-sidecar-config.yaml" %}
|
||||
|
||||
**Note**: The configuration of fluentd is beyond the scope of this article. For
|
||||
information about configuring fluentd, see the
|
||||
[official fluentd documentation](http://docs.fluentd.org/).
|
||||
|
||||
The second file describes a pod that has a sidecar container running fluentd.
|
||||
The pod mounts a volume where fluentd can pick up its configuration data.
|
||||
|
||||
{% include code.html language="yaml" file="examples/two-files-counter-pod-agent-sidecar.yaml" ghlink="/docs/user-guide/logging/examples/two-files-counter-pod-agent-sidecar.yaml" %}
|
||||
|
||||
After some time you can find log messages in the Stackdriver interface.
|
||||
|
||||
Remember, that this is just an example and you can actually replace fluentd
|
||||
with any logging agent, reading from any source inside an application
|
||||
container.
|
||||
|
||||
### Exposing logs directly from the application
|
||||
|
||||
![Exposing logs directly from the application](/images/docs/user-guide/logging/logging-from-application.png)
|
||||
|
||||
You can implement cluster-level logging by exposing or pushing logs directly from
|
||||
every application; however, the implementation for such a logging mechanism
|
||||
is outside the scope of Kubernetes.
|
||||
[Logging and Monitoring Cluster Activity](/docs/concepts/clusters/logging/)
|
||||
|
|
|
@ -5,144 +5,6 @@ assignees:
|
|||
title: Logging with Stackdriver Logging
|
||||
---
|
||||
|
||||
Before reading this page, it's highly recommended to familiarize yourself with the [overview of logging in Kubernetes](/docs/user-guide/logging/overview).
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
This article assumes that you have created a Kubernetes cluster with cluster-level logging support for sending logs to Stackdriver Logging. You can do this either by selecting the **Enable Stackdriver Logging** checkbox in the create cluster dialogue in [GKE](https://cloud.google.com/container-engine/), or by setting the `KUBE_LOGGING_DESTINATION` flag to `gcp` when manually starting a cluster using `kube-up.sh`.
|
||||
|
||||
The following guide describes gathering a container's standard output and standard error. To gather logs written by an application to a file, you can use [a sidecar approach](https://github.com/kubernetes/contrib/blob/master/logging/fluentd-sidecar-gcp/README.md).
|
||||
|
||||
## Overview
|
||||
|
||||
After creation, you can discover logging agent pods in the `kube-system` namespace,
|
||||
one per node, by running the following command:
|
||||
|
||||
```shell
|
||||
$ kubectl get pods --namespace=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
...
|
||||
fluentd-gcp-v1.30-50gnc 1/1 Running 0 5d
|
||||
fluentd-gcp-v1.30-v255c 1/1 Running 0 5d
|
||||
fluentd-gcp-v1.30-f02l5 1/1 Running 0 5d
|
||||
...
|
||||
```
|
||||
|
||||
To understand how logging with Stackdriver works, consider the following
|
||||
synthetic log generator pod specification [counter-pod.yaml](/docs/user-guide/logging/examples/counter-pod.yaml):
|
||||
|
||||
{% include code.html language="yaml" file="examples/counter-pod.yaml" ghlink="/docs/user-guide/logging/examples/counter-pod.yaml" %}
|
||||
|
||||
This pod specification has one container that runs a bash script
|
||||
that writes out the value of a counter and the date once per
|
||||
second, and runs indefinitely. Let's create this pod in the default namespace.
|
||||
|
||||
```shell
|
||||
$ kubectl create -f http://k8s.io/docs/user-guide/logging/examples/counter-pod.yaml
|
||||
pod "counter" created
|
||||
```
|
||||
|
||||
You can observe the running pod:
|
||||
|
||||
```shell
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
counter 1/1 Running 0 5m
|
||||
```
|
||||
|
||||
For a short period of time you can observe the 'Pending' pod status, because the kubelet
|
||||
has to download the container image first. When the pod status changes to `Running`
|
||||
you can use the `kubectl logs` command to view the output of this counter pod.
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter
|
||||
0: Mon Jan 1 00:00:00 UTC 2001
|
||||
1: Mon Jan 1 00:00:01 UTC 2001
|
||||
2: Mon Jan 1 00:00:02 UTC 2001
|
||||
...
|
||||
```
|
||||
|
||||
As described in the logging overview, this command fetches log entries
|
||||
from the container log file. If the container is killed and then restarted by
|
||||
Kubernetes, you can still access logs from the previous container. However,
|
||||
if the pod is evicted from the node, log files are lost. Let's demonstrate this
|
||||
by deleting the currently running counter container:
|
||||
|
||||
```shell
|
||||
$ kubectl delete pod counter
|
||||
pod "counter" deleted
|
||||
```
|
||||
|
||||
and then recreating it:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f http://k8s.io/docs/user-guide/logging/examples/counter-pod.yaml
|
||||
pod "counter" created
|
||||
```
|
||||
|
||||
After some time, you can access logs from the counter pod again:
|
||||
|
||||
```shell
|
||||
$ kubectl logs counter
|
||||
0: Mon Jan 1 00:01:00 UTC 2001
|
||||
1: Mon Jan 1 00:01:01 UTC 2001
|
||||
2: Mon Jan 1 00:01:02 UTC 2001
|
||||
...
|
||||
```
|
||||
|
||||
As expected, only recent log lines are present. However, for a real-world
|
||||
application you will likely want to be able to access logs from all containers,
|
||||
especially for the debug purposes. This is exactly when the previously enabled
|
||||
Stackdriver Logging can help.
|
||||
|
||||
## Viewing logs
|
||||
|
||||
Stackdriver Logging agent attaches metadata to each log entry, for you to use later
|
||||
in queries to select only the messages you're interested in: for example,
|
||||
the messages from a particular pod.
|
||||
|
||||
The most important pieces of metadata are the resource type and log name.
|
||||
The resource type of a container log is `container`, which is named
|
||||
`GKE Containers` in the UI (even if the Kubernetes cluster is not on GKE).
|
||||
The log name is the name of the container, so that if you have a pod with
|
||||
two containers, named `container_1` and `container_2` in the spec, their logs
|
||||
will have log names `container_1` and `container_2` respectively.
|
||||
|
||||
System components have resource type `compute`, which is named
|
||||
`GCE VM Instance` in the interface. Log names for system components are fixed.
|
||||
For a GKE node, every log entry from a system component has one the following
|
||||
log names:
|
||||
|
||||
* docker
|
||||
* kubelet
|
||||
* kube-proxy
|
||||
|
||||
You can learn more about viewing logs on [the dedicated Stackdriver page](https://cloud.google.com/logging/docs/view/logs_viewer).
|
||||
|
||||
One of the possible ways to view logs is using the
|
||||
[`gcloud logging`](https://cloud.google.com/logging/docs/api/gcloud-logging)
|
||||
command line interface from the [Google Cloud SDK](https://cloud.google.com/sdk/).
|
||||
It uses Stackdriver Logging [filtering syntax](https://cloud.google.com/logging/docs/view/advanced_filters)
|
||||
to query specific logs. For example, you can run the following command:
|
||||
|
||||
```shell
|
||||
$ gcloud beta logging read 'logName="projects/$YOUR_PROJECT_ID/logs/count"' --format json | jq '.[].textPayload'
|
||||
...
|
||||
"2: Mon Jan 1 00:01:02 UTC 2001\n"
|
||||
"1: Mon Jan 1 00:01:01 UTC 2001\n"
|
||||
"0: Mon Jan 1 00:01:00 UTC 2001\n"
|
||||
...
|
||||
"2: Mon Jan 1 00:00:02 UTC 2001\n"
|
||||
"1: Mon Jan 1 00:00:01 UTC 2001\n"
|
||||
"0: Mon Jan 1 00:00:00 UTC 2001\n"
|
||||
```
|
||||
|
||||
As you can see, it outputs messages for the count container from both
|
||||
the first and second runs, despite the fact that the kubelet already deleted
|
||||
the logs for the first container.
|
||||
|
||||
### Exporting logs
|
||||
|
||||
You can export logs to [Google Cloud Storage](https://cloud.google.com/storage/)
|
||||
or to [BigQuery](https://cloud.google.com/bigquery/) to run further
|
||||
analysis. Stackdriver Logging offers the concept of sinks, where you can
|
||||
specify the destination of log entries. More information is available on
|
||||
the Stackdriver [Exporting Logs page](https://cloud.google.com/logging/docs/export/configure_export_v2).
|
||||
[Logging Using Stackdriver](/docs/tasks/debug-application-cluster/logging-stackdriver/)
|
||||
|
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
assignees:
|
||||
- jsafrane
|
||||
- saad-ali
|
||||
title: Persistent Volumes Walkthrough
|
||||
---
|
||||
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
[Configuring a Pod to Use a Persistent Volume for Storage](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/)
|
|
@ -1,7 +0,0 @@
|
|||
---
|
||||
title: Creating Single-Container Pods
|
||||
---
|
||||
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
[Running a Stateless Application Using a Deployment](/docs/tutorials/stateless-application/run-stateless-application-deployment/)
|
|
@ -1,23 +0,0 @@
|
|||
---
|
||||
title: Working with Containers in Production
|
||||
---
|
||||
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
* [Configuring a Pod to Use a Volume for Storage](/docs/tasks/configure-pod-container/configure-volume-storage/)
|
||||
|
||||
* [Distributing Credentials Securely](/docs/tasks/configure-pod-container/distribute-credentials-secure/)
|
||||
|
||||
* [Pulling an Image from a Private Registry](/docs/tasks/configure-pod-container/pull-image-private-registry/)
|
||||
|
||||
* [Communicating Between Containers Running in the Same Pod](/docs/tasks/configure-pod-container/communicate-containers-same-pod/)
|
||||
|
||||
* [Assigning CPU and RAM Resources to a Container](/docs/tasks/configure-pod-container/assign-cpu-ram-container/)
|
||||
|
||||
* [Configuring Liveness and Readiness Probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/)
|
||||
|
||||
* [Configuring Pod Initialization](/docs/tasks/configure-pod-container/configure-pod-initialization/)
|
||||
|
||||
* [Attaching Handlers to Container Lifecycle Events](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/)
|
||||
|
||||
* [Determining the Reason for Pod Failure](/docs/tasks/debug-application-cluster/determine-reason-pod-failure/)
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
assignees:
|
||||
- bgrant0607
|
||||
- janetkuo
|
||||
title: Launching, Exposing, and Killing Applications
|
||||
---
|
||||
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
[Using a Service to Access an Application in a Cluster](https://kubernetes.io/docs/tutorials/stateless-application/expose-external-ip-address-service/)
|
|
@ -94,7 +94,7 @@ of the replicated pods.
|
|||
kubectl create -f hpa-rs.yaml
|
||||
```
|
||||
|
||||
Alternatively, you can just use the `kubectl autoscale` command to acomplish the same
|
||||
Alternatively, you can just use the `kubectl autoscale` command to accomplish the same
|
||||
(and it's easier!)
|
||||
|
||||
```shell
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
---
|
||||
title: Secrets Walkthrough
|
||||
---
|
||||
|
||||
{% include user-guide-content-moved.md %}
|
||||
|
||||
[Distributing Credentials Securely](/docs/tasks/configure-pod-container/distribute-credentials-secure/)
|
robots.txt
|
@ -5,6 +5,25 @@ Disallow: /v1.0/
|
|||
Disallow: /v1.1/
|
||||
Disallow: /404/
|
||||
Disallow: 404.html
|
||||
Disallow: /docs/user-guide/docs/user-guide/simple-nginx/
|
||||
|
||||
Disallow: /docs/user-guide/configuring-containers
|
||||
Disallow: /docs/user-guide/containers
|
||||
Disallow: /docs/user-guide/deploying-applications
|
||||
Disallow: /docs/user-guide/liveness/index
|
||||
Disallow: /docs/user-guide/simple-nginx
|
||||
Disallow: /docs/user-guide/production-pods
|
||||
Disallow: /docs/user-guide/quick-start
|
||||
|
||||
Disallow: /docs/user-guide/persistent-volumes/walkthrough
|
||||
Disallow: /docs/user-guide/pods/single-container
|
||||
|
||||
Disallow: /docs/user-guide/secrets/walkthrough
|
||||
|
||||
SITEMAP: http://kubernetes.io/sitemap.xml
|
||||
|