apiVersion: v1 kind: Pod metadata: name: fluentd-cloud-logging namespace: kube-system labels: k8s-app: fluentd-logging # This annotation ensures that fluentd does not get evicted if the node # supports critical pod annotation based priority scheme. # Note that this does not guarantee admission on the nodes (#40573). annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: dnsPolicy: Default containers: - name: fluentd-cloud-logging image: gcr.io/google_containers/fluentd-gcp:2.0.2 # If fluentd consumes its own logs, the following situation may happen: # fluentd fails to send a chunk to the server => writes it to the log => # tries to send this message to the server => fails to send a chunk and so on. # Writing to a file, which is not exported to the back-end prevents it. # It also allows to increase the fluentd verbosity by default. command: - '/bin/sh' - '-c' - |- mkdir /etc/fluent/config.d && echo "$FLUENTD_CONFIG" > /etc/fluent/config.d/main.conf && /run.sh $FLUENTD_ARGS 2>&1 >>/var/log/fluentd.log env: - name: FLUENTD_ARGS value: --no-supervisor # Keep this config as close as possible to cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml # Note that backslashes should be doubled, because this is interpreted as shell variable # TODO(crassirostris): Refactor this - name: FLUENTD_CONFIG value: |- # This configuration file for Fluentd is used # to watch changes to Docker log files that live in the # directory /var/lib/docker/containers/ and are symbolically # linked to from the /var/log/containers directory using names that capture the # pod name and container name. These logs are then submitted to # Google Cloud Logging which assumes the installation of the cloud-logging plug-in. 
# # Example # ======= # A line in the Docker log file might look like this JSON: # # {"log":"2014/09/25 21:15:03 Got request with path wombat\\n", # "stream":"stderr", # "time":"2014-09-25T21:15:03.499185026Z"} # # The record reformer is used to write the tag to focus on the pod name # and the Kubernetes container name. For example a Docker container's logs # might be in the directory: # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b # and in the file: # 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log # where 997599971ee6... is the Docker ID of the running container. # The Kubernetes kubelet makes a symbolic link to this file on the host machine # in the /var/log/containers directory which includes the pod name and the Kubernetes # container name: # synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log # -> # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log # The /var/log directory on the host is mapped to the /var/log directory in the container # running this instance of Fluentd and we end up collecting the file: # /var/log/containers/synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log # This results in the tag: # var.log.containers.synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log # The record reformer is used to discard the var.log.containers prefix and # the Docker container ID suffix and "kubernetes." 
is pre-pended giving the tag: # kubernetes.synthetic-logger-0.25lps-pod_default-synth-lgr # Tag is then parsed by google_cloud plugin and translated to the metadata, # visible in the log viewer # Example: # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"} type tail format json time_key time path /var/log/containers/*.log pos_file /var/log/gcp-containers.log.pos time_format %Y-%m-%dT%H:%M:%S.%N%Z tag reform.* read_from_head true type parser format /^(?\\w)(? type record_reformer enable_ruby true tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')} # Detect exceptions in the log output and forward them as one log entry. @type copy @type prometheus type counter name logging_line_count desc Total number of lines generated by application containers tag ${tag} @type detect_exceptions remove_tag_prefix raw message log stream stream multiline_flush_interval 5 max_bytes 500000 max_lines 1000 # Example: # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081 type tail format /^(?