From 9cedad704e2d57d75eda9703dfc9b68d2a996403 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 30 Jul 2020 16:39:58 -0600 Subject: [PATCH] ported telegraf 1.9 --- content/telegraf/v1.9/_index.md | 24 + .../telegraf/v1.9/about_the_project/_index.md | 26 + .../telegraf/v1.9/about_the_project/cla.md | 10 + .../v1.9/about_the_project/contributing.md | 10 + .../v1.9/about_the_project/license.md | 10 + .../release-notes-changelog.md | 1432 +++++++++++++++++ .../telegraf/v1.9/administration/_index.md | 21 + .../v1.9/administration/configuration.md | 383 +++++ .../v1.9/administration/enterprise-plugins.md | 18 + .../v1.9/administration/troubleshooting.md | 89 + .../v1.9/administration/windows_service.md | 48 + content/telegraf/v1.9/concepts/_index.md | 21 + .../concepts/aggregator_processor_plugins.md | 62 + content/telegraf/v1.9/concepts/glossary.md | 103 ++ content/telegraf/v1.9/concepts/metrics.md | 28 + content/telegraf/v1.9/data_formats/_index.md | 21 + .../v1.9/data_formats/input/_index.md | 46 + .../v1.9/data_formats/input/collectd.md | 48 + .../telegraf/v1.9/data_formats/input/csv.md | 111 ++ .../v1.9/data_formats/input/dropwizard.md | 179 +++ .../v1.9/data_formats/input/graphite.md | 55 + .../telegraf/v1.9/data_formats/input/grok.md | 226 +++ .../v1.9/data_formats/input/influx.md | 27 + .../telegraf/v1.9/data_formats/input/json.md | 224 +++ .../v1.9/data_formats/input/logfmt.md | 42 + .../v1.9/data_formats/input/nagios.md | 29 + .../telegraf/v1.9/data_formats/input/value.md | 44 + .../v1.9/data_formats/input/wavefront.md | 28 + .../v1.9/data_formats/output/_index.md | 33 + .../v1.9/data_formats/output/graphite.md | 58 + .../v1.9/data_formats/output/influx.md | 41 + .../telegraf/v1.9/data_formats/output/json.md | 89 + .../v1.9/data_formats/output/splunkmetric.md | 147 ++ .../v1.9/data_formats/template-patterns.md | 145 ++ content/telegraf/v1.9/introduction/_index.md | 22 + .../telegraf/v1.9/introduction/downloading.md | 12 + 
.../v1.9/introduction/getting-started.md | 129 ++ .../v1.9/introduction/installation.md | 235 +++ content/telegraf/v1.9/plugins/_index.md | 27 + content/telegraf/v1.9/plugins/aggregators.md | 50 + content/telegraf/v1.9/plugins/inputs.md | 1030 ++++++++++++ content/telegraf/v1.9/plugins/outputs.md | 209 +++ content/telegraf/v1.9/plugins/processors.md | 103 ++ 43 files changed, 5695 insertions(+) create mode 100644 content/telegraf/v1.9/_index.md create mode 100644 content/telegraf/v1.9/about_the_project/_index.md create mode 100644 content/telegraf/v1.9/about_the_project/cla.md create mode 100644 content/telegraf/v1.9/about_the_project/contributing.md create mode 100644 content/telegraf/v1.9/about_the_project/license.md create mode 100644 content/telegraf/v1.9/about_the_project/release-notes-changelog.md create mode 100644 content/telegraf/v1.9/administration/_index.md create mode 100644 content/telegraf/v1.9/administration/configuration.md create mode 100644 content/telegraf/v1.9/administration/enterprise-plugins.md create mode 100644 content/telegraf/v1.9/administration/troubleshooting.md create mode 100644 content/telegraf/v1.9/administration/windows_service.md create mode 100644 content/telegraf/v1.9/concepts/_index.md create mode 100644 content/telegraf/v1.9/concepts/aggregator_processor_plugins.md create mode 100644 content/telegraf/v1.9/concepts/glossary.md create mode 100644 content/telegraf/v1.9/concepts/metrics.md create mode 100644 content/telegraf/v1.9/data_formats/_index.md create mode 100644 content/telegraf/v1.9/data_formats/input/_index.md create mode 100644 content/telegraf/v1.9/data_formats/input/collectd.md create mode 100644 content/telegraf/v1.9/data_formats/input/csv.md create mode 100644 content/telegraf/v1.9/data_formats/input/dropwizard.md create mode 100644 content/telegraf/v1.9/data_formats/input/graphite.md create mode 100644 content/telegraf/v1.9/data_formats/input/grok.md create mode 100644 
content/telegraf/v1.9/data_formats/input/influx.md create mode 100644 content/telegraf/v1.9/data_formats/input/json.md create mode 100644 content/telegraf/v1.9/data_formats/input/logfmt.md create mode 100644 content/telegraf/v1.9/data_formats/input/nagios.md create mode 100644 content/telegraf/v1.9/data_formats/input/value.md create mode 100644 content/telegraf/v1.9/data_formats/input/wavefront.md create mode 100644 content/telegraf/v1.9/data_formats/output/_index.md create mode 100644 content/telegraf/v1.9/data_formats/output/graphite.md create mode 100644 content/telegraf/v1.9/data_formats/output/influx.md create mode 100644 content/telegraf/v1.9/data_formats/output/json.md create mode 100644 content/telegraf/v1.9/data_formats/output/splunkmetric.md create mode 100644 content/telegraf/v1.9/data_formats/template-patterns.md create mode 100644 content/telegraf/v1.9/introduction/_index.md create mode 100644 content/telegraf/v1.9/introduction/downloading.md create mode 100644 content/telegraf/v1.9/introduction/getting-started.md create mode 100644 content/telegraf/v1.9/introduction/installation.md create mode 100644 content/telegraf/v1.9/plugins/_index.md create mode 100644 content/telegraf/v1.9/plugins/aggregators.md create mode 100644 content/telegraf/v1.9/plugins/inputs.md create mode 100644 content/telegraf/v1.9/plugins/outputs.md create mode 100644 content/telegraf/v1.9/plugins/processors.md diff --git a/content/telegraf/v1.9/_index.md b/content/telegraf/v1.9/_index.md new file mode 100644 index 000000000..015c0b0d7 --- /dev/null +++ b/content/telegraf/v1.9/_index.md @@ -0,0 +1,24 @@ +--- +title: Telegraf 1.9 documentation +description: Documentation for Telegraf, the plugin-driven server agent of the InfluxData time series platform, used to collect and report metrics. Telegraf supports four categories of plugins -- input, output, aggregator, and processor. 
+menu: + telegraf: + name: v1.9 + identifier: telegraf_1_9 + weight: 11 +--- + +Telegraf is a plugin-driven server agent for collecting & reporting metrics, +and is the first piece of the [TICK stack](https://influxdata.com/time-series-platform/). +Telegraf has plugins to source a variety of metrics directly from the system it's running on, pull metrics from third party APIs, or even listen for metrics via statsd and Kafka consumer services. +It also has output plugins to send metrics to a variety of other datastores, services, and message queues, including InfluxDB, Graphite, OpenTSDB, Datadog, Librato, Kafka, MQTT, NSQ, and many others. + +## Key features + +Here are some of the features that Telegraf currently supports that make it a great choice for metrics collection. + +* Written entirely in Go. +It compiles into a single binary with no external dependencies. +* Minimal memory footprint. +* Plugin system allows new inputs and outputs to be easily added. +* A wide number of plugins for many popular services already exist for well known services and APIs. 
diff --git a/content/telegraf/v1.9/about_the_project/_index.md b/content/telegraf/v1.9/about_the_project/_index.md new file mode 100644 index 000000000..ef91fbd34 --- /dev/null +++ b/content/telegraf/v1.9/about_the_project/_index.md @@ -0,0 +1,26 @@ +--- + title: About the Telegraf project + + menu: + telegraf_1_9: + name: About the project + weight: 10 +--- + +## [Telegraf release notes](/telegraf/v1.9/about_the_project/release-notes-changelog/) + +## [Contributing to Telegraf](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md) + +## [Contributor License Agreement (CLA)](https://influxdata.com/community/cla/) + +## [License](https://github.com/influxdata/telegraf/blob/master/LICENSE) + +## Third party software +InfluxData products contain third party software, which means the copyrighted, patented, or otherwise legally protected +software of third parties that is incorporated in InfluxData products. + +Third party suppliers make no representation nor warranty with respect to such third party software or any portion thereof. +Third party suppliers assume no liability for any claim that might arise with respect to such third party software, nor for a +customer’s use of or inability to use the third party software. + +The [list of third party software components, including references to associated licenses and other materials](https://github.com/influxdata/telegraf/blob/release-1.9/docs/LICENSE_OF_DEPENDENCIES.md), is maintained on a version by version basis. 
diff --git a/content/telegraf/v1.9/about_the_project/cla.md b/content/telegraf/v1.9/about_the_project/cla.md new file mode 100644 index 000000000..90bb1a816 --- /dev/null +++ b/content/telegraf/v1.9/about_the_project/cla.md @@ -0,0 +1,10 @@ +--- +title: InfluxData Contributor License Agreement (CLA) + +menu: + telegraf_1_9: + name: Contributor License Agreement (CLA) + parent: About the project + weight: 30 + url: https://influxdata.com/community/cla/ +--- diff --git a/content/telegraf/v1.9/about_the_project/contributing.md b/content/telegraf/v1.9/about_the_project/contributing.md new file mode 100644 index 000000000..22cf45983 --- /dev/null +++ b/content/telegraf/v1.9/about_the_project/contributing.md @@ -0,0 +1,10 @@ +--- + title: Contributing to Telegraf + + menu: + telegraf_1_9: + name: Contributing + parent: About the project + weight: 20 + url: https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md +--- diff --git a/content/telegraf/v1.9/about_the_project/license.md b/content/telegraf/v1.9/about_the_project/license.md new file mode 100644 index 000000000..13f994448 --- /dev/null +++ b/content/telegraf/v1.9/about_the_project/license.md @@ -0,0 +1,10 @@ +--- + title: License + + menu: + telegraf_1_9: + name: License + parent: About the project + weight: 40 + url: https://github.com/influxdata/telegraf/blob/master/LICENSE +--- diff --git a/content/telegraf/v1.9/about_the_project/release-notes-changelog.md b/content/telegraf/v1.9/about_the_project/release-notes-changelog.md new file mode 100644 index 000000000..a57c12ae9 --- /dev/null +++ b/content/telegraf/v1.9/about_the_project/release-notes-changelog.md @@ -0,0 +1,1432 @@ +--- +title: Telegraf 1.9 release notes +description: See the new features, bug fixes, breaking changes, and enhancements in the latest and earlier Telegraf releases. 
+menu: + telegraf_1_9: + name: Release notes + weight: 10 + parent: About the project +--- + +## v1.9.5 [2019-02-26] + +### Bug fixes + +* General + * Use `systemd` in Amazon Linux 2 rpm. +* Ceph Storage (`ceph`) input plugin + * Add backwards compatibility fields in usage and pool statistics. +* InfluxDB (`influxdb`) output plugin + * Fix UDP line splitting. +* Microsoft SQL Server (`sqlserver`) input plugin + * Set deadlock priority to low. + * Disable results by row in AzureDB query. +* Nstat (`nstat`) input plugin + * Remove error log when `snmp6` directory does not exist. +* Ping (`ping`) input plugin + * Host not added when using custom arguments. +* Stackdriver (`stackdriver`) output plugin + * Skip string fields when writing to stackdriver output. + * Send metrics in ascending time order. + +## v1.9.4 [2019-02-05] + +### Bug fixes + +* General + * Fix `skip_rows` and `skip_columns` options in csv parser. + * Build official packages with Go 1.11.5. +* Jenkins input plugin + * Always send basic auth in jenkins input. +* Syslog (`syslog`) input plugin + * Fix definition of multiple syslog plugins. + +## v1.9.3 [2019-01-22] + +#### Bug fixes + +* General + * Fix latest metrics not sent first when output fails. + * Fix `internal_write buffer_size` not reset on timed writes. +* AMQP Consumer (`amqp_consumer`) input plugin + - Fix `amqp_consumer` input stops consuming when it receives + unparsable messages. +* Couchbase (`couchbase`) input plugin + * Remove `userinfo` from cluster tag in `couchbase` input. +* Microsoft SQL Server (`sqlserver`) input plugin + * Fix arithmetic overflow in `sqlserver` input. +* Prometheus (`prometheus`) input plugin + * Fix `prometheus` input not detecting added and removed pods. + +## v1.9.2 [2019-01-08] + +### Bug fixes + +- Increase `varnishstat` timeout. +- Remove storage calculation for non-Azure-managed instances and add server version. +- Fix error sending empty tag value in `azure_monitor` output. 
+- Fix panic with Prometheus input plugin on shutdown. +- Support non-transparent framing of syslog messages. +- Apply global- and plugin-level metric modifications before filtering. +- Fix `num_remapped_pgs` field in `ceph` plugin. +- Add `PDH_NO_DATA` to known counter error codes in `win_perf_counters`. +- Fix `amqp_consumer` stops consuming on empty message. +- Fix multiple replace tables not working in strings processor. +- Allow non-local UDP connections in `net_response`. +- Fix TOML option names in parser processor. +- Fix panic in Docker input with bad endpoint. +- Fix original metric modified by aggregator filters. + +## v1.9.1 [2018-12-11] + +### Bug fixes + +- Fix boolean handling in splunkmetric serializer. +- Set default config values in Jenkins input. +- Fix server connection and document stats in MongoDB input. +- Add X-Requested-By header to Graylog input. +- Fix metric memory not freed from the metric buffer on write. +- Add support for client TLS certificates in PostgreSQL inputs. +- Prevent panic when marking the offset in `kafka_consumer`. +- Add early metrics to aggregator and honor `drop_original` setting. +- Use `-W` flag on BSD variants in ping input. +- Allow delta metrics in Wavefront parser. + +## v1.9.0 [2018-11-20] + +#### Release Notes + +- The HTTP Listener (`http_listener`) input plugin has been renamed to + InfluxDB Listener (`influxdb_listener`) input plugin and + use of the original name is deprecated. The new name better describes the + intended use of the plugin as an InfluxDB relay. For general purpose + transfer of metrics in any format using HTTP, InfluxData recommends using + HTTP Listener v2 (`http_listener_v2`) input plugin. + +- Input plugins are no longer limited from adding metrics when the output is + writing and new metrics will move into the metric buffer as needed. This + will provide more robust degradation and recovery when writing to a slow + output at high throughput. 
+ + To avoid overconsumption when reading from queue consumers, the following + input plugins use the new option `max_undelivered_messages` to limit the number + of outstanding unwritten metrics: + + * Apache Kafka Consumer (`kafka_consumer`) + * AMQP Consumer (`amqp_consumer`) + * MQTT Consumer (`mqtt_consumer`) + * NATS Consumer (`nats_consumer`) + * NSQ Consumer (`nsq_consumer`) + +#### New input plugins + +- [HTTP Listener v2 (`http_listener_v2`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 +- [IPVS (`ipvs`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/ipvs/README.md) - Contributed by @amoghe +- [Jenkins (`jenkins`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/jenkins/README.md) - Contributed by @influxdata & @lpic10 +- [NGINX Plus API (`nginx_plus_api`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx_plus_api/README.md) - Contributed by @Bugagazavr +- [NGINX VTS (`nginx_vts`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx_vts/README.md) - Contributed by @monder +- [Wireless (`wireless`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment + +#### New output plugins + +- [Stackdriver (stackdriver)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/stackdriver/README.md) - Contributed by @jamesmaidment + +#### Features + +- General + - Add ability to define a custom service name when installing as a Windows service. + - Add new configuration for CSV column explicit type conversion. + - Add Telegraf version to `User-Agent` header. + - Add ability to specify bytes options as strings with units. + - Add per output `flush_interval`, `metric_buffer_limit`, and `metric_batch_size`. +- Amazon Kinesis (`kinesis`) output plugin + - Use `DescribeStreamSummary` in place of `ListStreams`. 
+- DNS Query (`dns_query`) input plugin + - Query servers in parallel. +- Datadog (`datadog`) output plugin + - Add an option to specify a custom URL. + - Use non-allocating field and tag accessors. +- Filecount (`filecount`) input plugin + - Add per-directory file count. +- HTTP Output (`http output`) plugin + - Add entity-body compression. +- Memcached (`memcached`) input plugin + - Collect additional statistics. +- NSQ (`nsq`) input plugin + - Add TLS configuration support. +- Ping (`ping`) input plugin + - Add support for IPv6. +- Procstat (`procstat`) input plugin + - Add Windows service name lookup. +- Prometheus (`prometheus`) input plugin + - Add scraping for Prometheus annotation in Kubernetes. + - Allow connecting to Prometheus using UNIX socket. +- Strings (`strings`) processor plugin + - Add `replace` function. +- VMware vSphere (`vsphere`) input plugin + - Add LUN to data source translation. + +#### Bug fixes + +- Remove `time_key` from the field values in JSON parser. +- Fix input time rounding when using a custom interval. +- Fix potential deadlock or leaked resources on restart or reload. +- Fix outputs block inputs when batch size is reached. +- Fix potential missing datastore metrics in VMware vSphere (`vsphere`) input plugin. + +## v1.8.3 [2018-10-30] + +### Bug fixes + +- Add DN attributes as tags in X.509 Certificate (`x509_cert`) input plugin to avoid series overwrite. +- Prevent connection leak by closing unused connections in AMQP (`amqp`) output plugin. +- Use default partition key when tag does not exist in Amazon Kinesis (`kinesis`) output plugin. +- Log the correct error in JTI OpenConfig Telemetry (`jti_openconfig_telemetry`) input plugin. +- Handle panic when IPMI Sensor (`ipmi_sensor`) input plugin gets bad input. +- Don't add unserializable fields to Jolokia2 (`jolokia2`) input plugin. +- Fix version check in PostgreSQL Extensible (`postgresql_extensible`) plugin. 
+ +## v1.8.2 [2018-10-17] + +### Bug fixes + +* Aerospike (`aerospike`) input plugin + * Support uint fields. +* Docker (`docker`) input plugin + * Use container name from list if no name in container stats. +* Filecount (`filecount`) input plugin + * Prevent panic on error in file stat. +* InfluxDB v2 (`influxdb_v2`) output plugin + * Update write path to match updated v2 API. +* Logparser (`logparser`) input plugin + * Fix panic. +* MongoDB (`mongodb`) input plugin + * Lower authorization errors to debug level. +* MQTT Consumer (`mqtt_consumer`) input plugin + * Fix connect and reconnect. +* Ping (`ping`) input plugin + * Return correct response code. +* VMware vSphere (`vsphere`) input plugin + * Fix missing timeouts. +* X.509 Certificate (`x509_cert`) input plugin + * Fix segfault. + +## v1.8.1 [2018-10-03] + +### Bug fixes + +- Fix `hardware_type` may be truncated in Microsoft SQL Server (`sqlserver`) input plugin. +- Improve performance in Basicstats (`basicstats`) aggregator plugin. +- Add `hostname` to TLS config for SNI support in X.509 Certificate (`x509_cert`) input plugin. +- Don't add tags with empty values to OpenTSDB (`opentsdb`) output plugin. +- Fix panic during network error in VMware vSphere (`vsphere`) input plugin. +- Unify error response in HTTP Listener (`http_listener`) input plugin with InfluxDB (`influxdb`) output plugin. +- Add `UUID` to VMs in VMware vSphere (`vsphere`) input plugin. +- Skip tags with empty values in Amazon Cloudwatch (`cloudwatch`) output plugin. +- Fix missing non-realtime samples in VMware vSphere (`vsphere`) input plugin. +- Fix case of `timezone`/`grok_timezone` options in grok parser and logparser input plugin. 
+ +## v1.8 [2018-09-21] + +### New input plugins + +- [ActiveMQ (`activemq`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/activemq/README.md) - Contributed by @mlabouardy +- [Beanstalkd (`beanstalkd`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/beanstalkd/README.md) - Contributed by @44px +- [File (`file`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/file/README.md) - Contributed by @maxunt +- [Filecount (`filecount`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/filecount/README.md) - Contributed by @sometimesfood +- [Icinga2 (`icinga2`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/icinga2/README.md) - Contributed by @mlabouardy +- [Kibana (`kibana`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/kibana/README.md) - Contributed by @lpic10 +- [PgBouncer (`pgbouncer`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul +- [Temp (`temp`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/temp/README.md) - Contributed by @pytimer +- [Tengine (`tengine`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/tengine/README.md) - Contributed by @ertaoxu +- [VMware vSphere (`vsphere`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/vsphere/README.md) - Contributed by @prydin +- [X.509 Certificate (`x509_cert`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/x509_cert/README.md) - Contributed by @jtyr + +### New processor plugins + +- [Enum (`enum`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter +- [Parser (`parser`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/parser/README.md) - Contributed by @Ayrdrie & @maxunt +- [Rename 
(`rename`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/rename/README.md) - Contributed by @goldibex +- [Strings (`strings`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/strings/README.md) - Contributed by @bsmaldon + +### New aggregator plugins + +- [ValueCounter (`valuecounter`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212 + +### New output plugins + +- [Azure Monitor (`azure_monitor`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/outputs/azure_monitor/README.md) - Contributed by @influxdata +- [InfluxDB v2 (`influxdb_v2`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/outputs/influxdb_v2/README.md) - Contributed by @influxdata + +### New input data formats (parsers) + +- [csv](/telegraf/v1.8/data_formats/input/csv) - Contributed by @maxunt +- [grok](/telegraf/v1.8/data_formats/input/grok/) - Contributed by @maxunt +- [logfmt](/telegraf/v1.8/data_formats/input/logfmt/) - Contributed by @Ayrdrie & @maxunt +- [wavefront](/telegraf/v1.8/data_formats/input/wavefront/) - Contributed by @puckpuck + +### New output data formats (serializers) + +- [splunkmetric](/telegraf/v1.8/data_formats/output/splunkmetric/) - Contributed by @ronnocol + +### Features + +- Add SSL/TLS support to Redis (`redis`) input plugin. +- Add tengine input plugin. +- Add power draw field to the NVIDIA SMI (`nvidia_smi`) input plugin. +- Add support for Solr 7 to the Solr (`solr`) input plugin. +- Add owner tag on partitions in Burrow (`burrow`) input plugin. +- Add container status tag to Docker (`docker`) input plugin. +- Add ValueCounter (`valuecounter`) aggregator plugin. +- Add new measurement with results of `pgrep` lookup to Procstat (`procstat`) input plugin. +- Add support for comma in logparser timestamp format. +- Add path tag to Tail (`tail`) input plugin. 
+- Add log message when tail is added or removed from a file. +- Add option to use of counter time in win perf counters. +- Add energy and power field and device id tag to Fibaro (`fibaro`) input plugin. +- Add HTTP path configuration for OpenTSDB output. +- Gather IPMI metrics concurrently. +- Add mongo document and connection metrics. +- Add enum processor plugin. +- Add user tag to procstat input. +- Add support for multivalue metrics to collectd parser. +- Add support for setting kafka client id. +- Add file input plugin and grok parser. +- Improve cloudwatch output performance. +- Add x509_cert input plugin. +- Add IPSIpAddress syntax to ipaddr conversion in snmp plugin. +- Add Filecount filecount input plugin. +- Add support for configuring an AWS `endpoint_url`. +- Send all messages before waiting for results in Kafka output plugin. +- Add support for lz4 compression to Kafka output plugin. +- Split multiple sensor keys in ipmi input. +- Support StatisticValues in cloudwatch output plugin. +- Add ip restriction for the prometheus_client output. +- Add PgBouncer (`pgbouncer`) input plugin. +- Add ActiveMQ input plugin. +- Add wavefront parser plugin. +- Add rename processor plugin. +- Add message 'max_bytes' configuration to kafka input. +- Add gopsutil meminfo fields to Mem (`mem`) input plugin. +- Document how to parse Telegraf logs. +- Use dep v0.5.0. +- Add ability to set measurement from matched text in grok parser. +- Drop message batches in Kafka (`kafka`) output plugin if too large. +- Add support for static and random routing keys in Kafka (`kafka`) output plugin. +- Add logfmt parser plugin. +- Add parser processor plugin. +- Add Icinga2 input plugin. +- Add name, time, path and string field options to JSON parser. +- Add forwarded records to sqlserver input. +- Add Kibana input plugin. +- Add csv parser plugin. +- Add read_buffer_size option to statsd input. +- Add azure_monitor output plugin. 
+- Add queue_durability parameter to amqp_consumer input. +- Add strings processor. +- Add OAuth 2.0 support to HTTP output plugin. +- Add Unix epoch timestamp support for JSON parser. +- Add options for basic auth to haproxy input. +- Add temp input plugin. +- Add Beanstalkd input plugin. +- Add means to specify server password for redis input. +- Add Splunk Metrics serializer. +- Add input plugin for VMware vSphere. +- Align metrics window to interval in cloudwatch input. +- Improve Azure Managed Instance support + more in sqlserver input. +- Allow alternate binaries for iptables input plugin. +- Add influxdb_v2 output plugin. + +### Bug fixes + +- Fix divide by zero in logparser input. +- Fix instance and object name in performance counters with backslashes. +- Reset/flush saved contents from bad metric. +- Document all supported cli arguments. +- Log access denied opening a service at debug level in win_services. +- Add support for Kafka 2.0. +- Fix nagios parser does not support ranges in performance data. +- Fix nagios parser does not strip quotes from performance data. +- Fix null value crash in postgresql_extensible input. +- Remove the startup authentication check from the cloudwatch output. +- Support tailing files created after startup in tail input. +- Fix CSV format configuration loading. + + +## v1.7.4 [2018-08-29] + +### Bug fixes + +* Continue sending write batch in UDP if a metric is unserializable in InfluxDB (`influxdb`) output plugin. +* Fix PowerDNS (`powerdns`) input plugin tests. +* Fix `burrow_group` offset calculation for Burrow (`burrow`) input plugin. +* Add `result_code` value for errors running ping command. +* Remove timeout deadline for UDP in Syslog (`syslog`) input plugin. +* Ensure channel is closed if an error occurs in CGroup (`cgroup`) input plugin. +* Fix sending of basic authentication credentials in HTTP (`http`) output plugin. +* Use the correct `GOARM` value in the Linux armel package. 
+ +## v1.7.3 [2018-08-07] + +### Bug fixes + +* Reduce required Docker API version. +* Keep leading whitespace for messages in syslog input. +* Skip bad entries on interrupt input. +* Preserve metric type when using filters in output plugins. +* Fix error message if URL is unparseable in InfluxDB output. +* Use explicit `zpool` properties to fix parse error on FreeBSD 11.2. +* Lock buffer when adding metrics. + +## v1.7.2 [2018-07-18] + +### Bug fixes + +* Use localhost as default server tag in Zookeeper (`zookeeper`) input plugin. +* Don't set values when pattern doesn't match in Regex (`regex`) processor plugin. +* Fix output format of Printer (`printer`) processor plugin. +* Fix metric can have duplicate field. +* Return error if NewRequest fails in HTTP (`http`) output plugin. +* Reset read deadline for Syslog (`syslog`) input plugin. +* Exclude cached memory on Docker (`docker`) input plugin. + +## v1.7.1 [2018-07-03] + +### Bug fixes + +* Treat `sigterm` as a clean shutdown signal. +* Fix selection of tags under nested objects in the JSON parser. +* Fix Postfix (`postfix`) input plugin handling of multilevel queues. +* Fix Syslog (`syslog`) input plugin timestamp parsing with single digit day of month. +* Handle MySQL (`mysql`) input plugin variations in the `user_statistics` collecting. +* Fix Minmax (`minmax`) and Basicstats (`basicstats`) aggregator plugins to use `uint64`. +* Document Swap (`swap`) input plugin. +* Fix incorrect precision being applied to metric in HTTP Listener (`http_listener`) input plugin. + +## v1.7 [2018-06-12] + +### Release notes + +- The Cassandra (`cassandra`) input plugin has been deprecated in favor of the Jolokia2 (`jolokia2`) + input plugin which is much more configurable and more performant. There is + an [example configuration](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/jolokia2/examples) to help you + get started. 
+ +- For plugins supporting TLS, you can now specify the certificate and keys + using `tls_ca`, `tls_cert`, `tls_key`. These options behave the same as + the, now deprecated, `ssl` forms. + +### New input plugins + +- [Aurora (`aurora`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/aurora/README.md) - Contributed by @influxdata +- [Burrow (`burrow`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/burrow/README.md) - Contributed by @arkady-emelyanov +- [`fibaro`](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/fibaro/README.md) - Contributed by @dynek +- [`jti_openconfig_telemetry`](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/jti_openconfig_telemetry/README.md) - Contributed by @ajhai +- [`mcrouter`](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mcrouter/README.md) - Contributed by @cthayer +- [NVIDIA SMI (`nvidia_smi`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin +- [Syslog (`syslog`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/syslog/README.md) - Contributed by @influxdata + +### New processor plugins + +- [converter](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/converter/README.md) - Contributed by @influxdata +- [regex](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/regex/README.md) - Contributed by @44px +- [topk](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/topk/README.md) - Contributed by @mirath + +### New output plugins + +- [HTTP (`http`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/http/README.md) - Contributed by @Dark0096 +- [Application Insights (`application_insights`) output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/application_insights/README.md): 
Contributed by @karolz-ms + +### Features + +- Add `repl_oplog_window_sec` metric to MongoDB (`mongodb`) input plugin. +- Add per-host shard metrics in MongoDB (`mongodb`) input plugin. +- Skip files with leading `..` in config directory. +- Add TLS support to `socket_writer` and `socket_listener` plugins. +- Add `snmp` input option to strip non-fixed length index suffixes. +- Add server version tag to the Docker (`docker`) input plugin. +- Add support for LeoFS 1.4 to `leofs` input. +- Add parameter to force the interval of gather for Sysstat (`sysstat`). +- Support BusyBox ping in the Ping (`ping`) input plugin. +- Add Mcrouter (`mcrouter`) input plugin. +- Add TopK (`topk`) processor plugin. +- Add cursor metrics to MongoDB (`mongodb`) input plugin. +- Add tag/integer pair for result to Network Response (`net_response`) input plugin. +- Add Application Insights (`application_insights`) output plugin. +- Added several important Elasticsearch cluster health metrics. +- Add batch mode to `mqtt` output. +- Add Aurora (`aurora`) input plugin. +- Add Regex (`regex`) processor plugin. +- Add support for Graphite 1.1 tags. +- Add timeout option to Sensors (`sensors`) input plugin. +- Add Burrow (`burrow`) input plugin. +- Add option to Unbound (`unbound`) input plugin to use threads as tags. +- Add support for TLS and username/password auth to Aerospike (`aerospike`) input plugin. +- Add special syslog timestamp parser to grok parser that uses current year. +- Add Syslog (`syslog`) input plugin. +- Print the enabled aggregator and processor plugins on startup. +- Add static `routing_key` option to `amqp` output. +- Add passive mode exchange declaration option to AMQP Consumer (`amqp_consumer`) input plugin. +- Add counter fields to PF (`pf`) input plugin. + +### Bug fixes + +- Write to working file outputs if any files are not writeable. +- Add all win_perf_counters fields for a series in a single metric. +- Report results of `dns_query` instead of `0ms` on timeout. 
+- Add consul service tags to metric. +- Fix wildcards and multi instance processes in win_perf_counters. +- Fix crash on 32-bit Windows in `win_perf_counters`. +- Fix `win_perf_counters` not collecting at every interval. +- Use same flags for all BSD family ping variants. + + +## v1.6.4 [2018-06-05] + +### Bugfixes + +* Fix SNMP overriding of auto-configured table fields. +* Fix uint support in CloudWatch output. +* Fix documentation of `instance_name` option in Varnish input. +* Revert to previous Aerospike library version due to memory leak. + +## v1.6.3 [2018-05-21] + +### Bug fixes + +* Fix intermittent panic in Aerospike input plugin. +* Fix connection leak in the Jolokia agent (`Jolokia2_agent`) input plugin. +* Fix Jolokia agent (`Jolokia2_agent`) input plugin timeout parsing. +* Fix error parsing Dropwizard metrics. +* Fix Librato (`librato`) output plugin support for unsigned integer (`uint`) and Boolean (`bool`). +* Fix WaitGroup deadlock, if URL is incorrect, in Apache input plugin. + +## v1.6.2 [2018-05-08] + +### Bug fixes + +* Use same timestamp for fields in system input. +* Fix handling of uint64 in Datadog (`datadog`) output. +* Ignore UTF8 BOM in JSON parser. +* Fix case for slave metrics in MySQL (`mysql`) input. +* Fix uint support in CrateDB (`cratedb`) output. + + +## v1.6.1 [2018-04-23] + +### Bug fixes + +* Report mem input fields as gauges instead of counters. +* Fix Graphite outputs unsigned integers in wrong format. +* Report available fields if `utmp` is unreadable. +* Fix potential `no fields` error writing to outputs. +* Fix uptime reporting in system input when ran inside docker. +* Fix mem input `cannot allocate memory` error on FreeBSD-based systems. +* Fix duplicate tags when overriding an existing tag. +* Add server argument as first argument in the Unbound (`unbound`) input plugin. +* Fix handling of floats with multiple leading zeroes. +* Return errors in SSL/TLS configuration of MongoDB (`mongodb`) input plugin. 
+
+
+## v1.6 [2018-04-16]
+
+### Release notes
+
+- The MySQL (`mysql`) input plugin has been updated to fix a number of type conversion
+  issues. This may cause a `field type error` when inserting into InfluxDB due
+  to the change of types.
+
+  To address this, we have introduced a new `metric_version` option to control
+  enabling the new format.
+  For in depth recommendations on upgrading, see [Metric version](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mysql#metric-version) in the MySQL input plugin documentation.
+
+  You are encouraged to migrate to the new model when possible as the old version
+  is deprecated and will be removed in a future version.
+
+- The PostgreSQL (`postgresql`) input plugin now defaults to using a persistent connection to the database.
+  In environments where TCP connections are terminated, the `max_lifetime`
+  setting should be set less than the collection `interval` to prevent errors.
+
+- The SQL Server (`sqlserver`) input plugin has a new query and data model that can be enabled
+  by setting `query_version = 2`.
+  Migrate to the new model, if possible, since the old version is deprecated and will be removed in a future version.
+
+- The OpenLDAP (`openldap`) input plugin has a new option, `reverse_metric_names = true`, that reverses metric
+  names to improve grouping.
+  Enable this option, when possible, as the old ordering is deprecated.
+
+- The new HTTP (`http`) input plugin, when configured with `data_format = "json"`, can perform the
+  same task as the, now deprecated, HTTP JSON (`httpjson`) input plugin.
+ + +### New input plugins + +- [HTTP (`http`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/http/README.md) - Thanks to @grange74 +- [Ipset (`ipset`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ipset/README.md) - Thanks to @sajoupa +- [NATS Server Monitoring (`nats`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/nats/README.md) - Thanks to @mjs and @levex + +### New processor plugins + +- [Override (`override`) processor plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/override/README.md) - Thanks to @KarstenSchnitter + +### New parsers + +- [Dropwizard input data format](https://github.com/influxdata/telegraf/blob/release-1.8/docs/DATA_FORMATS_INPUT.md#dropwizard) - Thanks to @atzoum + +### Features + +* Add health status mapping from `string` to `int` in Elasticsearch (`elasticsearch`) input plugin. +* Add control over which stats to gather in BasicStats (`basicstats`) aggregator plugin. +* Add `messages_delivered_get` to RabbitMQ (`rabbitmq`) input plugin. +* Add `wired` field to mem input plugin. +* Add support for gathering exchange metrics to the RabbitMQ (`rabbitmq`) input plugin. +* Add support for additional metrics on Linux in Zfs (`zfs`) input plugin. +* Add `available_entropy` field to Kernel (`kernel`) input plugin. +* Add user privilege level setting to IPMI sensors. +* Use persistent connection to PostgreSQL database. +* Add support for dropwizard input data format. +* Add container health metrics to Docker (`docker`) input plugin. +* Add support for using globs in devices list of DiskIO (`diskio`) input plugin. +* Allow running as console application on Windows. +* Add listener counts and node running status to RabbitMQ (`rabbitmq`) input plugin. +* Add NATS Server Monitoring (`nats`) input plugin. +* Add ability to select which queues will be gathered in RabbitMQ (`rabbitmq`) input plugin. 
+* Add support for setting BSD source address to the ping (`ping`) input plugin. +* Add Ipset (`ipset`) input plugin. +* Add TLS and HTTP basic auth to Prometheus Client (`prometheus_client`) output plugin. +* Add new sqlserver output data model. +* Add native Go method for finding `pid` to the Procstat (`procstat`) input plugin. +* Add additional metrics and reverse metric names option to OpenLDAP (`openldap`) input plugin. +* Add TLS support to the Mesos (`mesos`) input plugin. +* Add HTTP (`http`) input plugin. +* Add keep alive support to the TCP mode of StatsD (`statsd`) input plugin . +* Support deadline in Ping (`ping`) input plugin. +* Add option to disable labels in the Prometheus Client (`prometheus`) output plugin for string fields. +* Add shard server stats to the MongoDB (`mongodb`) input plugin. +* Add server option to Unbound (`unbound`) input plugin. +* Convert boolean metric values to float in Datadog (`datadog`) output plugin. +* Add Solr 3 compatibility. +* Add sum stat to BasicStats (`basicstats`) aggregator plugin. +* Add ability to override proxy from environment in HTTP Response (`http_response`) input plugin. +* Add host to ping timeout log message. +* Add override processor plugin. +* Add `status_code` and result tags and `result_type` field to HTTP Response (`http_response`) input plugin. +* Added config flag to skip collection of network protocol metrics. +* Add TLS support to Kapacitor (`kapacitor`) input plugin. +* Add HTTP basic auth support to the HTTP Listener (`http_listener`) input plugin. +* Tags in output InfluxDB Line Protocol are now sorted. +* InfluxDB Line Protocol parser now accepts DOS line endings. +* An option has been added to skip database creation in the InfluxDB (`influxdb`) output plugin. +* Add support for connecting to InfluxDB over a UNIX domain socket. +* Add optional unsigned integer support to the influx data format. +* Add TLS support to Zookeeper (`zookeeper`) input plugin. 
+* Add filters for container state to Docker (`docker`) input plugin.
+
+### Bug fixes
+
+* Fix various MySQL data type conversions.
+* Fix metric buffer limit in internal plugin after reload.
+* Fix panic in HTTP Response (`http_response`) input plugin on invalid regex.
+* Fix socket_listener setting ReadBufferSize on TCP sockets.
+* Add tag for target URL to `phpfpm` input plugin.
+* Fix cannot unmarshal object error in Mesosphere DC/OS (`dcos`) input plugin.
+* Fix InfluxDB output not able to reconnect when server address changes.
+* Fix parsing of DOS line endings in the SMART (`smart`) input plugin.
+* Fix precision truncation when no timestamp included.
+* Fix SNMPv3 connection with Cisco ASA 5515 in SNMP (`snmp`) input plugin.
+
+
+## v1.5.3 [2018-03-14]
+
+### Bug fixes
+
+* Set path to `/` if `HOST_MOUNT_PREFIX` matches full path.
+* Remove `userinfo` from `url` tag in Prometheus input plugin.
+* Fix Ping input plugin not reporting zero durations.
+* Disable `keepalive` in MQTT output plugin to prevent deadlock.
+* Fix collation difference in SQL Server (`sqlserver`) input plugin.
+* Fix uptime metric in Passenger (`passenger`) input plugin.
+* Add output of stderr in case of error to exec log message.
+
+## v1.5.2 [2018-01-30]
+
+### Bug fixes
+
+- Ignore empty lines in Graphite plaintext.
+- Fix `index out of bounds` error in Solr input plugin.
+- Reconnect before sending Graphite metrics if disconnected.
+- Align aggregator period with internal ticker to avoid skipping metrics.
+- Fix a potential deadlock when using aggregators.
+- Limit wait time for writes in MQTT (`mqtt`) output plugin.
+- Revert change in Graphite (`graphite`) output plugin where dot(`.`) in field key was replaced by underscore (`_`).
+- Add `timeout` to Wavefront output write.
+- Exclude `master_replid` fields from Redis input.
+
+## v1.5.1 [2018-01-10]
+
+### Bug fixes
+
+- Fix name error in jolokia2_agent sample config.
+- Fix DC/OS input - login expiration time.
+- Set Content-Type charset parameter in InfluxDB (`influxdb`) output plugin and allow it to be overridden. +- Document permissions setup for Postfix (`postfix`) input plugin. +- Fix `deliver_get` field in RabbitMQ (`rabbitmq`) input plugin. +- Escape environment variables during config TOML parsing. + +## v1.5 [2017-12-14] + +### New plugins + +#### Input plugins +- [Bond (bond)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/bond/README.md) - Thanks to @ildarsv +- [DC/OS (dcos)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/dcos/README.md) - Thanks to @influxdata +- [Jolokia2 (jolokia2)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei +- [NGINX Plus (nginx_plus)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah +- [OpenSMTPD (opensmtpd)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer +- [Particle.io Webhooks (particle)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs +- [PF (pf)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/pf/README.md) - Thanks to @nferch +- [Postfix (postfix)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/postfix/README.md) - Thanks to @phemmer +- [SMART (smart)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen +- [Solr (solr)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/solr/README.md) - Thanks to @ljagiello +- [Teamspeak (teamspeak)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/teamspeak/README.md) - Thanks to @p4ddy1 +- [Unbound (unbound)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/unbound/README.md) - Thanks to 
@aromeyer + +#### Aggregator plugins +- [BasicStats (basicstats)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno + +#### Output plugins +- [CrateDB (cratedb)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/cratedb) - Thanks to @felixge +- [Wavefront (wavefront)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/outputs/wavefront/README.md) - Thanks to @puckpuck + + +### Release notes + +- In the Kinesis (`kinesis`) output plugin, use of the `partition_key` and + `use_random_partitionkey` options has been deprecated in favor of the + `partition` subtable. This allows for more flexible methods to set the + partition key such as by metric name or by tag. + +- With the release of the new improved Jolokia2 (`jolokia2`) input plugin, the legacy `jolokia` + plugin is deprecated and will be removed in a future release. Users of this + plugin are encouraged to update to the new `jolokia2` plugin. + +### Features + +- Add support for sharding based on metric name. +- Add Kafka output plugin `topic_suffix` option. +- Include mount mode option in disk metrics. +- TLS and MTLS enhancements to HTTP Listener input plugin. +- Add polling method to logparser and tail inputs. +- Add timeout option for Kubernetes (`kubernetes`) input plugin. +- Add support for timing sums in statsd input plugin. +- Add resource limit monitoring to Procstat (`procstat`) input plugin. +- Add support for k8s service DNS discovery to Prometheus Client (`prometheus`) input plugin. +- Add configurable metrics endpoint to (`prometheus`) output plugin. +- Add support for NSQLookupd to `nsq_consumer`. +- Add configurable separator for metrics and fields in OpenTSDB (`opentsdb`) output plugin. +- Add support for the rollbar occurrence webhook event. +- Add extra wired tiger cache metrics to `mongodb` input. +- Collect Docker Swarm service metrics in Docker (`docker`) input plugin. 
+- Add cluster health level configuration to Elasticsearch (`elasticsearch`) input plugin. +- Add ability to limit node stats in Elasticsearch (`elasticsearch`) input plugin. +- Add UDP IPv6 support to StatsD (`statsd`) input plugin. +- Use labels in Prometheus Client (`prometheus`) output plugin for string fields. +- Add support for decimal timestamps to ts-epoch modifier. +- Add histogram and summary types and use in Prometheus (`prometheus`) plugins. +- Gather concurrently from snmp agents. +- Perform DNS lookup before ping and report result. +- Add instance name option to Varnish (`varnish`) plugin. +- Add support for SSL settings to ElasticSearch (`elasticsearch`) output plugin. +- Add modification_time field to Filestat (`filestat`) input plugin. +- Add systemd unit pid and cgroup matching to Procstat (`procstat`) . +- Use MAX() instead of SUM() for latency measurements in SQL Server (`sqlserver`) input plugin. +- Add index by week number to Elasticsearch (`elasticsearch`) output plugin. +- Add support for tags in the index name in Elasticsearch (`elasticsearch`) output plugin. +- Add slab to mem plugin. +- Add support for glob patterns in net input plugin. +- Add option to AMQP (`amqp`) output plugin to publish persistent messages. +- Support I (idle) process state on procfs+Linux. + +### Bug fixes + +- Fix webhooks input address in use during reload. +- Unlock Statsd when stopping to prevent deadlock. +- Fix cloudwatch output requires unneeded permissions. +- Fix prometheus passthrough for existing value types. +- Always ignore autofs filesystems in disk input. +- Fail metrics parsing on unescaped quotes. +- Whitelist allowed char classes for graphite output. +- Use hexadecimal ids and lowercase names in zipkin input. +- Fix snmp-tools output parsing with Windows EOLs. +- Add shadow-utils dependency to rpm package. +- Use deb-systemd-invoke to restart service. +- Fix kafka_consumer outside range of offsets error. 
+- Fix separation of multiple prometheus_client outputs. +- Don't add system input uptime_format as a counter. + +## v1.4.5 [2017-12-01] + +### Bug fixes + +- Fix global variable collection when using interval_slow option in MySQL input. +- Fix error getting net connections info in netstat input. +- Fix HOST_MOUNT_PREFIX in Docker with disk input. + +## v1.4.4 [2017-11-08] + +### Bug fixes +- Use schema specified in mqtt_consumer input. +- Redact Datadog API key in log output. +- Fix error getting PIDs in netstat input. +- Support HOST_VAR envvar to locate /var in system input. +- Use current time if Docker container read time is zero value. + +## v1.4.3 [2017-10-25] + +### Bug fixes + +- Fix container name filters in Docker input. +- Fix snmpwalk address format in leofs input. +- Fix case sensitivity issue in SQL Server query. +- Fix CPU input plugin stuck after suspend on Linux. +- Fix MongoDB input panic when restarting MongoDB. +- Preserve URL path prefix in InfluxDB output. +- Fix TELEGRAF_OPTS expansion in systemd service unit. +- Remove warning when JSON contains null value. +- Fix ACL token usage in consul input plugin. +- Fix unquoting error with Tomcat 6. +- Fix syscall panic in diskio on some Linux systems. + +## v1.4.2 [2017-10-10] + +### Bug fixes + +- Fix error if int larger than 32-bit in `/proc/vmstat`. +- Fix parsing of JSON with a UTF8 BOM in `httpjson`. +- Allow JSON data format to contain zero metrics. +- Fix format of connection_timeout in `mqtt_consumer`. +- Fix case sensitivity error in SQL Server input. +- Add support for proxy environment variables to `http_response`. +- Add support for standard proxy env vars in outputs. +- Fix panic in CPU input if number of CPUs changes. +- Use chunked transfer encoding in InfluxDB output. + +## v1.4.1 [2017-09-26] + +### Bug fixes + +- Fix MQTT input exits if Broker is not available on startup. +- Fix optional field value conversions in fluentd input. 
+- Whitelist allowed char classes for opentsdb output. +- Fix counter and gauge metric types. +- Fix skipped line with empty target in iptables. +- Fix duplicate keys in perf counters sqlserver query. +- Fix panic in statsd p100 calculation. +- Fix arm64 packages contain 32-bit executable. + +## v1.4.0 [2017-09-05] + +### Release Notes + +- The `kafka_consumer` input has been updated to support Kafka 0.9 and + above style consumer offset handling. The previous version of this plugin + supporting Kafka 0.8 and below is available as the `kafka_consumer_legacy` + plugin. +- In the `aerospike` input the `node_name` field has been changed to be a tag + for both the `aerospike_node` and `aerospike_namespace` measurements. +- The default prometheus_client port has been changed to 9273. + +### New plugins + +- fail2ban +- fluentd +- histogram +- minecraft +- openldap +- salesforce +- tomcat +- win_services +- zipkin + +### Features + +- Add Kafka 0.9+ consumer support. +- Add support for self-signed certs to InfluxDB input plugin. +- Add TCP listener for statsd input. +- Add Docker container environment variables as tags. Only whitelisted. +- Add timeout option to IPMI sensor plugin. +- Add support for an optional SSL/TLS configuration to Nginx input plugin. +- Add timezone support for logparser timestamps. +- Add result_type field for http_response input. +- Add include/exclude filters for docker containers. +- Add secure connection support to graphite output. +- Add min/max response time on linux/darwin to ping. +- Add HTTP Proxy support to influxdb output. +- Add standard SSL options to mysql input. +- Add input plugin for fail2ban. +- Support HOST_PROC in processes and linux_sysctl_fs inputs. +- Add Minecraft input plugin. +- Add support for RethinkDB 1.0 handshake protocol. +- Add optional usage_active and time_active CPU metrics. +- Change default prometheus_client port. +- Add fluentd input plugin. +- Add result_type field to net_response input plugin. 
+- Add read timeout to socket_listener. +- Add input plugin for OpenLDAP. +- Add network option to dns_query. +- Add redis_version field to redis input. +- Add tls options to docker input. +- Add histogram aggregator plugin. +- Add Zipkin input plugin. +- Add Windows Services input plugin. +- Add path tag to logparser containing path of logfile. +- Add Salesforce input plugin. +- Add option to run varnish under sudo. +- Add weighted_io_time to diskio input. +- Add gzip content-encoding support to influxdb output. +- Allow using system plugin in Windows. +- Add Tomcat input plugin. +- HTTP headers can be added to InfluxDB output. + +### Bug fixes + +- Improve logging of errors in Cassandra input. +- [enh] set db_version at 0 if query version fails. +- Fixed SQL Server input to work with case sensitive server collation. +- Systemd does not see all shutdowns as failures. +- Reuse transports in input plugins. +- Inputs processes fails with "no such process". +- Fix multiple plugin loading in win_perf_counters. +- MySQL input: log and continue on field parse error. +- Fix timeout option in Windows ping input sample configuration. +- Fix Kinesis output plugin in govcloud. +- Fix Aerospike input adds all nodes to a single series. +- Improve Prometheus Client output documentation. +- Display error message if prometheus output fails to listen. +- Fix elasticsearch output content type detection warning. +- Prevent possible deadlock when using aggregators. +- Fix combined tagdrop/tagpass filtering. +- Fix filtering when both pass and drop match an item. +- Only report cpu usage for online cpus in docker input. +- Start first aggregator period at startup time. +- Fix panic in logparser if file cannot be opened. +- Default to localhost if zookeeper has no servers set. +- Fix docker memory and cpu reporting in Windows. +- Allow iptable entries with trailing text. +- Sanitize password from couchbase metric. +- Converge to typed value in prometheus output. 
+- Skip compilation of logparser and tail on solaris.
+- Discard logging from tail library.
+- Remove log message on ping timeout.
+- Don't retry points beyond retention policy.
+- Don't start Telegraf on install in Amazon Linux.
+- Enable hddtemp input on all platforms.
+- Escape backslash within string fields.
+- Fix parsing of SHM remotes in ntpq input.
+- Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
+- Fix NSQ input plugin when used with version 1.0.0-compat.
+- Added CloudWatch metric constraint validation.
+- Skip non-numerical values in graphite format.
+- Fix panic when handling string fields with escapes.
+
+## v1.3.5 [2017-07-26]
+
+### Bug fixes
+
+- Fix prometheus output cannot be reloaded.
+- Fix filestat reporting exists when cannot list directory.
+- Fix ntpq parse issue when using dns_lookup.
+- Fix panic when agent.interval = "0s".
+
+## v1.3.4 [2017-07-12]
+
+### Bug fixes
+
+- Fix handling of escape characters within fields.
+- Fix chrony plugin does not track system time offset.
+- Do not allow metrics with trailing slashes.
+- Prevent Write from being called concurrently.
+
+## v1.3.3 [2017-06-28]
+
+### Bug fixes
+
+- Allow dos line endings in tail and logparser.
+- Remove label value sanitization in prometheus output.
+- Fix bug parsing default timestamps with modified precision.
+- Fix panic in elasticsearch input if cannot determine master.
+
+## v1.3.2 [2017-06-14]
+
+### Bug fixes
+
+- Fix InfluxDB UDP metric splitting.
+- Fix mongodb/leofs urls without scheme.
+- Fix inconsistent label dimensions in prometheus output.
+
+## v1.3.1 [2017-05-31]
+
+### Bug fixes
+
+- Fixed sqlserver input to work with case-sensitive server collation.
+- Reuse transports in input plugins.
+- Process input fails with `no such process`.
+- Fix InfluxDB output database quoting.
+- Fix net input on older Linux kernels.
+- Fix panic in mongo input.
+- Fix length calculation of split metric buffer.
+ +## v1.3.0 [2017-05-09] + +#### Changes to the Windows ping plugin + +Users of the windows [ping plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ping) will need to drop or migrate their measurements to continue using the plugin. +The reason for this is that the windows plugin was outputting a different type than the linux plugin. +This made it impossible to use the `ping` plugin for both windows and linux machines. + +#### Changes to the Ceph plugin + +For the [Ceph plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ceph), the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag. + +Telegraf < 1.3: + +``` +# field_name value +active+clean 123 +active+clean+scrubbing 3 +``` + +Telegraf >= 1.3: + +``` +# field_name value tag +count 123 state=active+clean +count 3 state=active+clean+scrubbing +``` + +#### Rewritten Riemann plugin + +The [Riemann output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/riemann) has been rewritten +and the [previous riemann plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/riemann_legacy) is _incompatible_ with the new one. +The reasons for this are outlined in issue [#1878](https://github.com/influxdata/telegraf/issues/1878). +The previous Riemann output will still be available using `outputs.riemann_legacy` if needed, but that will eventually be deprecated. +It is highly recommended that all users migrate to the new Riemann output plugin. + +#### New Socket Listener and Socket Writer plugins + +Generic [Socket Listener](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/socket_listener) and [Socket Writer](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/socket_writer) plugins have been implemented for receiving and sending UDP, TCP, unix, & unix-datagram data. 
+These plugins will replace [udp_listener](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/udp_listener) and [tcp_listener](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/tcp_listener), which are still available but will be deprecated eventually. + +### Features + +- Add SASL options for the [Kafka output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/kafka). +- Add SSL configuration for [HAproxy input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/haproxy). +- Add the [Interrupts input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/interrupts). +- Add generic [Socket Listener input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/socket_listener) and [socket writer output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/socket_writer). +- Extend the [HTTP Response input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/http_response) to support searching for a substring in response. Return 1 if found, else 0. +- Add userstats to the [MySQL input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mysql). +- Add more InnoDB metric to the [MySQL input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mysql). +- For the [Ceph input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ceph), `ceph_pgmap_state` metric now uses a single field `count`, with PG state published as `state` tag. +- Use own client for improved through-put and less allocations in the [InfluxDB output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/influxdb). +- Keep -config-directory when running as Windows service. +- Rewrite the [Riemann output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/riemann). 
+- Add support for name templates and udev tags to the [DiskIO input plugin](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/system/DISK_README.md#diskio-input-plugin). +- Add integer metrics for [Consul](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/consul) check health state. +- Add lock option to the [IPtables input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/iptables). +- Support [ipmi_sensor input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ipmi_sensor) querying local ipmi sensors. +- Increment gather_errors for all errors emitted by inputs. +- Use the official docker SDK. +- Add [AMQP consumer input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/amqp_consumer). +- Add pprof tool. +- Support DEAD(X) state in the [system input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/system). +- Add support for [MongoDB](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mongodb) client certificates. +- Support adding [SNMP](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/snmp) table indexes as tags. +- Add [Elasticsearch 5.x output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/elasticsearch). +- Add json timestamp units configurability. +- Add support for Linux sysctl-fs metrics. +- Support to include/exclude docker container labels as tags. +- Add [DMCache input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/dmcache). +- Add support for precision in [HTTP Listener input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/http_listener). +- Add `message_len_max` option to the [Kafka consumer input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/kafka_consumer). 
+- Add [collectd parser](/telegraf/v1.3/concepts/data_formats_input/#collectd). +- Simplify plugin testing without outputs. +- Check signature in the [GitHub webhook input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/webhooks/github). +- Add [papertrail](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/webhooks/papertrail) support to webhooks. +- Change [jolokia input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/jolokia) to use bulk requests. +- Add [DiskIO input plugin](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/system/DISK_README.md#diskio-input-plugin) for Darwin. +- Add use_random_partitionkey option to the [Kinesis output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/kinesis). +- Add tcp keep-alive to [Socket Listener input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/socket_listener) and [Socket Writer output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/socket_writer). +- Add [Kapacitor input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/kapacitor). +- Use Go (golang) 1.8.1. +- Add documentation for the [RabbitMQ input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/rabbitmq). +- Make the [Logparser input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/logparser) check for newly-created files. + +### Bugfixes + +- Allow `@` symbol in password for the ipmi_sensor plugin. +- Fix arithmetic overflow error converting numeric to data type int in SQL Server input. +- Flush jitter can inhibit metric collection. +- Add missing fields for HAproxy input. +- Handle null startTime for stopped pods for the Kubernetes input. +- Fix cpu input panic when /proc/stat is empty. +- Fix telegraf swallowing panics in --test mode. 
+- Create pidfile with 644 permissions & defer file deletion. +- Fix install/remove of telegraf on non-systemd Debian/Ubuntu systems. +- Fix for reloading telegraf freezes prometheus output. +- Fix when empty tag value causes error on InfluxDB output. +- buffer_size field value is negative number from "internal" plugin. +- Missing error handling in the MySQL plugin leads to segmentation violation. +- Fix type conflict in windows ping plugin. +- logparser: regexp with lookahead. +- Telegraf can crash in LoadDirectory on 0600 files. +- Iptables input: document better that rules without a comment are ignored. +- Fix win_perf_counters capping values at 100. +- Exporting Ipmi.Path to be set by config. +- Remove warning if parse empty content. +- Update default value for Cloudwatch rate limit. +- create /etc/telegraf/telegraf.d directory in tarball. +- Return error on unsupported serializer data format. +- Fix Windows Performance Counters multi instance identifier. +- Add write timeout to Riemann output. +- fix timestamp parsing on prometheus plugin. +- Fix deadlock when output cannot write. +- Fix connection leak in postgresql. +- Set default measurement name for snmp input. +- Improve performance of diskio with many disks. +- The internal input plugin uses the wrong units for `heap_objects`. +- Fix ipmi_sensor config is shared between all plugin instances. +- Network statistics not collected when system has alias interfaces. +- Sysstat plugin needs LANG=C or similar locale. +- File output closes standard streams on reload. +- AMQP output disconnect blocks all outputs. +- Improve documentation for redis input plugin. + +## v1.2.1 [2017-02-01] + +### Bugfixes + +- Fix segfault on nil metrics with InfluxDB output. +- Fix negative number handling. + +### Features + +- Go (golang) version update 1.7.4 -> 1.7.5 + +## v1.2 [2017-01-24] + +### Release Notes + +- The StatsD plugin will now default all "delete_" config options to "true". 
This
+will change the default behavior for users who were not specifying these parameters
+in their config file.
+
+- The StatsD plugin will also no longer save its state on a service reload.
+Essentially we have reverted PR [#887](https://github.com/influxdata/telegraf/pull/887).
+The reason for this is that saving the state in a global variable is not
+thread-safe (see [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102)),
+and this creates issues if users want to define multiple instances
+of the statsd plugin. Saving state on reload may be considered in the future,
+but this would need to be implemented at a higher level and applied to all
+plugins, not just statsd.
+
+### Features
+
+- Fix improper calculation of CPU percentages
+- Use RFC3339 timestamps in log output.
+- Non-default HTTP timeouts for RabbitMQ plugin.
+- "Discard" output plugin added, primarily for testing purposes.
+- The JSON parser can now parse an array of objects using the same configuration.
+- Option to use device name rather than path for reporting disk stats.
+- Telegraf "internal" plugin for collecting stats on itself.
+- Update GoLang version to 1.7.4.
+- Support a metric.Split function.
+- Elasticsearch "shield" (basic auth) support doc.
+- Fix over-querying of cloudwatch metrics
+- OpenTSDB basic auth support.
+- RabbitMQ Connection metrics.
+- HAProxy session limit metric.
+- Accept strings for StatsD sets.
+- Change StatsD default "reset" behavior.
+- Enable setting ClientID in MQTT output.
+- MongoDB input plugin: Improve state data.
+- Ping input: add standard deviation field.
+- Add GC pause metric to InfluxDB input plugin.
+- Added response_timeout property to prometheus input plugin.
+- Pulling github.com/lxn/win's pdh wrapper into Telegraf.
+- Support negative statsd counters.
+- Elasticsearch cluster stats support.
+- Change Amazon Kinesis output plugin to use the built-in serializer plugins.
+- Hide username/password from elasticsearch error log messages.
+- Configurable HTTP timeouts in Jolokia plugin.
+- Allow changing jolokia attribute delimiter.
+
+### Bugfixes
+
+- Fix the Value data format not trimming null characters from input.
+- Fix windows `.net` plugin.
+- Cache & expire metrics for delivery to prometheus
+- Fix potential panic in aggregator plugin metric maker.
+- Add optional ability to define PID as a tag.
+- Fix win_perf_counters not gathering non-English counters.
+- Fix panic when file stat info cannot be collected due to permissions or other issue(s).
+- Graylog output should set short_message field.
+- Hddtemp always put the value in the field temperature.
+- Properly collect nested jolokia struct data.
+- Fix puppetagent inputs plugin to support string for config variable.
+- Fix docker input plugin tags when registry has port.
+- Fix tail input when reading from a pipe.
+- MongoDB plugin always shows 0 replication lag.
+- Consul plugin: add check_id as a tag in metrics to avoid overwrites.
+- Partial fix: logparser CLF pattern with IPv6 addresses.
+- Fix thread-safety when using multiple instances of the statsd input plugin.
+- Docker input: interface conversion panic fix.
+- SNMP: ensure proper context is present on error messages.
+- OpenTSDB: add tcp:// prefix if no scheme provided.
+- Influx parser: parse line-protocol without newlines.
+- InfluxDB output: fix field type conflict blocking output buffer.
+
+## v1.1.2 [2016-12-12]
+
+### Bugfixes
+
+- Make snmptranslate not required when using numeric OID.
+- Add a global snmp translation cache.
+
+## v1.1.1 [2016-11-14]
+
+### Bugfixes
+
+- Fix issue parsing toml durations with single quotes.
+
+## v1.1.0 [2016-11-07]
+
+### Release Notes
+
+- Telegraf now supports two new types of plugins: processors & aggregators.
+
+- On systemd Telegraf will no longer redirect its stdout to /var/log/telegraf/telegraf.log.
+On most systems, the logs will be directed to the systemd journal and can be
+accessed by `journalctl -u telegraf.service`. Consult the systemd journal
+documentation for configuring journald. There is also a [`logfile` config option](https://github.com/influxdata/telegraf/blob/release-1.8/etc/telegraf.conf#L70)
+available in 1.1, which will allow users to easily configure telegraf to
+continue sending logs to /var/log/telegraf/telegraf.log.
+
+### Features
+
+- Processor & Aggregator plugin support.
+- Adding the tags in the graylog output plugin.
+- Telegraf systemd service, log to journal.
+- Allow numeric and non-string values for tag_keys.
+- Adding Gauge and Counter metric types.
+- Remove carriage returns from exec plugin output on Windows
+- Elasticsearch input: configurable timeout.
+- Massage metric names in Instrumental output plugin
+- Apache Mesos improvements.
+- Add Ceph Cluster Performance Statistics
+- Ability to configure response_timeout in httpjson input.
+- Add additional redis metrics.
+- Added capability to send metrics through HTTP API for OpenTSDB.
+- iptables input plugin.
+- Add filestack webhook plugin.
+- Add server hostname for each Docker measurements.
+- Add NATS output plugin.
+- HTTP service listener input plugin.
+- Add database blacklist option for Postgresql
+- Add Docker container state metrics to Docker input plugin output
+- Add support to SNMP for IP & MAC address conversion.
+- Add support to SNMP for OID index suffixes.
+- Change default arguments for SNMP plugin.
+- Apache Mesos input plugin: very high-cardinality mesos-task metrics removed.
+- Logging overhaul to centralize the logger & log levels, & provide a logfile config option.
+- HAProxy plugin socket glob matching.
+- Add Kubernetes plugin for retrieving pod metrics.
+
+### Bugfixes
+
+- Fix NATS plug-ins reconnection logic.
+- Set required default values in udp_listener & tcp_listener.
+- Fix toml unmarshal panic in Duration objects.
+- Fix handling of non-string values for JSON keys listed in tag_keys.
+- Fix mongodb input panic on version 2.2.
+- Fix statsd scientific notation parsing.
+- Sensors plugin strconv.ParseFloat: parsing "": invalid syntax.
+- Fix prometheus_client reload panic.
+- Fix Apache Kafka consumer panic when nil error is returned down errs channel.
+- Speed up statsd parsing.
+- Fix powerdns integer parse error handling.
+- Fix varnish plugin defaults not being used.
+- Fix Windows glob paths.
+- Fix issue loading config directory on Windows.
+- Windows remote management interactive service fix.
+- SQLServer, fix issue when case sensitive collation is activated.
+- Fix huge allocations in http_listener when dealing with huge payloads.
+- Fix translating SNMP fields not in MIB.
+- Fix SNMP emitting empty fields.
+- SQL Server waitstats truncation bug.
+- Fix logparser common log format: numbers in ident.
+- Fix JSON Serialization in OpenTSDB output.
+- Fix Graphite template ordering, use most specific.
+- Fix snmp table field initialization for non-automatic table.
+- cgroups path being parsed as metric.
+- Fix phpfpm fcgi client panic when URL does not exist.
+- Fix config file parse error logging.
+- Delete nil fields in the metric maker.
+- Fix MySQL special characters in DSN parsing.
+- Ping input odd timeout behavior.
+- Switch to github.com/kballard/go-shellquote.
+
+## v1.0.1 [2016-09-26]
+
+### Bugfixes
+
+- Prometheus output: Fix bug with multi-batch writes.
+- Fix unmarshal of influxdb metrics with null tags.
+- Add configurable timeout to influxdb input plugin.
+- Fix statsd no default value panic.
+
+## v1.0 [2016-09-08]
+
+### Release Notes
+
+**Breaking Change** The SNMP plugin is being deprecated in its current form.
+There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/snmp)
+which fixes many of the issues and confusions
+of its predecessor.
For users wanting to continue to use the deprecated SNMP +plugin, you will need to change your config file from `[[inputs.snmp]]` to +`[[inputs.snmp_legacy]]`. The configuration of the new SNMP plugin is _not_ +backwards-compatible. + +**Breaking Change**: Aerospike main server node measurements have been renamed +aerospike_node. Aerospike namespace measurements have been renamed to +aerospike_namespace. They will also now be tagged with the node_name +that they correspond to. This has been done to differentiate measurements +that pertain to node vs. namespace statistics. + +**Breaking Change**: users of github_webhooks must change to the new +`[[inputs.webhooks]]` plugin. + +This means that the default github_webhooks config: + +``` +# A Github Webhook Event collector +[[inputs.github_webhooks]] + ## Address and port to host Webhook listener on + service_address = ":1618" +``` + +should now look like: + +``` +# A Webhooks Event collector +[[inputs.webhooks]] + ## Address and port to host Webhook listener on + service_address = ":1618" + + [inputs.webhooks.github] + path = "/" +``` + +- Telegraf now supports being installed as an official windows service, +which can be installed via +`> C:\Program Files\Telegraf\telegraf.exe --service install` + +- `flush_jitter` behavior has been changed. The random jitter will now be +evaluated at every flush interval, rather than once at startup. This makes it +consistent with the behavior of `collection_jitter`. + +- PostgresSQL plugins now handle oid and name typed columns seamlessly, previously they were ignored/skipped. + +### Features + +- postgresql_extensible now handles name and oid types correctly. +- Separate container_version from container_image tag. +- Support setting per-device and total metrics for Docker network and blockio. +- MongoDB input plugin: adding per DB stats from db.stats() +- Add tls support for certs to RabbitMQ input plugin. +- Webhooks input plugin. +- Rollbar webhook plugin. 
+- Mandrill webhook plugin.
+- docker-machine/boot2docker no longer required for unit tests.
+- cgroup input plugin.
+- Add input plugin for consuming metrics from NSQD.
+- Add ability to read Redis from a socket.
+- **Breaking Change** - Redis `role` tag renamed to `replication_role` to avoid global_tags override.
+- Fetching Galera status metrics in MySQL
+- Aerospike plugin refactored to use official client library.
+- Add measurement name arg to logparser plugin.
+- logparser: change resp_code from a field to a tag.
+- Implement support for fetching hddtemp data
+- statsd: do not log every dropped metric.
+- Add precision rounding to all metrics on collection.
+- Add support for Tengine.
+- Logparser input plugin for parsing grok-style log patterns.
+- ElasticSearch: now supports connecting to ElasticSearch via SSL.
+- Add graylog input plugin.
+- Consul input plugin.
+- conntrack input plugin.
+- vmstat input plugin.
+- Standardized AWS credentials evaluation & wildcard CloudWatch dimensions.
+- Add SSL config options to http_response plugin.
+- Graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
+- Make DNS lookups for chrony configurable.
+- Allow wildcard filtering of varnish stats.
+- Support for glob patterns in exec plugin commands configuration.
+- RabbitMQ input: made url parameter optional by using DefaultURL (`http://localhost:15672`) if not specified.
+- Limit AWS GetMetricStatistics requests to 10 per second.
+- RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified.
+- Refactor of flush_jitter argument.
+- Add inactive & active memory to mem plugin.
+- Official Windows service.
+- Forking sensors command to remove C package dependency.
+- Add a new SNMP plugin.
+
+### Bugfixes
+
+- Fix `make windows` build target.
+- Fix error race conditions and partial failures.
+- nstat: fix inaccurate config panic.
+- jolokia: fix handling multiple multi-dimensional attributes. +- Fix prometheus character sanitizing. Sanitize more win_perf_counters characters. +- Add diskio io_time to FreeBSD & report timing metrics as ms (as linux does). +- Fix covering Amazon Linux for post remove flow. +- procstat missing fields: read/write bytes & count. +- diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality. +- nil metrics panic fix. +- Fix datarace in apache input plugin. +- Add `read_repairs` statistics to riak plugin. +- Fix memory/connection leak in Prometheus input plugin. +- Trim BOM from config file for Windows support. +- Prometheus client output panic on service reload. +- Prometheus parser, protobuf format header fix. +- Prometheus output, metric refresh and caching fixes. +- Panic fix for multiple graphite outputs under very high load. +- Instrumental output has better reconnect behavior. +- Remove PID from procstat plugin to fix cardinality issues. +- Cassandra input: version 2.x "column family" fix. +- Shared WaitGroup in Exec plugin. +- logparser: honor modifiers in "pattern" config. +- logparser: error and exit on file permissions/missing errors. +- Make the user able to specify full path for HAproxy stats. +- Fix Redis url, an extra "tcp://" was added. +- Fix exec plugin panic when using single binary. +- Fixed incorrect prometheus metrics source selection. +- Set default Zookeeper chroot to empty string. +- Fix overall ping timeout to be calculated based on per-ping timeout. +- Change "default" retention policy to "". +- Graphite output mangling '%' character. +- Prometheus input plugin now supports x509 certs authentication. +- Fix systemd service. +- Fix influxdb n_shards counter. +- Fix potential kernel plugin integer parse error. +- Fix potential influxdb input type assertion panic. +- Still send processes metrics if a process exited during metric collection. +- disk plugin panic when usage grab fails. 
+- Removed leaked "database" tag on redis metrics. +- Processes plugin: fix potential error with /proc/net/stat directory. +- Fix rare RHEL 5.2 panic in gopsutil diskio gathering function. +- Remove IF NOT EXISTS from influxdb output database creation. +- Fix quoting with text values in postgresql_extensible plugin. +- Fix win_perf_counter "index out of range" panic. +- Fix ntpq panic when field is missing. +- Sanitize graphite output field names. +- Fix MySQL plugin not sending 0 value fields. diff --git a/content/telegraf/v1.9/administration/_index.md b/content/telegraf/v1.9/administration/_index.md new file mode 100644 index 000000000..b9ffa9cd3 --- /dev/null +++ b/content/telegraf/v1.9/administration/_index.md @@ -0,0 +1,21 @@ +--- + title: Administering Telegraf + + menu: + telegraf_1_9: + name: Administration + weight: 60 + +--- + +## [Configuring Telegraf](/telegraf/v1.9/administration/configuration/) + +[Configuring Telegraf](/telegraf/v1.9/administration/configuration/) discusses the Telegraf configuration file, enabling plugins, and setting environment variables. + +## [Running Telegraf as a Windows service](/telegraf/v1.9/administration/windows_service/) + +[Running Telegraf as a Windows service](/telegraf/v1.9/administration/windows_service/) describes how to use Telegraf as a Windows service. + +## [Troubleshooting Telegraf](/telegraf/v1.9/administration/troubleshooting/) + +[Troubleshooting Telegraf](/telegraf/v1.9/administration/troubleshooting/) shows you how to capture Telegraf output, submit sample metrics, and see how Telegraf formats and emits points to its output plugins. 
diff --git a/content/telegraf/v1.9/administration/configuration.md b/content/telegraf/v1.9/administration/configuration.md new file mode 100644 index 000000000..ce3c6836c --- /dev/null +++ b/content/telegraf/v1.9/administration/configuration.md @@ -0,0 +1,383 @@ +--- +title: Configuring Telegraf + +menu: + telegraf_1_9: + name: Configuring + weight: 20 + parent: Administration +--- + +The Telegraf configuration file (`telegraf.conf`) lists all of the available plugins. The current version is available here: +[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf) + +## Generating a Configuration File + +A default Telegraf configuration file can be auto-generated by Telegraf: + +``` +telegraf config > telegraf.conf +``` + +To generate a configuration file with specific inputs and outputs, you can use the +`--input-filter` and `--output-filter` flags: + +``` +telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config +``` + +## Environment variables + +Environment variables can be used anywhere in the configuration file by prepending them with `$`. For strings, the variables must be within quotes (i.e., `"$STR_VAR"`) and for numbers and Booleans they should be unquoted (i.e., `$INT_VAR`, `$BOOL_VAR`) + +Environment variables can be set using the Linux `export` command +(i.e., `export password=mypassword`). Using enviroment variables for sensitive +information is considered a best practice. + +## Configuration file locations + +The location of the configuration file can be set via the `--config` command +line flag. + +When the `--config-directory` command line flag is used, files ending with +`.conf` in the specified directory will also be included in the Telegraf +configuration. + +On most systems, the default locations are `/etc/telegraf/telegraf.conf` for +the main configuration file and `/etc/telegraf/telegraf.d` for the directory of +configuration files. 
+ +# Global tags + +Global tags can be specified in the `[global_tags]` section of the config file +in `key="value"` format. All metrics being gathered on this host will be tagged +with the tags specified here. + +## Agent configuration + +Telegraf has a few options you can configure under the `[agent]` section of the +config. + +* **interval**: Default data collection interval for all inputs +* **round_interval**: Rounds collection interval to 'interval' +For example, if interval="10s" then always collect on :00, :10, :20, etc. +* **metric_batch_size**: Telegraf will send metrics to output in batch of at +most `metric_batch_size` metrics. +* **metric_buffer_limit**: Telegraf will cache `metric_buffer_limit` metrics +for each output, and will flush this buffer on a successful write. +This should be a multiple of `metric_batch_size` and could not be less +than 2 times `metric_batch_size`. +* **collection_jitter**: Collection jitter is used to jitter +the collection by a random amount. +Each plugin will sleep for a random time within jitter before collecting. +This can be used to avoid many plugins querying things like sysfs at the +same time, which can have a measurable effect on the system. +* **flush_interval**: Default data flushing interval for all outputs. +You should not set this below +interval. Maximum `flush_interval` will be `flush_interval` + `flush_jitter` +* **flush_jitter**: Jitter the flush interval by a random amount. +This is primarily to avoid +large write spikes for users running a large number of Telegraf instances. +For example, a `flush_jitter` of 5s and `flush_interval` of 10s means flushes will happen every 10-15s. +* **precision**: By default, precision will be set to the same timestamp order +as the collection interval, with the maximum being 1s. Precision will NOT +be used for service inputs, such as `logparser` and `statsd`. Valid values are +`ns`, `us` (or `µs`), `ms`, and `s`. +* **logfile**: Specify the log file name. 
The empty string means to log to `stderr`.
+* **debug**: Run Telegraf in debug mode.
+* **quiet**: Run Telegraf in quiet mode (error messages only).
+* **hostname**: Override default hostname, if empty use `os.Hostname()`.
+* **omit_hostname**: If true, do not set the `host` tag in the Telegraf agent.
+
+## Input configuration
+
+The following config parameters are available for all inputs:
+
+* **interval**: How often to gather this metric. Normal plugins use a single
+global interval, but if one particular input should be run less or more often,
+you can configure that here.
+* **name_override**: Override the base name of the measurement.
+(Default is the name of the input).
+* **name_prefix**: Specifies a prefix to attach to the measurement name.
+* **name_suffix**: Specifies a suffix to attach to the measurement name.
+* **tags**: A map of tags to apply to a specific input's measurements.
+
+## Output configuration
+
+There are no generic configuration options available for all outputs.
+
+## Aggregator configuration
+
+The following config parameters are available for all aggregators:
+
+* **period**: The period on which to flush & clear each aggregator. All metrics
+that are sent with timestamps outside of this period will be ignored by the
+aggregator.
+* **delay**: The delay before each aggregator is flushed. This is to control
+how long for aggregators to wait before receiving metrics from input plugins,
+in the case that aggregators are flushing and inputs are gathering on the
+same interval.
+* **drop_original**: If true, the original metric will be dropped by the
+aggregator and will not get sent to the output plugins.
+* **name_override**: Override the base name of the measurement.
+(Default is the name of the input).
+* **name_prefix**: Specifies a prefix to attach to the measurement name.
+* **name_suffix**: Specifies a suffix to attach to the measurement name.
+* **tags**: A map of tags to apply to a specific input's measurements.
+ +## Processor configuration + +The following config parameters are available for all processors: + +* **order**: This is the order in which processors are executed. If this +is not specified, then processor execution order will be random. + +#### Measurement filtering + +Filters can be configured per input, output, processor, or aggregator, +see below for examples. + +* **namepass**: +An array of glob pattern strings. Only points whose measurement name matches +a pattern in this list are emitted. +* **namedrop**: +The inverse of `namepass`. If a match is found the point is discarded. This +is tested on points after they have passed the `namepass` test. +* **fieldpass**: +An array of glob pattern strings. Only fields whose field key matches a +pattern in this list are emitted. Not available for outputs. +* **fielddrop**: +The inverse of `fieldpass`. Fields with a field key matching one of the +patterns will be discarded from the point. Not available for outputs. +* **tagpass**: +A table mapping tag keys to arrays of glob pattern strings. Only points +that contain a tag key in the table and a tag value matching one of its +patterns is emitted. +* **tagdrop**: +The inverse of `tagpass`. If a match is found the point is discarded. This +is tested on points after they have passed the `tagpass` test. +* **taginclude**: +An array of glob pattern strings. Only tags with a tag key matching one of +the patterns are emitted. In contrast to `tagpass`, which will pass an entire +point based on its tag, `taginclude` removes all non matching tags from the +point. This filter can be used on both inputs & outputs, but it is +_recommended_ to be used on inputs, as it is more efficient to filter out tags +at the ingestion point. +* **tagexclude**: +The inverse of `taginclude`. Tags with a tag key matching one of the patterns +will be discarded from the point. 
+ +**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters +must be defined at the _end_ of the plugin definition, otherwise subsequent +plugin config options will be interpreted as part of the tagpass/tagdrop +tables. + +#### Input configuration examples + +This is a full working config that will output CPU data to an InfluxDB instance +at `192.168.59.103:8086`, tagging measurements with `dc="denver-1"`. It will output +measurements at a 10s interval and will collect per-cpu data, dropping any +fields which begin with `time_`. + +```toml +[global_tags] + dc = "denver-1" + +[agent] + interval = "10s" + +# OUTPUTS +[[outputs.influxdb]] + url = "http://192.168.59.103:8086" # required. + database = "telegraf" # required. + precision = "s" + +# INPUTS +[[inputs.cpu]] + percpu = true + totalcpu = false + # filter all fields beginning with 'time_' + fielddrop = ["time_*"] +``` + +#### Input Config: `tagpass` and `tagdrop` + +**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of +the plugin definition, otherwise subsequent plugin config options will be +interpreted as part of the tagpass/tagdrop map. + +```toml +[[inputs.cpu]] + percpu = true + totalcpu = false + fielddrop = ["cpu_time"] + # Don't collect CPU data for cpu6 & cpu7 + [inputs.cpu.tagdrop] + cpu = [ "cpu6", "cpu7" ] + +[[inputs.disk]] + [inputs.disk.tagpass] + # tagpass conditions are OR, not AND. 
+ # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home) + # then the metric passes + fstype = [ "ext4", "xfs" ] + # Globs can also be used on the tag values + path = [ "/opt", "/home*" ] +``` + +#### Input Config: `fieldpass` and `fielddrop` + +```toml +# Drop all metrics for guest & steal CPU usage +[[inputs.cpu]] + percpu = false + totalcpu = true + fielddrop = ["usage_guest", "usage_steal"] + +# Only store inode related metrics for disks +[[inputs.disk]] + fieldpass = ["inodes*"] +``` + +#### Input Config: `namepass` and `namedrop` + +```toml +# Drop all metrics about containers for kubelet +[[inputs.prometheus]] + urls = ["http://kube-node-1:4194/metrics"] + namedrop = ["container_*"] + +# Only store rest client related metrics for kubelet +[[inputs.prometheus]] + urls = ["http://kube-node-1:4194/metrics"] + namepass = ["rest_client_*"] +``` + +#### Input Config: `taginclude` and `tagexclude` + +```toml +# Only include the "cpu" tag in the measurements for the cpu plugin. +[[inputs.cpu]] + percpu = true + totalcpu = true + taginclude = ["cpu"] + +# Exclude the `fstype` tag from the measurements for the disk plugin. +[[inputs.disk]] + tagexclude = ["fstype"] +``` + +#### Input config: `prefix`, `suffix`, and `override` + +This plugin will emit measurements with the name `cpu_total`. + +```toml +[[inputs.cpu]] + name_suffix = "_total" + percpu = false + totalcpu = true +``` + +This will emit measurements with the name `foobar`. + +```toml +[[inputs.cpu]] + name_override = "foobar" + percpu = false + totalcpu = true +``` + +#### Input config: tags + +This plugin will emit measurements with two additional tags: `tag1=foo` and +`tag2=bar`. + +NOTE: Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the +plugin definition. 
+ +```toml +[[inputs.cpu]] + percpu = false + totalcpu = true + [inputs.cpu.tags] + tag1 = "foo" + tag2 = "bar" +``` + +#### Multiple inputs of the same type + +Additional inputs (or outputs) of the same type can be specified by defining these instances in the configuration file. To avoid measurement collisions, use the `name_override`, `name_prefix`, or `name_suffix` config options: + +```toml +[[inputs.cpu]] + percpu = false + totalcpu = true + +[[inputs.cpu]] + percpu = true + totalcpu = false + name_override = "percpu_usage" + fielddrop = ["cpu_time*"] +``` + +#### Output configuration examples: + +```toml +[[outputs.influxdb]] + urls = [ "http://localhost:8086" ] + database = "telegraf" + precision = "s" + # Drop all measurements that start with "aerospike" + namedrop = ["aerospike*"] + +[[outputs.influxdb]] + urls = [ "http://localhost:8086" ] + database = "telegraf-aerospike-data" + precision = "s" + # Only accept aerospike data: + namepass = ["aerospike*"] + +[[outputs.influxdb]] + urls = [ "http://localhost:8086" ] + database = "telegraf-cpu0-data" + precision = "s" + # Only store measurements where the tag "cpu" matches the value "cpu0" + [outputs.influxdb.tagpass] + cpu = ["cpu0"] +``` + +#### Aggregator Configuration Examples: + +This will collect and emit the min/max of the system load1 metric every +30s, dropping the originals. + +```toml +[[inputs.system]] + fieldpass = ["load1"] # collects system load1 metric. + +[[aggregators.minmax]] + period = "30s" # send & clear the aggregate every 30s. + drop_original = true # drop the original metrics. + +[[outputs.file]] + files = ["stdout"] +``` + +This will collect and emit the min/max of the swap metrics every +30s, dropping the originals. The aggregator will not be applied +to the system load metrics due to the `namepass` parameter. + +```toml +[[inputs.swap]] + +[[inputs.system]] + fieldpass = ["load1"] # collects system load1 metric. 
+ +[[aggregators.minmax]] + period = "30s" # send & clear the aggregate every 30s. + drop_original = true # drop the original metrics. + namepass = ["swap"] # only "pass" swap metrics through the aggregator. + +[[outputs.file]] + files = ["stdout"] +``` diff --git a/content/telegraf/v1.9/administration/enterprise-plugins.md b/content/telegraf/v1.9/administration/enterprise-plugins.md new file mode 100644 index 000000000..b37ad02be --- /dev/null +++ b/content/telegraf/v1.9/administration/enterprise-plugins.md @@ -0,0 +1,18 @@ +--- +title: Recommended Telegraf plugins for Enterprise users + +menu: + telegraf_1_9: + name: Recommended plugins for Enterprise users + weight: 20 + parent: Administration +draft: true +--- + +The Telegraf configuration file (`telegraf.conf`) lists all of the available plugins. The current version is available here: +[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf) + +## Core Telegraf plugins for Enterprise users + + +## Optional Telegraf plugins for Enterprise users diff --git a/content/telegraf/v1.9/administration/troubleshooting.md b/content/telegraf/v1.9/administration/troubleshooting.md new file mode 100644 index 000000000..983698563 --- /dev/null +++ b/content/telegraf/v1.9/administration/troubleshooting.md @@ -0,0 +1,89 @@ +--- +title: Troubleshooting Telegraf + +menu: + telegraf_1_9: + name: Troubleshooting + weight: 30 + parent: Administration +--- + +This guide will show you how to capture Telegraf output, submit sample metrics, and see how Telegraf formats and emits points to its output plugins. + +## Capture output + +A quick way to view Telegraf output is by enabling a new UDP output plugin to run in parallel with the existing output plugins. Since each output plugin creates its own stream, the already existing outputs will not be affected. Traffic will be replicated to all active outputs. 
+ +> **NOTE:** This approach requires Telegraf to be restarted, which will cause a brief interruption to your metrics collection. + +The minimal Telegraf configuration required to enable a UDP output is: + +``` +[[outputs.influxdb]] + urls = ["udp://localhost:8089"] +``` + +This setup utilizes the UDP format of the [InfluxDB output plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb) and emits points formatted in InfluxDB's [line protocol](/influxdb/latest/concepts/glossary/#line-protocol). +You will need to append this section to the Telegraf configuration file and restart Telegraf for the change to take effect. + +Now you are ready to start listening on the destination port (`8089` in this example) using a simple tool like `netcat`: + +``` +nc -lup 8089 +``` + +`nc` will print the exact Telegraf output on stdout. +You can also direct the output to a file for further inspection: + +``` +nc -lup 8089 > telegraf_dump.txt +``` + +## Submit test inputs + +Once you have Telegraf's output arriving to your `nc` socket, you can enable the [inputs.socket_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener) plugins to submit some sample metrics. + +Append the TCP or UDP input section to Telegraf's config file and restart Telegraf for the change to take effect. + +``` + [[inputs.socket_listener]] + service_address = "tcp://:8094" + data_format = "influx" +``` + +Submit sample data to the Telegraf socket listener: + +``` +echo 'mymeasurement,my_tag_key=mytagvalue my_field="my field value"' | nc localhost 8094 +``` + +The output from your `netcat` listener will look like the following: + +``` +mymeasurement,host=kubuntu,my_tag_key=mytagvalue my_field="my field value" 1478106104713745634 +``` + +## Testing other plugins + +The same approach can be used to test other plugins, like the [inputs.statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd) plugin. 
+ +Here is a basic configuration example of how to set up the Telegraf statsd input plugin: + +``` + [[inputs.statsd]] + service_address = ":8125" + metric_separator = "_" + allowed_pending_messages = 10000 +``` + +Sending a sample metric to the Telegraf statsd port: + +``` +echo "a.b.c:1|g" | nc -u localhost 8125 +``` + +The output from `nc` will look like the following: + +``` +a_b_c,host=myserver,metric_type=gauge value=1 1478106500000000000 +``` diff --git a/content/telegraf/v1.9/administration/windows_service.md b/content/telegraf/v1.9/administration/windows_service.md new file mode 100644 index 000000000..6f504d536 --- /dev/null +++ b/content/telegraf/v1.9/administration/windows_service.md @@ -0,0 +1,48 @@ +--- +title: Running Telegraf as a Windows service +description: How to configure Telegraf as a Windows service. +menu: + telegraf_1_9: + name: Running as Windows service + weight: 20 + parent: Administration +--- + +# Running Telegraf as a Windows service + +Telegraf natively supports running as a Windows service. Outlined below are +the general steps to set it up. + +1. Obtain the Telegraf distribution for Windows. +2. Create the directory `C:\Program Files\Telegraf` (if you install in a different location, specify the `-config` parameter with the desired location) +3. Place the `telegraf.exe` and the `telegraf.conf` files into `C:\Program Files\Telegraf`. +4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator. If necessary, you can wrap any spaces in the file directories in double quotes `""`: + + ``` + > C:\"Program Files"\Telegraf\telegraf.exe --service install + ``` + +5. Edit the configuration file to meet your requirements. + +6. To verify that it works, run: + + ``` + > C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test + ``` + +7. 
To start collecting data, run: + + ``` + > net start telegraf + ``` + +## Other supported operations + +Telegraf can manage its own service through the `--service` flag: + +| Command | Effect | +|------------------------------------|-------------------------------| +| `telegraf.exe --service install` | Install telegraf as a service | +| `telegraf.exe --service uninstall` | Remove the telegraf service | +| `telegraf.exe --service start` | Start the telegraf service | +| `telegraf.exe --service stop` | Stop the telegraf service | diff --git a/content/telegraf/v1.9/concepts/_index.md b/content/telegraf/v1.9/concepts/_index.md new file mode 100644 index 000000000..47d7c0fa1 --- /dev/null +++ b/content/telegraf/v1.9/concepts/_index.md @@ -0,0 +1,21 @@ +--- +title: Key Telegraf concepts +description: This section discusses key concepts about Telegraf, including information on supported input data formats, output data formats, aggregator and processor plugins, and includes a glossary of important terms. +menu: + telegraf_1_9: + name: Concepts + weight: 30 +--- +This section discusses key concepts about Telegraf, the plug-in driven server agent component of the InfluxData time series platform. Topics covered include metrics, aggregator and processor plugins, and a glossary of important terms. + +## [Telegraf metrics](/telegraf/v1.9/concepts/metrics/) + +[Telegraf metrics](/telegraf/v1.9/concepts/metrics/) are internal representations used to model data during processing. + +## [Telegraf aggregator and processor plugins](/telegraf/v1.9/concepts/aggregator_processor_plugins/) + +[Telegraf aggregator and processor plugins](/telegraf/v1.9/concepts/aggregator_processor_plugins/) work between the input plugins and output plugins to aggregate and process metrics in Telegraf. + +## [Glossary of terms (for Telegraf)](/telegraf/v1.9/concepts/glossary/) + +This section includes definitions of important terms for related to Telegraf. 
diff --git a/content/telegraf/v1.9/concepts/aggregator_processor_plugins.md b/content/telegraf/v1.9/concepts/aggregator_processor_plugins.md new file mode 100644 index 000000000..518a5a4f8 --- /dev/null +++ b/content/telegraf/v1.9/concepts/aggregator_processor_plugins.md @@ -0,0 +1,62 @@ +--- +title: Telegraf aggregator and processor plugins +description: Use Telegraf aggregator and processor plugins to aggregate and process data between the input plugins and output plugins. +menu: + telegraf_1_9: + name: Aggregator and processor plugins + weight: 20 + parent: Concepts +--- + +Besides the input plugins and output plugins, Telegraf includes aggregator and processor plugins, which are used to aggregate and process metrics as they pass through Telegraf. + +``` +┌───────────┐ +│ │ +│ CPU │───┐ +│ │ │ +└───────────┘ │ + │ +┌───────────┐ │ ┌───────────┐ +│ │ │ │ │ +│ Memory │───┤ ┌──▶│ InfluxDB │ +│ │ │ │ │ │ +└───────────┘ │ ┌─────────────┐ ┌─────────────┐ │ └───────────┘ + │ │ │ │Aggregate │ │ +┌───────────┐ │ │Process │ │ - mean │ │ ┌───────────┐ +│ │ │ │ - transform │ │ - quantiles │ │ │ │ +│ MySQL │───┼──▶│ - decorate │────▶│ - min/max │───┼──▶│ File │ +│ │ │ │ - filter │ │ - count │ │ │ │ +└───────────┘ │ │ │ │ │ │ └───────────┘ + │ └─────────────┘ └─────────────┘ │ +┌───────────┐ │ │ ┌───────────┐ +│ │ │ │ │ │ +│ SNMP │───┤ └──▶│ Kafka │ +│ │ │ │ │ +└───────────┘ │ └───────────┘ + │ +┌───────────┐ │ +│ │ │ +│ Docker │───┘ +│ │ +└───────────┘ +``` + +**Processor plugins** process metrics as they pass through and immediately emit +results based on the values they process. For example, this could be printing +all metrics or adding a tag to all metrics that pass through. + +**Aggregator plugins**, on the other hand, are a bit more complicated. Aggregators +are typically for emitting new _aggregate_ metrics, such as a running mean, +minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_ +plugins are configured with a `period`. 
The `period` is the size of the window +of metrics that each _aggregate_ represents. In other words, the emitted +_aggregate_ metric will be the aggregated value of the past `period` seconds. +Since many users will only care about their aggregates and not every single metric +gathered, there is also a `drop_original` argument, which tells Telegraf to only +emit the aggregates and not the original metrics. + +**NOTE** Since aggregator plugins only aggregate metrics within their periods, +historical data is not supported. In other words, if your metric timestamp is more +than `now() - period` in the past, it will not be aggregated. If this is a feature +that you need, please comment on this [GitHub issue](https://github.com/influxdata/telegraf/issues/1992). diff --git a/content/telegraf/v1.9/concepts/glossary.md b/content/telegraf/v1.9/concepts/glossary.md new file mode 100644 index 000000000..0282d843b --- /dev/null +++ b/content/telegraf/v1.9/concepts/glossary.md @@ -0,0 +1,103 @@ +--- +title: Telegraf glossary of terms +description: This section includes definitions of important terms for related to Telegraf, the plug-in driven server agent component of the InfluxData time series platform. +menu: + telegraf_1_99: + name: Glossary of terms + weight: 30 + parent: Concepts +--- + +## agent + +An agent is the core part of Telegraf that gathers metrics from the declared input plugins and sends metrics to the declared output plugins, based on the plugins enabled by the given configuration. + +Related entries: [input plugin](/telegraf/v1.9/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin) + +## aggregator plugin + +Aggregator plugins receive raw metrics from input plugins and create aggregate metrics from them. +The aggregate metrics are then passed to the configured output plugins. 
+ +Related entries: [input plugin](/telegraf/v1.9/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin), [processor plugin](/telegraf/v1.9/concepts/glossary/#processor-plugin) + +## batch size + +The Telegraf agent sends metrics to output plugins in batches, not individually. +The batch size controls the size of each write batch that Telegraf sends to the output plugins. + +Related entries: [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin) + +## collection interval + +The default global interval for collecting data from each input plugin. +The collection interval can be overridden by each individual input plugin's configuration. + +Related entries: [input plugin](/telegraf/v1.9/concepts/glossary/#input-plugin) + +## collection jitter + +Collection jitter is used to prevent every input plugin from collecting metrics simultaneously, which can have a measurable effect on the system. +Each collection interval, every input plugin will sleep for a random time between zero and the collection jitter before collecting the metrics. + +Related entries: [collection interval](/telegraf/v1.9/concepts/glossary/#collection-interval), [input plugin](/telegraf/v1.9/concepts/glossary/#input-plugin) + +## flush interval + +The global interval for flushing data from each output plugin to its destination. +This value should not be set lower than the collection interval. + +Related entries: [collection interval](/telegraf/v1.9/concepts/glossary/#collection-interval), [flush jitter](/telegraf/v1.9/concepts/glossary/#flush-jitter), [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin) + +## flush jitter + +Flush jitter is used to prevent every output plugin from sending writes simultaneously, which can overwhelm some data sinks. +Each flush interval, every output plugin will sleep for a random time between zero and the flush jitter before emitting metrics. 
+This helps smooth out write spikes when running a large number of Telegraf instances. + +Related entries: [flush interval](/telegraf/v1.9/concepts/glossary/#flush-interval), [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin) + +## input plugin + +Input plugins actively gather metrics and deliver them to the core agent, where aggregator, processor, and output plugins can operate on the metrics. +In order to activate an input plugin, it needs to be enabled and configured in Telegraf's configuration file. + +Related entries: [aggregator plugin](/telegraf/v1.9/concepts/glossary/#aggregator-plugin), [collection interval](/telegraf/v1.9/concepts/glossary/#collection-interval), [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin), [processor plugin](/telegraf/v1.9/concepts/glossary/#processor-plugin) + +## metric buffer + +The metric buffer caches individual metrics when writes are failing for an output plugin. +Telegraf will attempt to flush the buffer upon a successful write to the output. +The oldest metrics are dropped first when this buffer fills. + +Related entries: [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin) + +## output plugin + +Output plugins deliver metrics to their configured destination. In order to activate an output plugin, it needs to be enabled and configured in Telegraf's configuration file. + +Related entries: [aggregator plugin](/telegraf/v1.9/concepts/glossary/#aggregator-plugin), [flush interval](/telegraf/v1.9/concepts/glossary/#flush-interval), [input plugin](/telegraf/v1.9/concepts/glossary/#input-plugin), [processor plugin](/telegraf/v1.9/concepts/glossary/#processor-plugin) + +## precision + +The precision configuration setting determines how much timestamp precision is retained in the points received from input plugins. All incoming timestamps are truncated to the given precision. 
+Telegraf then pads the truncated timestamps with zeros to create a nanosecond timestamp; output plugins will emit timestamps in nanoseconds. +Valid precisions are `ns`, `us` or `µs`, `ms`, and `s`. + +For example, if the precision is set to `ms`, the nanosecond epoch timestamp `1480000000123456789` would be truncated to `1480000000123` in millisecond precision and then padded with zeroes to make a new, less precise nanosecond timestamp of `1480000000123000000`. +Output plugins do not alter the timestamp further. The precision setting is ignored for service input plugins. + +Related entries: [aggregator plugin](/telegraf/v1.9/concepts/glossary/#aggregator-plugin), [input plugin](/telegraf/v1.9/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin), [processor plugin](/telegraf/v1.9/concepts/glossary/#processor-plugin), [service input plugin](/telegraf/v1.9/concepts/glossary/#service-input-plugin) + +## processor plugin + +Processor plugins transform, decorate, and/or filter metrics collected by input plugins, passing the transformed metrics to the output plugins. + +Related entries: [aggregator plugin](/telegraf/v1.9/concepts/glossary/#aggregator-plugin), [input plugin](/telegraf/v1.9/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin) + +## service input plugin + +Service input plugins are input plugins that run in a passive collection mode while the Telegraf agent is running. +They listen on a socket for known protocol inputs, or apply their own logic to ingested metrics before delivering them to the Telegraf agent. 
+ +Related entries: [aggregator plugin](/telegraf/v1.9/concepts/glossary/#aggregator-plugin), [input plugin](/telegraf/v1.9/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.9/concepts/glossary/#output-plugin), [processor plugin](/telegraf/v1.9/concepts/glossary/#processor-plugin) diff --git a/content/telegraf/v1.9/concepts/metrics.md b/content/telegraf/v1.9/concepts/metrics.md new file mode 100644 index 000000000..9b23b3de8 --- /dev/null +++ b/content/telegraf/v1.9/concepts/metrics.md @@ -0,0 +1,28 @@ +--- +title: Telegraf metrics +description: Telegraf metrics are internal representations used to model data during processing and are based on InfluxDB's data model. Each metric component includes the measurement name, tags, fields, and timestamp. +menu: + telegraf_1_9: + name: Metrics + weight: 10 + parent: Concepts +--- + +Telegraf metrics are the internal representation used to model data during +processing. These metrics are closely based on InfluxDB's data model and contain +four main components: + +- **Measurement name**: Description and namespace for the metric. +- **Tags**: Key/Value string pairs and usually used to identify the + metric. +- **Fields**: Key/Value pairs that are typed and usually contain the + metric data. +- **Timestamp**: Date and time associated with the fields. + +This metric type exists only in memory and must be converted to a concrete +representation in order to be transmitted or viewed. Telegraf provides [output data formats][output data formats] (also known as *serializers*) for these conversions. Telegraf's default serializer converts to [InfluxDB Line +Protocol][line protocol], which provides a high performance and one-to-one +direct mapping from Telegraf metrics. 
+ +[output data formats]: /telegraf/v1.9/data_formats/output/ +[line protocol]: /telegraf/v1.9/data_formats/output/influx/ diff --git a/content/telegraf/v1.9/data_formats/_index.md b/content/telegraf/v1.9/data_formats/_index.md new file mode 100644 index 000000000..1a74d6f46 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/_index.md @@ -0,0 +1,21 @@ +--- +title: Telegraf data formats +description: Telegraf supports input data formats and output data formats for converting input and output data. +menu: + telegraf_1_9: + name: Data formats + weight: 50 +--- +This section covers the input data formats and output data formats used in the Telegraf plugin-driven server agent component of the InfluxData time series platform. + +## [Telegraf input data formats](/telegraf/v1.9/data_formats/input/) + +[Telegraf input data formats](/telegraf/v1.9/data_formats/input/) supports parsing input data formats into metrics for InfluxDB Line Protocol, JSON, Graphite, Value, Nagios, Collectd, and Dropwizard. + +## [Telegraf output data formats](/telegraf/v1.9/data_formats/output/) + +[Telegraf output data formats](/telegraf/v1.9/data_formats/output/) can serialize metrics into output data formats for InfluxDB Line Protocol, JSON, and Graphite. + +## [Telegraf template patterns](/telegraf/v1.9/data_formats/template-patterns/) + +[Telegraf template patterns](/telegraf/v1.9/data_formats/template-patterns/) are used to define templates for use with parsing and serializing data formats in Telegraf. diff --git a/content/telegraf/v1.9/data_formats/input/_index.md b/content/telegraf/v1.9/data_formats/input/_index.md new file mode 100644 index 000000000..15317466d --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/_index.md @@ -0,0 +1,46 @@ +--- +title: Telegraf input data formats +description: Telegraf supports parsing input data formats into Telegraf metrics for InfluxDB Line Protocol, CollectD, CSV, Dropwizard, Graphite, Grok, JSON, Logfmt, Nagios, Value, and Wavefront. 
+menu: + telegraf_1_9: + name: Input data formats + weight: 1 + parent: Data formats +--- + +Telegraf contains many general purpose plugins that support parsing input data +using a configurable parser into [metrics][]. This allows, for example, the +`kafka_consumer` input plugin to process messages in either InfluxDB Line +Protocol or in JSON format. Telegraf supports the following input data formats: + +- [InfluxDB Line Protocol](/telegraf/v1.9/data_formats/input/influx/) +- [collectd](/telegraf/v1.9/data_formats/input/collectd/) +- [CSV](/telegraf/v1.9/data_formats/input/csv/) +- [Dropwizard](/telegraf/v1.9/data_formats/input/dropwizard/) +- [Graphite](/telegraf/v1.9/data_formats/input/graphite/) +- [Grok](/telegraf/v1.9/data_formats/input/grok/) +- [JSON](/telegraf/v1.9/data_formats/input/json/) +- [logfmt](/telegraf/v1.9/data_formats/input/logfmt/) +- [Nagios](/telegraf/v1.9/data_formats/input/nagios/) +- [Value](/telegraf/v1.9/data_formats/input/value/), ie: 45 or "booyah" +- [Wavefront](/telegraf/v1.9/data_formats/input/wavefront/) + +Any input plugin containing the `data_format` option can use it to select the +desired parser: + +```toml +[[inputs.exec]] + ## Commands array + commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] + + ## measurement name suffix (for separating different commands) + name_suffix = "_mycollector" + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "json" +``` + +[metrics]: /telegraf/v1.9/concepts/metrics/ diff --git a/content/telegraf/v1.9/data_formats/input/collectd.md b/content/telegraf/v1.9/data_formats/input/collectd.md new file mode 100644 index 000000000..38c1a4488 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/collectd.md @@ -0,0 +1,48 @@ +--- +title: Collectd input data format +description: Use the collectd input data format to parse the collectd network binary protocol to create tags for host, instance, type, and type instance. +menu: + telegraf_1_9: + name: collectd + weight: 10 + parent: Input data formats +--- + +The collectd input data format parses the collectd network binary protocol to create tags for host, instance, type, and type instance. All collectd values are added as float64 fields. + +For more information, see [binary protocol](https://collectd.org/wiki/index.php/Binary_protocol) in the collectd Wiki. + +You can control the cryptographic settings with parser options. +Create an authentication file and set `collectd_auth_file` to the path of the file, then set the desired security level in `collectd_security_level`. + +For more information, including client setup, see +[Cryptographic setup](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup) in the collectd Wiki. + +You can also change the path to the typesdb or add additional typesdb using +`collectd_typesdb`. + +## Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "collectd" + + ## Authentication file for cryptographic security levels + collectd_auth_file = "/etc/collectd/auth_file" + ## One of none (default), sign, or encrypt + collectd_security_level = "encrypt" + ## Path of to TypesDB specifications + collectd_typesdb = ["/usr/share/collectd/types.db"] + + ## Multi-value plugins can be handled two ways. + ## "split" will parse and store the multi-value plugin data into separate measurements + ## "join" will parse and store the multi-value plugin as a single multi-value measurement. + ## "split" is the default behavior for backward compatability with previous versions of influxdb. + collectd_parse_multivalue = "split" +``` diff --git a/content/telegraf/v1.9/data_formats/input/csv.md b/content/telegraf/v1.9/data_formats/input/csv.md new file mode 100644 index 000000000..0a8927aaf --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/csv.md @@ -0,0 +1,111 @@ +--- +title: CSV input data format +description: Use the "csv" input data format to parse a document containing comma-separated values into Telegraf metrics. +menu: + telegraf_1_9: + name: CSV + weight: 20 + parent: Input data formats +--- + +The CSV input data format parses documents containing comma-separated values into Telegraf metrics. + +## Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "csv" + + ## Indicates how many rows to treat as a header. By default, the parser assumes + ## there is no header and will parse the first row as data. 
If set to anything more + ## than 1, column names will be concatenated with the name listed in the next header row. + ## If `csv_column_names` is specified, the column names in header will be overridden. + csv_header_row_count = 0 + + ## For assigning custom names to columns + ## If this is specified, all columns should have a name + ## Unnamed columns will be ignored by the parser. + ## If `csv_header_row_count` is set to 0, this config must be used + csv_column_names = [] + + ## Indicates the number of rows to skip before looking for header information. + csv_skip_rows = 0 + + ## Indicates the number of columns to skip before looking for data to parse. + ## These columns will be skipped in the header as well. + csv_skip_columns = 0 + + ## The seperator between csv fields + ## By default, the parser assumes a comma (",") + csv_delimiter = "," + + ## The character reserved for marking a row as a comment row + ## Commented rows are skipped and not parsed + csv_comment = "" + + ## If set to true, the parser will remove leading whitespace from fields + ## By default, this is false + csv_trim_space = false + + ## Columns listed here will be added as tags. Any other columns + ## will be added as fields. + csv_tag_columns = [] + + ## The column to extract the name of the metric from + csv_measurement_column = "" + + ## The column to extract time information for the metric + ## `csv_timestamp_format` must be specified if this is used + csv_timestamp_column = "" + + ## The format of time data extracted from `csv_timestamp_column` + ## this must be specified if `csv_timestamp_column` is specified + csv_timestamp_format = "" + ``` +### csv_timestamp_column, csv_timestamp_format + +By default the current time will be used for all created metrics, to set the +time using the JSON document you can use the `csv_timestamp_column` and +`csv_timestamp_format` options together to set the time to a value in the parsed +document. 
+ +The `csv_timestamp_column` option specifies the column name containing the +time value and `csv_timestamp_format` must be set to a Go "reference time" +which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`. + +Consult the Go [time][time parse] package for details and additional examples +on how to set the time format. + +## Metrics + +One metric is created for each row with the columns added as fields. The type +of the field is automatically determined based on the contents of the value. + +## Examples + +Config: +``` +[[inputs.file]] + files = ["example"] + data_format = "csv" + csv_header_row_count = 1 + csv_timestamp_column = "time" + csv_timestamp_format = "2006-01-02T15:04:05Z07:00" +``` + +Input: +``` +measurement,cpu,time_user,time_system,time_idle,time +cpu,cpu0,42,42,42,2018-09-13T13:03:28Z +``` + +Output: +``` +cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000 +``` diff --git a/content/telegraf/v1.9/data_formats/input/dropwizard.md b/content/telegraf/v1.9/data_formats/input/dropwizard.md new file mode 100644 index 000000000..d286d7434 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/dropwizard.md @@ -0,0 +1,179 @@ +--- +title: Dropwizard input data format +description: Use the "dropwizard" input data format to parse Dropwizard JSON representations into Telegraf metrics. +menu: + telegraf_1_9: + name: Dropwizard + weight: 30 + parent: Input data formats +--- + +The `dropwizard` data format can parse a [Dropwizard JSON representation][dropwizard] representation of a single metrics registry. By default, tags are parsed from metric names as if they were actual InfluxDB Line Protocol keys (`measurement<,tag_set>`) which can be overridden using custom [template patterns][templates]. All field value types are supported, including `string`, `number` and `boolean`. 
+ +[templates]: /telegraf/v1.9/data_formats/template-patterns/ +[dropwizard]: http://metrics.dropwizard.io/3.1.0/manual/json/ + +## Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "dropwizard" + + ## Used by the templating engine to join matched values when cardinality is > 1 + separator = "_" + + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. There can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag(s) + ## 3. filter + template with field key + ## 4. 
default template + ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>) + templates = [] + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the metric registry within the JSON document + # dropwizard_metric_registry_path = "metrics" + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the default time of the measurements within the JSON document + # dropwizard_time_path = "time" + # dropwizard_time_format = "2006-01-02T15:04:05Z07:00" + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the tags map within the JSON document + # dropwizard_tags_path = "tags" + + ## You may even use tag paths per tag + # [inputs.exec.dropwizard_tag_paths] + # tag1 = "tags.tag1" + # tag2 = "tags.tag2" +``` + + +## Examples + +A typical JSON of a dropwizard metric registry: + +```json +{ + "version": "3.0.0", + "counters" : { + "measurement,tag1=green" : { + "count" : 1 + } + }, + "meters" : { + "measurement" : { + "count" : 1, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "units" : "events/second" + } + }, + "gauges" : { + "measurement" : { + "value" : 1 + } + }, + "histograms" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + "p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0 + } + }, + "timers" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + "p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "duration_units" : "seconds", + "rate_units" : "calls/second" + } + } +} +``` + +Would get translated into 4 different measurements: + +``` 
+measurement,metric_type=counter,tag1=green count=1 +measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 +measurement,metric_type=gauge value=1 +measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0 +measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 +``` + +You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field. +Eg. to parse the following JSON document: + +```json +{ + "time" : "2017-02-22T14:33:03.662+02:00", + "tags" : { + "tag1" : "green", + "tag2" : "yellow" + }, + "metrics" : { + "counters" : { + "measurement" : { + "count" : 1 + } + }, + "meters" : {}, + "gauges" : {}, + "histograms" : {}, + "timers" : {} + } +} +``` +and translate it into: + +``` +measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000 +``` + +you simply need to use the following additional configuration properties: + +```toml +dropwizard_metric_registry_path = "metrics" +dropwizard_time_path = "time" +dropwizard_time_format = "2006-01-02T15:04:05Z07:00" +dropwizard_tags_path = "tags" +## tag paths per tag are supported too, eg. +#[inputs.yourinput.dropwizard_tag_paths] +# tag1 = "tags.tag1" +# tag2 = "tags.tag2" +``` diff --git a/content/telegraf/v1.9/data_formats/input/graphite.md b/content/telegraf/v1.9/data_formats/input/graphite.md new file mode 100644 index 000000000..e3f486963 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/graphite.md @@ -0,0 +1,55 @@ +--- +title: Graphite input data format +description: Us the Graphite data format to translate Graphite dot buckets directly into Telegraf measurement names, with a single value field, and without any tags. 
+menu: + telegraf_1_9: + name: Graphite + weight: 40 + parent: Input data formats +--- + +The Graphite data format translates Graphite *dot* buckets directly into +Telegraf measurement names, with a single value field, and without any tags. +By default, the separator is left as `.`, but this can be changed using the +`separator` argument. For more advanced options, Telegraf supports specifying +[templates](#templates) to translate graphite buckets into Telegraf metrics. + +## Configuration + +```toml +[[inputs.exec]] + ## Commands array + commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] + + ## measurement name suffix (for separating different commands) + name_suffix = "_mycollector" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "graphite" + + ## This string will be used to join the matched values. + separator = "_" + + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. There can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag(s) + ## 3. filter + template with field key + ## 4. default template + templates = [ + "*.app env.service.resource.measurement", + "stats.* .host.measurement* region=eu-east,agent=sensu", + "stats2.* .host.measurement.field", + "measurement*" + ] +``` + +### templates + +For information on creating templates, see [Template patterns](/telegraf/v1.9/data_formats/template-patterns/). 
diff --git a/content/telegraf/v1.9/data_formats/input/grok.md b/content/telegraf/v1.9/data_formats/input/grok.md new file mode 100644 index 000000000..6a438bbd2 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/grok.md @@ -0,0 +1,226 @@ +--- +title: Grok input data format +description: Use the grok data format to parse line-delimited data using a regular expression-like language. +menu: + telegraf_1_9: + name: Grok + weight: 40 + parent: Input data formats +--- + +The grok data format parses line delimited data using a regular expression-like +language. + +If you need to become familiar with grok patterns, see [Grok Basics](https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html#_grok_basics) +in the Logstash documentation. The grok parser uses a slightly modified version of logstash "grok" +patterns, using the format: + +``` +%{<capture_syntax>[:<semantic_name>][:<modifier>]} +``` + +The `capture_syntax` defines the grok pattern that is used to parse the input +line and the `semantic_name` is used to name the field or tag. The extension +`modifier` controls the data type that the parsed item is converted to or +other special handling. + +By default, all named captures are converted into string fields. +Timestamp modifiers can be used to convert captures to the timestamp of the +parsed metric. If no timestamp is parsed the metric will be created using the +current time. + +You must capture at least one field per line.
+ +- Available modifiers: + - string (default if nothing is specified) + - int + - float + - duration (ie, 5.23ms gets converted to int nanoseconds) + - tag (converts the field into a tag) + - drop (drops the field completely) + - measurement (use the matched text as the measurement name) +- Timestamp modifiers: + - ts (This will auto-learn the timestamp format) + - ts-ansic ("Mon Jan _2 15:04:05 2006") + - ts-unix ("Mon Jan _2 15:04:05 MST 2006") + - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") + - ts-rfc822 ("02 Jan 06 15:04 MST") + - ts-rfc822z ("02 Jan 06 15:04 -0700") + - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") + - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") + - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") + - ts-rfc3339 ("2006-01-02T15:04:05Z07:00") + - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") + - ts-httpd ("02/Jan/2006:15:04:05 -0700") + - ts-epoch (seconds since unix epoch, may contain decimal) + - ts-epochnano (nanoseconds since unix epoch) + - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year) + - ts-"CUSTOM" + +CUSTOM time layouts must be within quotes and be the representation of the +"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`. +To match a comma decimal point you can use a period. For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"` +To match a comma decimal point you can use a period in the pattern string. +See https://golang.org/pkg/time/#Parse for more details. + +Telegraf has many of its own [built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/parsers/grok/influx_patterns.go), +as well as support for most of +[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). +_Golang regular expressions do not support lookahead or lookbehind. 
+logstash patterns that depend on these are not supported._ + +If you need help building patterns to match your logs, the +[Grok Debugger application](https://grokdebug.herokuapp.com) might be helpful. + +## Configuration + +```toml +[[inputs.file]] + ## Files to parse each interval. + ## These accept standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk". ie: + ## /var/log/**.log -> recursively find all .log files in /var/log + ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log + ## /var/log/apache.log -> only tail the apache log file + files = ["/var/log/apache/access.log"] + + ## The dataformat to be read from files + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "grok" + + ## This is a list of patterns to check the given log file(s) for. + ## Note that adding patterns here increases processing time. The most + ## efficient configuration is to have one pattern. + ## Other common built-in patterns are: + ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) + ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) + grok_patterns = ["%{COMBINED_LOG_FORMAT}"] + + ## Full path(s) to custom pattern files. + grok_custom_pattern_files = [] + + ## Custom patterns can also be defined here. Put one pattern per line. + grok_custom_patterns = ''' + ''' + + ## Timezone allows you to provide an override for timestamps that + ## don't already include an offset + ## e.g. 04/06/2016 12:41:45 data one two 5.43µs + ## + ## Default: "" which renders UTC + ## Options are as follows: + ## 1. Local -- interpret based on machine localtime + ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + ## 3. 
UTC -- or blank/unspecified, will return timestamp in UTC + grok_timezone = "Canada/Eastern" +``` + +### Timestamp examples + +This example input and config parses a file using a custom timestamp conversion: + +``` +2017-02-21 13:10:34 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] +``` + +This example input and config parses a file using a timestamp in unix time: + +``` +1466004605 value=42 +1466004605.123456789 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] +``` + +This example parses a file using a built-in conversion and a custom pattern: + +``` +Wed Apr 12 13:10:34 PST 2017 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] + grok_custom_patterns = ''' + TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} + ''' +``` + +For cases where the timestamp itself is without offset, the `timezone` config var is available +to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times +are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp +will be processed based on the current machine timezone configuration. Lastly, if using a +timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +grok will offset the timestamp accordingly. + +### TOML escaping + +When saving patterns to the configuration file, keep in mind the different TOML +[string](https://github.com/toml-lang/toml#string) types and the escaping +rules for each. These escaping rules must be applied in addition to the +escaping required by the grok syntax. Using the Multi-line line literal +syntax with `'''` may be useful. 
+ +The following config examples will parse this input file: + +``` +|42|\uD83D\uDC2F|'telegraf'| +``` + +Since `|` is a special character in the grok language, we must escape it to +get a literal `|`. With a basic TOML string, special characters such as +backslash must be escaped, requiring us to escape the backslash a second time. + +```toml +[[inputs.file]] + grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] + grok_custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" +``` + +We cannot use a literal TOML string for the pattern, because we cannot match a +`'` within it. However, it works well for the custom pattern. +```toml +[[inputs.file]] + grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] + grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' +``` + +A multi-line literal string allows us to encode the pattern: +```toml +[[inputs.file]] + grok_patterns = [''' + \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\| + '''] + grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' +``` + +### Tips for creating patterns + +Writing complex patterns can be difficult, here is some advice for writing a +new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com). + +Create a file output that writes to stdout, and disable other outputs while +testing. This will allow you to see the captured metrics. Keep in mind that +the file output will only print once per `flush_interval`. + +```toml +[[outputs.file]] + files = ["stdout"] +``` + +- Start with a file containing only a single line of your input. +- Remove all but the first token or piece of the line. +- Add the section of your pattern to match this piece to your configuration file. +- Verify that the metric is parsed successfully by running Telegraf. +- If successful, add the next token, update the pattern and retest. 
+- Continue one token at a time until the entire line is successfully parsed. diff --git a/content/telegraf/v1.9/data_formats/input/influx.md b/content/telegraf/v1.9/data_formats/input/influx.md new file mode 100644 index 000000000..90760c439 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/influx.md @@ -0,0 +1,27 @@ +--- +title: InfluxDB Line Protocol input data format +description: Use the InfluxDB Line Protocol input data format to parse InfluxDB metrics directly into Telegraf metrics. +menu: + telegraf_1_9: + name: InfluxDB Line Protocol input + weight: 60 + parent: Input data formats +--- + +There are no additional configuration options for InfluxDB [line protocol][]. The +InfluxDB metrics are parsed directly into Telegraf metrics. + +[line protocol]: /influxdb/latest/write_protocols/line/ + +### Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` diff --git a/content/telegraf/v1.9/data_formats/input/json.md b/content/telegraf/v1.9/data_formats/input/json.md new file mode 100644 index 000000000..d02065427 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/json.md @@ -0,0 +1,224 @@ +--- +title: JSON input data format +description: Use the JSON input data format to parse [JSON][json] objects, or an array of objects, into Telegraf metric fields. +menu: + telegraf_1_9: + name: JSON input + weight: 70 + parent: Input data formats +--- + + +The JSON input data format parses a [JSON][json] object or an array of objects +into Telegraf metric fields. + +**NOTE:** All JSON numbers are converted to float fields. JSON String are +ignored unless specified in the `tag_key` or `json_string_fields` options. 
+ +## Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "json" + + ## Query is a GJSON path that specifies a specific chunk of JSON to be + ## parsed, if not specified the whole document will be parsed. + ## + ## GJSON query paths are described here: + ## https://github.com/tidwall/gjson#path-syntax + json_query = "" + + ## Tag keys is an array of keys that should be added as tags. + tag_keys = [ + "my_tag_1", + "my_tag_2" + ] + + ## String fields is an array of keys that should be added as string fields. + json_string_fields = [] + + ## Name key is the key to use as the measurement name. + json_name_key = "" + + ## Time key is the key containing the time that should be used to create the + ## metric. + json_time_key = "" + + ## Time format is the time layout that should be used to interprete the + ## json_time_key. The time must be `unix`, `unix_ms` or a time in the + ## "reference time". + ## ex: json_time_format = "Mon Jan 2 15:04:05 -0700 MST 2006" + ## json_time_format = "2006-01-02T15:04:05Z07:00" + ## json_time_format = "unix" + ## json_time_format = "unix_ms" + json_time_format = "" +``` + +### `json_query` + +The `json_query` is a [GJSON][gjson] path that can be used to limit the +portion of the overall JSON document that should be parsed. The result of the +query should contain a JSON object or an array of objects. + +Consult the GJSON [path syntax][gjson syntax] for details and examples. + +### json_time_key, json_time_format + +By default the current time will be used for all created metrics, to set the +time using the JSON document you can use the `json_time_key` and +`json_time_format` options together to set the time to a value in the parsed +document. 
+ +The `json_time_key` option specifies the key containing the time value and +`json_time_format` must be set to `unix`, `unix_ms`, or the Go "reference +time" which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`. + +Consult the Go [time][time parse] package for details and additional examples +on how to set the time format. + +## Examples + +### Basic parsing + +Config: +```toml +[[inputs.file]] + files = ["example"] + name_override = "myjsonmetric" + data_format = "json" +``` + +Input: +```json +{ + "a": 5, + "b": { + "c": 6 + }, + "ignored": "I'm a string" +} +``` + +Output: +``` +myjsonmetric a=5,b_c=6 +``` + +### Name, tags, and string fields + +Config: +```toml +[[inputs.file]] + files = ["example"] + name_key = "name" + tag_keys = ["my_tag_1"] + string_fields = ["my_field"] + data_format = "json" +``` + +Input: +```json +{ + "a": 5, + "b": { + "c": 6, + "my_field": "description" + }, + "my_tag_1": "foo", + "name": "my_json" +} +``` + +Output: +``` +my_json,my_tag_1=foo a=5,b_c=6,my_field="description" +``` + +### Arrays + +If the JSON data is an array, then each object within the array is parsed with +the configured settings. + +Config: +```toml +[[inputs.file]] + files = ["example"] + data_format = "json" + json_time_key = "b_time" + json_time_format = "02 Jan 06 15:04 MST" +``` + +Input: +```json +[ + { + "a": 5, + "b": { + "c": 6, + "time":"04 Jan 06 15:04 MST" + }, + }, + { + "a": 7, + "b": { + "c": 8, + "time":"11 Jan 07 15:04 MST" + }, + } +] +``` + +Output: +``` +file a=5,b_c=6 1136387040000000000 +file a=7,b_c=8 1168527840000000000 +``` + +### Query + +The `json_query` option can be used to parse a subset of the document. 
+ +Config: +```toml +[[inputs.file]] + files = ["example"] + data_format = "json" + tag_keys = ["first"] + json_string_fields = ["last"] + json_query = "obj.friends" +``` + +Input: +```json +{ + "obj": { + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44}, + {"first": "Roger", "last": "Craig", "age": 68}, + {"first": "Jane", "last": "Murphy", "age": 47} + ] + } +} +``` + +Output: +``` +file,first=Dale last="Murphy",age=44 +file,first=Roger last="Craig",age=68 +file,first=Jane last="Murphy",age=47 +``` + +[gjson]: https://github.com/tidwall/gjson +[gjson syntax]: https://github.com/tidwall/gjson#path-syntax +[json]: https://www.json.org/ +[time parse]: https://golang.org/pkg/time/#Parse diff --git a/content/telegraf/v1.9/data_formats/input/logfmt.md b/content/telegraf/v1.9/data_formats/input/logfmt.md new file mode 100644 index 000000000..3aedc633a --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/logfmt.md @@ -0,0 +1,42 @@ +--- +title: Logfmt input data format +description: Use the "logfmt" input data format to parse "logfmt" data into Telegraf metrics. +menu: + telegraf_1_9: + name: logfmt + weight: 80 + parent: Input data formats +--- + +The `logfmt` data format parses [logfmt] data into Telegraf metrics. + +[logfmt]: https://brandur.org/logfmt + +## Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "logfmt" + + ## Set the name of the created metric, if unset the name of the plugin will + ## be used. + metric_name = "logfmt" +``` + +## Metrics + +Each key/value pair in the line is added to a new metric as a field. 
The type +of the field is automatically determined based on the contents of the value. + +## Examples + +``` +- method=GET host=example.org ts=2018-07-24T19:43:40.275Z connect=4ms service=8ms status=200 bytes=1653 ++ logfmt method="GET",host="example.org",ts="2018-07-24T19:43:40.275Z",connect="4ms",service="8ms",status=200i,bytes=1653i +``` diff --git a/content/telegraf/v1.9/data_formats/input/nagios.md b/content/telegraf/v1.9/data_formats/input/nagios.md new file mode 100644 index 000000000..8ee0a0a35 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/nagios.md @@ -0,0 +1,29 @@ +--- +title: Nagios input data format +description: Use the Nagios input data format to parse the output of Nagios plugins into Telegraf metrics. +menu: + telegraf_1_9: + name: Nagios + weight: 90 + parent: Input data formats +--- + +# Nagios + +The Nagios input data format parses the output of +[Nagios plugins](https://www.nagios.org/downloads/nagios-plugins/) into +Telegraf metrics. + +## Configuration + +```toml +[[inputs.exec]] + ## Commands array + commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "nagios" +``` diff --git a/content/telegraf/v1.9/data_formats/input/value.md b/content/telegraf/v1.9/data_formats/input/value.md new file mode 100644 index 000000000..91e08a120 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/value.md @@ -0,0 +1,44 @@ +--- +title: Value input data format +description: Use the "value" input data format to parse single values into Telegraf metrics. +menu: + telegraf_1_9: + name: Value + weight: 100 + parent: Input data formats +--- + + +The "value" input data format translates single values into Telegraf metrics. 
This +is done by assigning a measurement name and setting a single field ("value") +as the parsed metric. + +## Configuration + +You **must** tell Telegraf what type of metric to collect by using the +`data_type` configuration option. Available data type options are: + +1. integer +2. float or long +3. string +4. boolean + +> **Note:** It is also recommended that you set `name_override` to a measurement +name that makes sense for your metric; otherwise, it will just be set to the +name of the plugin. + +```toml +[[inputs.exec]] + ## Commands array + commands = ["cat /proc/sys/kernel/random/entropy_avail"] + + ## override the default metric name of "exec" + name_override = "entropy_available" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "value" + data_type = "integer" # required +``` diff --git a/content/telegraf/v1.9/data_formats/input/wavefront.md b/content/telegraf/v1.9/data_formats/input/wavefront.md new file mode 100644 index 000000000..7bccf4b2a --- /dev/null +++ b/content/telegraf/v1.9/data_formats/input/wavefront.md @@ -0,0 +1,28 @@ +--- +title: Wavefront input data format +description: Use the Wavefront input data format to parse Wavefront data into Telegraf metrics. +menu: + telegraf_1_9: + name: Wavefront + weight: 110 + parent: Input data formats +--- + +The Wavefront input data format parse Wavefront data into Telegraf metrics. +For more information on the Wavefront native data format, see +[Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html) in the Wavefront documentation. + +## Configuration + +There are no additional configuration options for Wavefront Data Format line-protocol. + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "wavefront" +``` diff --git a/content/telegraf/v1.9/data_formats/output/_index.md b/content/telegraf/v1.9/data_formats/output/_index.md new file mode 100644 index 000000000..77dfbee93 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/output/_index.md @@ -0,0 +1,33 @@ +--- +title: Telegraf output data formats +description: Telegraf serializes metrics into output data formats for InfluxDB Line Protocol, JSON, Graphite, and Splunk metrics. +menu: + telegraf_1_9: + name: Output data formats + weight: 1 + parent: Data formats +--- + +In addition to output-specific data formats, Telegraf supports the following set +of common data formats that may be selected when configuring many of the Telegraf +output plugins. + +* [InfluxDB Line Protocol](/telegraf/v1.9/data_formats/output/influx) +* [JSON](/telegraf/v1.9/data_formats/output/json) +* [Graphite](/telegraf/v1.9/data_formats/output/graphite) +* [SplunkMetric](/telegraf/v1.9/data_formats/output/splunkmetric) + +You will be able to identify the plugins with support by the presence of a +`data_format` configuration option, for example, in the File (`file`) output plugin: + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout"] + + ## Data format to output. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" +``` diff --git a/content/telegraf/v1.9/data_formats/output/graphite.md b/content/telegraf/v1.9/data_formats/output/graphite.md new file mode 100644 index 000000000..df6fe4adb --- /dev/null +++ b/content/telegraf/v1.9/data_formats/output/graphite.md @@ -0,0 +1,58 @@ +--- +title: Graphite output data format +description: Use the "Graphite" output data format to serialize data from Telegraf metrics. +menu: + telegraf_1_9: + name: Graphite output + weight: 10 + parent: Output data formats +--- + +The Graphite data format is serialized from Telegraf metrics using either the +template pattern or tag support method. You can select between the two +methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support method is used, +otherwise the [template pattern][templates]) option is used. + +## Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "graphite" + + ## Prefix added to each graphite bucket + prefix = "telegraf" + ## Graphite template pattern + template = "host.tags.measurement.field" + + ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later. + # graphite_tag_support = false +``` + +### graphite_tag_support + +When the `graphite_tag_support` option is enabled, the template pattern is not +used. Instead, tags are encoded using +[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html), +added in Graphite 1.1. 
The `metric_path` is a combination of the optional +`prefix` option, measurement name, and field name. + +The tag `name` is reserved by Graphite, any conflicting tags and will be encoded as `_name`. + +**Example conversion**: +``` +cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758 +=> +cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 +cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 +``` + +### templates + +For more information on templates and template patterns, see [Template patterns](/telegraf/v1.9/data_formats/template-patterns/). diff --git a/content/telegraf/v1.9/data_formats/output/influx.md b/content/telegraf/v1.9/data_formats/output/influx.md new file mode 100644 index 000000000..e4b00641a --- /dev/null +++ b/content/telegraf/v1.9/data_formats/output/influx.md @@ -0,0 +1,41 @@ +--- +title: InfluxDB Line Protocol output data format +description: The "influx" data format outputs metrics into the InfluxDB Line Protocol format. +menu: + telegraf_1_9: + name: InfluxDB Line Protocol + weight: 20 + parent: Output data formats +--- + +The `influx` output data format outputs metrics into [InfluxDB Line Protocol][line protocol]. InfluxData recommends this data format unless another format is required for interoperability. + +## Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" + + ## Maximum line length in bytes. Useful only for debugging. + influx_max_line_bytes = 0 + + ## When true, fields will be output in ascending lexical order. 
Enabling + ## this option will result in decreased performance and is only recommended + ## when you need predictable ordering while debugging. + influx_sort_fields = false + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + influx_uint_support = false +``` + +[line protocol]: /influxdb/latest/write_protocols/line_protocol_tutorial/ diff --git a/content/telegraf/v1.9/data_formats/output/json.md b/content/telegraf/v1.9/data_formats/output/json.md new file mode 100644 index 000000000..a355abe2a --- /dev/null +++ b/content/telegraf/v1.9/data_formats/output/json.md @@ -0,0 +1,89 @@ +--- +title: JSON output data format +description: Telegraf's "json" output data format converts metrics into JSON documents. +menu: + telegraf_1_9: + name: JSON + weight: 30 + parent: Output data formats +--- + +The `json` output data format serializes Telegraf metrics into JSON documents. + +## Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "json" + + ## The resolution to use for the metric timestamp. Must be a duration string + ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to + ## the power of 10 less than the specified units. 
+ json_timestamp_units = "1s" +``` + +## Examples + +### Standard format + +```json +{ + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 +} +``` + +### Batch format + +When an output plugin needs to emit multiple metrics at one time, it may use the +batch format. The use of batch format is determined by the plugin -- reference +the documentation for the specific plugin. + +```json +{ + "metrics": [ + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + }, + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + } + ] +} +``` diff --git a/content/telegraf/v1.9/data_formats/output/splunkmetric.md b/content/telegraf/v1.9/data_formats/output/splunkmetric.md new file mode 100644 index 000000000..93c710367 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/output/splunkmetric.md @@ -0,0 +1,147 @@ +--- +title: SplunkMetric output data format +description: The SplunkMetric serializer formats and outputs data in a format that can be consumed by a Splunk metrics index. +menu: + telegraf_1_9: + name: SplunkMetric + weight: 40 + parent: Output data formats +--- + +The SplunkMetric serializer formats and outputs the metric data in a format that can be consumed by a Splunk metrics index. +It can be used to write to a file using the file output, or for sending metrics to a HEC using the standard Telegraf HTTP output. + +If you're using the HTTP output, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric. 
+ +The data is output in a format that conforms to the specified Splunk HEC JSON format as found here: +[Send metrics in JSON format](http://dev.splunk.com/view/event-collector/SP-CAAAFDN). + +An example event looks like: +```javascript +{ + "time": 1529708430, + "event": "metric", + "host": "patas-mbp", + "fields": { + "_value": 0.6, + "cpu": "cpu0", + "dc": "mobile", + "metric_name": "cpu.usage_user", + "user": "ronnocol" + } +} +``` +In the above snippet, the following keys are dimensions: +* cpu +* dc +* user + +## Using with the HTTP output + +To send this data to a Splunk HEC, you can use the HTTP output, there are some custom headers that you need to add +to manage the HEC authorization, here's a sample config for an HTTP output: + +```toml +[[outputs.http]] + ## URL is the address to send metrics to + url = "https://localhost:8088/services/collector" + + ## Timeout for HTTP message + # timeout = "5s" + + ## HTTP method, one of: "POST" or "PUT" + # method = "POST" + + ## HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output.
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "splunkmetric" + ## Provides time, index, source overrides for the HEC + splunkmetric_hec_routing = true + + ## Additional HTTP headers + [outputs.http.headers] + # Should be set manually to "application/json" for json data_format + Content-Type = "application/json" + Authorization = "Splunk xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + X-Splunk-Request-Channel = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +``` + +## Overrides +You can override the default values for the HEC token you are using by adding additional tags to the config file. + +The following aspects of the token can be overridden with tags: +* index +* source + +You can either use `[global_tags]` or use a more advanced configuration as documented [here](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md). + +Such as this example which overrides the index just on the cpu metric: +```toml +[[inputs.cpu]] + percpu = false + totalcpu = true + [inputs.cpu.tags] + index = "cpu_metrics" +``` + +## Using with the File output + +You can use the file output when running telegraf on a machine with a Splunk forwarder.
+ +A sample event when `hec_routing` is false (or unset) looks like: +```javascript +{ + "_value": 0.6, + "cpu": "cpu0", + "dc": "mobile", + "metric_name": "cpu.usage_user", + "user": "ronnocol", + "time": 1529708430 +} +``` +Data formatted in this manner can be ingested with a simple `props.conf` file that +looks like this: + +```ini +[telegraf] +category = Metrics +description = Telegraf Metrics +pulldown_type = 1 +DATETIME_CONFIG = +NO_BINARY_CHECK = true +SHOULD_LINEMERGE = true +disabled = false +INDEXED_EXTRACTIONS = json +KV_MODE = none +TIMESTAMP_FIELDS = time +TIME_FORMAT = %s.%3N +``` + +An example configuration of a file based output is: + +```toml + # Send telegraf metrics to file(s) +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "splunkmetric" + hec_routing = false +``` diff --git a/content/telegraf/v1.9/data_formats/template-patterns.md b/content/telegraf/v1.9/data_formats/template-patterns.md new file mode 100644 index 000000000..d2471d704 --- /dev/null +++ b/content/telegraf/v1.9/data_formats/template-patterns.md @@ -0,0 +1,145 @@ +--- +title: Telegraf template patterns +description: Use template patterns to describe how dot-delimited strings should map to and from Telegraf metrics. +menu: + telegraf_1_9: + name: Template patterns + weight: 30 + parent: Data formats +--- + + + +Template patterns are a mini language that describes how a dot delimited +string should be mapped to and from [metrics][]. + +A template has the form: +``` +"host.mytag.mytag.measurement.measurement.field*" +``` + +Where the following keywords can be set: + +1. `measurement`: specifies that this section of the graphite bucket corresponds +to the measurement name. 
This can be specified multiple times. +2. `field`: specifies that this section of the graphite bucket corresponds +to the field name. This can be specified multiple times. +3. `measurement*`: specifies that all remaining elements of the graphite bucket +correspond to the measurement name. +4. `field*`: specifies that all remaining elements of the graphite bucket +correspond to the field name. + +Any part of the template that is not a keyword is treated as a tag key. This +can also be specified multiple times. + +**NOTE:** `field*` cannot be used in conjunction with `measurement*`. + +## Examples + +### Measurement and tag templates + +The most basic template is to specify a single transformation to apply to all +incoming metrics. So the following template: + +```toml +templates = [ + "region.region.measurement*" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +us.west.cpu.load 100 +=> cpu.load,region=us.west value=100 +``` + +Multiple templates can also be specified, but these should be differentiated +using _filters_ (see below for more details) + +```toml +templates = [ + "*.*.* region.region.measurement", # <- all 3-part measurements will match this one. + "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one. +] +``` + +### Field templates + +The field keyword tells Telegraf to give the metric that field name. +So the following template: + +```toml +separator = "_" +templates = [ + "measurement.measurement.field.field.region" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.idle.percent.eu-east 100 +=> cpu_usage,region=eu-east idle_percent=100 +``` + +The field key can also be derived from all remaining elements of the graphite +bucket by specifying `field*`: + +```toml +separator = "_" +templates = [ + "measurement.measurement.region.field*" +] +``` + +which would result in the following Graphite -> Telegraf transformation. 
+ +``` +cpu.usage.eu-east.idle.percentage 100 +=> cpu_usage,region=eu-east idle_percentage=100 +``` + +### Filter templates + +Users can also filter the template(s) to use based on the name of the bucket, +using glob matching, like so: + +```toml +templates = [ + "cpu.* measurement.measurement.region", + "mem.* measurement.measurement.host" +] +``` + +which would result in the following transformation: + +``` +cpu.load.eu-east 100 +=> cpu_load,region=eu-east value=100 + +mem.cached.localhost 256 +=> mem_cached,host=localhost value=256 +``` + +### Adding Tags + +Additional tags can be added to a metric that don't exist on the received metric. +You can add additional tags by specifying them after the pattern. +Tags have the same format as the line protocol. +Multiple tags are separated by commas. + +```toml +templates = [ + "measurement.measurement.field.region datacenter=1a" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.idle.eu-east 100 +=> cpu_usage,region=eu-east,datacenter=1a idle=100 +``` + +[metrics]: /telegraf/v1.9/concepts/metrics/ diff --git a/content/telegraf/v1.9/introduction/_index.md b/content/telegraf/v1.9/introduction/_index.md new file mode 100644 index 000000000..2e03e613d --- /dev/null +++ b/content/telegraf/v1.9/introduction/_index.md @@ -0,0 +1,22 @@ +--- +title: Introducing Telegraf + +menu: + telegraf_1_9: + name: Introduction + weight: 20 +--- + +The introductory documentation includes all the information you need to get up and running with Telegraf. + +## [Downloading Telegraf](/telegraf/v1.9/introduction/downloading/) + +Go to the [InfluxData downloads page](https://portal.influxdata.com/downloads) to get the latest release of Telegraf. + +## [Installing Telegraf](/telegraf/v1.9/introduction/installation/) + +[Installing Telegraf](/telegraf/v1.9/introduction/installation/) includes directions for installing, starting, and configuring Telegraf. 
+ +## [Getting started with Telegraf](/telegraf/v1.9/introduction/getting-started/) + +[Getting started with Telegraf](/telegraf/v1.9/introduction/getting-started/) walks you through the download, installation, and configuration processes, and it shows how to use Telegraf to get data into InfluxDB. diff --git a/content/telegraf/v1.9/introduction/downloading.md b/content/telegraf/v1.9/introduction/downloading.md new file mode 100644 index 000000000..5ec244afa --- /dev/null +++ b/content/telegraf/v1.9/introduction/downloading.md @@ -0,0 +1,12 @@ +--- +title: Downloading Telegraf +menu: + telegraf_1_9: + name: Downloading + weight: 10 + parent: Introduction +--- + + + +Download the latest Telegraf release at the [InfluxData download page](https://portal.influxdata.com/downloads). diff --git a/content/telegraf/v1.9/introduction/getting-started.md b/content/telegraf/v1.9/introduction/getting-started.md new file mode 100644 index 000000000..3151d711d --- /dev/null +++ b/content/telegraf/v1.9/introduction/getting-started.md @@ -0,0 +1,129 @@ +--- +title: Getting started with Telegraf +description: Downloading, installing, configuring and getting started with Telegraf, the plug-in driven server agent of the InfluxData time series platform. +aliases: + - /telegraf/v1.9/introduction/getting_started/ +menu: + telegraf_1_9: + name: Getting started + weight: 30 + parent: Introduction +--- + +## Getting started with Telegraf +Telegraf is an agent written in Go for collecting metrics and writing them into InfluxDB or other possible outputs. +This guide will get you up and running with Telegraf. +It walks you through the download, installation, and configuration processes, and it shows how to use Telegraf to get data into InfluxDB. + +## Download and install Telegraf +Follow the instructions in the Telegraf section on the [Downloads page](https://influxdata.com/downloads/). 
+ +> **Note:** Telegraf will start automatically using the default configuration when installed from a deb package. + +## Configuring Telegraf + +### Configuration file location by installation type + +* macOS [Homebrew](http://brew.sh/): `/usr/local/etc/telegraf.conf` +* Linux debian and RPM packages: `/etc/telegraf/telegraf.conf` +* Standalone Binary: see the next section for how to create a configuration file + +### Creating and editing the configuration file + +Before starting the Telegraf server you need to edit and/or create an initial configuration that specifies your desired [inputs](/telegraf/v1.9/plugins/inputs/) (where the metrics come from) and [outputs](/telegraf/v1.9/plugins/outputs/) (where the metrics go). There are [several ways](/telegraf/v1.9/administration/configuration/) to create and edit the configuration file. +Here, we'll generate a configuration file and simultaneously specify the desired inputs with the `-input-filter` flag and the desired output with the `-output-filter` flag. + +In the example below, we create a configuration file called `telegraf.conf` with two inputs: +one that reads metrics about the system's cpu usage (`cpu`) and one that reads metrics about the system's memory usage (`mem`). We specify InfluxDB as the desired output. + +```bash +telegraf -sample-config -input-filter cpu:mem -output-filter influxdb > telegraf.conf +``` + +## Start the Telegraf service + +Start the Telegraf service and direct it to the relevant configuration file: +### macOS [Homebrew](http://brew.sh/) +```bash +telegraf --config telegraf.conf +``` + +### Linux (sysvinit and upstart installations) +```bash +sudo service telegraf start +``` + +### Linux (systemd installations) +```bash +systemctl start telegraf +``` + +## Results +Once Telegraf is up and running it will start collecting data and writing them to the desired output. + +Returning to our sample configuration, we show what the `cpu` and `mem` data look like in InfluxDB below. 
+Note that we used the default input and output configuration settings to get these data. + +* List all [measurements](/influxdb/v1.4/concepts/glossary/#measurement) in the `telegraf` [database](/influxdb/v1.4/concepts/glossary/#database): + +``` +> SHOW MEASUREMENTS +name: measurements +------------------ +name +cpu +mem +``` + +* List all [field keys](/influxdb/v1.4/concepts/glossary/#field-key) by measurement: + +``` +> SHOW FIELD KEYS +name: cpu +--------- +fieldKey fieldType +usage_guest float +usage_guest_nice float +usage_idle float +usage_iowait float +usage_irq float +usage_nice float +usage_softirq float +usage_steal float +usage_system float +usage_user float + +name: mem +--------- +fieldKey fieldType +active integer +available integer +available_percent float +buffered integer +cached integer +free integer +inactive integer +total integer +used integer +used_percent float +``` + +* Select a sample of the data in the [field](/influxdb/v1.4/concepts/glossary/#field) `usage_idle` in the measurement `cpu_usage_idle`: + +```bash +> SELECT usage_idle FROM cpu WHERE cpu = 'cpu-total' LIMIT 5 +name: cpu +--------- +time usage_idle +2016-01-16T00:03:00Z 97.56189047261816 +2016-01-16T00:03:10Z 97.76305923519121 +2016-01-16T00:03:20Z 97.32533433320835 +2016-01-16T00:03:30Z 95.68857785553611 +2016-01-16T00:03:40Z 98.63715928982245 +``` + + +Notice that the timestamps occur at rounded ten second intervals (that is, `:00`, `:10`, `:20`, and so on) - this is a configurable setting. + + +That's it! You now have the foundation for using Telegraf to collect metrics and write them to your output of choice. 
diff --git a/content/telegraf/v1.9/introduction/installation.md b/content/telegraf/v1.9/introduction/installation.md new file mode 100644 index 000000000..6c7a0aa8a --- /dev/null +++ b/content/telegraf/v1.9/introduction/installation.md @@ -0,0 +1,235 @@ +--- +title: Installing Telegraf + +menu: + telegraf_1_9: + name: Installing + weight: 20 + parent: Introduction +--- + +This page provides directions for installing, starting, and configuring Telegraf. + +## Requirements + +Installation of the Telegraf package may require `root` or administrator privileges in order to complete successfully. + +### Networking + +Telegraf offers multiple service [input plugins](/telegraf/v1.9/plugins/inputs/) that may +require custom ports. +All port mappings can be modified through the configuration file, +which is located at `/etc/telegraf/telegraf.conf` for default installations. + +### NTP + +Telegraf uses a host's local time in UTC to assign timestamps to data. +Use the Network Time Protocol (NTP) to synchronize time between hosts; if hosts' clocks +aren't synchronized with NTP, the timestamps on the data can be inaccurate. + +## Installation + +{{< tabs-wrapper >}} +{{% tabs %}} +[Ubuntu & Debian](#) +[RedHat & CentOS](#) +[SLES & openSUSE](#) +[FreeBSD/PC-BSD](#) +[macOS](#) +[Windows](#) +{{% /tabs %}} + +{{% tab-content %}} +For instructions on how to install the Debian package from a file, please see the [downloads page](https://influxdata.com/downloads/). + +Debian and Ubuntu users can install the latest stable version of Telegraf using the `apt-get` package manager. 
+ +**Ubuntu:** Add the InfluxData repository with the following commands: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[wget](#) +[curl](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```bash +wget -qO- https://repos.influxdata.com/influxdb.key | sudo apt-key add - +source /etc/lsb-release +echo "deb https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```bash +curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add - +source /etc/lsb-release +echo "deb https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +**Debian:** Add the InfluxData repository with the following commands: + +{{< code-tabs-wrapper >}} +{{% code-tabs %}} +[wget](#) +[curl](#) +{{% /code-tabs %}} + +{{% code-tab-content %}} +```bash +# Before adding Influx repository, run this so that apt will be able to read the repository. + +sudo apt-get update && sudo apt-get install apt-transport-https + +# Add the InfluxData key + +wget -qO- https://repos.influxdata.com/influxdb.key | sudo apt-key add - +source /etc/os-release +test $VERSION_ID = "7" && echo "deb https://repos.influxdata.com/debian wheezy stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +test $VERSION_ID = "8" && echo "deb https://repos.influxdata.com/debian jessie stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +test $VERSION_ID = "9" && echo "deb https://repos.influxdata.com/debian stretch stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +``` +{{% /code-tab-content %}} + +{{% code-tab-content %}} +```bash +# Before adding Influx repository, run this so that apt will be able to read the repository. 
+ +sudo apt-get update && sudo apt-get install apt-transport-https + +# Add the InfluxData key + +curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add - +source /etc/os-release +test $VERSION_ID = "7" && echo "deb https://repos.influxdata.com/debian wheezy stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +test $VERSION_ID = "8" && echo "deb https://repos.influxdata.com/debian jessie stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +test $VERSION_ID = "9" && echo "deb https://repos.influxdata.com/debian stretch stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +``` +{{% /code-tab-content %}} +{{< /code-tabs-wrapper >}} + +Then, install and start the Telegraf service: + +```bash +sudo apt-get update && sudo apt-get install telegraf +sudo service telegraf start +``` + +Or if your operating system is using systemd (Ubuntu 15.04+, Debian 8+): +``` +sudo apt-get update && sudo apt-get install telegraf +sudo systemctl start telegraf +``` + +{{% /tab-content %}} + +{{% tab-content %}} +For instructions on how to install the RPM package from a file, please see the [downloads page](https://influxdata.com/downloads/). + +**RedHat and CentOS:** Install the latest stable version of Telegraf using the `yum` package manager: + +```bash +cat < +``` +{{% /tab-content %}} + +{{< /tabs-wrapper >}} + +## Configuration + +### Create a configuration file with default input and output plugins. + +Every plugin will be in the file, but most will be commented. + +``` +telegraf config > telegraf.conf +``` + +### Create a configuration file with specific inputs and outputs +``` +telegraf --input-filter [:] --output-filter [:] config > telegraf.conf +``` + +For more advanced configuration details, see the +[configuration documentation](/telegraf/v1.9/administration/configuration/). 
diff --git a/content/telegraf/v1.9/plugins/_index.md b/content/telegraf/v1.9/plugins/_index.md
new file mode 100644
index 000000000..53edcd804
--- /dev/null
+++ b/content/telegraf/v1.9/plugins/_index.md
@@ -0,0 +1,27 @@
+---
+title: Telegraf plugins
+description: Telegraf plugins are agents used in the InfluxData time series platform for collecting, processing, aggregating, and writing metrics from time series data on the InfluxDB time series database and other popular databases and applications.
+menu:
+  telegraf_1_9:
+    name: Plugins
+    weight: 40
+---
+
+Telegraf is an agent, written in the Go programming language, for collecting, processing, aggregating, and writing metrics. Telegraf is plugin-driven and supports four categories of plugin types, including input, output, aggregator, and processor.
+
+
+## [Telegraf input plugins](/telegraf/v1.9/plugins/inputs/)
+
+The [Telegraf input plugins](/telegraf/v1.9/plugins/inputs/) collect metrics from the system, services, or third party APIs.
+
+## [Telegraf output plugins](/telegraf/v1.9/plugins/outputs/)
+
+The [Telegraf output plugins](/telegraf/v1.9/plugins/outputs/) write metrics to various destinations.
+
+## [Telegraf aggregator plugins](/telegraf/v1.9/plugins/aggregators/)
+
+The [Telegraf aggregator plugins](/telegraf/v1.9/plugins/aggregators/) create aggregate metrics (for example, mean, min, max, quantiles, etc.)
+
+## [Telegraf processor plugins](/telegraf/v1.9/plugins/processors/)
+
+The [Telegraf processor plugins](/telegraf/v1.9/plugins/processors/) transform, decorate, and filter metrics.
diff --git a/content/telegraf/v1.9/plugins/aggregators.md b/content/telegraf/v1.9/plugins/aggregators.md
new file mode 100644
index 000000000..f6b9c4464
--- /dev/null
+++ b/content/telegraf/v1.9/plugins/aggregators.md
@@ -0,0 +1,50 @@
+---
+title: Telegraf aggregator plugins
+description: Use the Telegraf aggregator plugins with the InfluxData time series platform to create aggregate metrics (for example, mean, min, max, quantiles, etc.) collected by the input plugins. Aggregator plugins support basic statistics, histograms, and min/max values.
+menu:
+  telegraf_1_9:
+    name: Aggregator
+    weight: 30
+    parent: Plugins
+---
+
+Aggregators emit new aggregate metrics based on the metrics collected by the input plugins.
+
+
+## Supported Telegraf aggregator plugins
+
+
+### BasicStats
+
+Plugin ID: `basicstats`
+
+The [BasicStats aggregator plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/aggregators/basicstats/README.md) gives `count`, `max`, `min`, `mean`, `s2` (variance), and `stdev` for a set of values, emitting the aggregate every period seconds.
+
+### Histogram
+
+Plugin ID: `histogram`
+
+The [Histogram aggregator plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/aggregators/histogram/README.md) creates histograms containing the counts of field values within a range.
+
+Values added to a bucket are also added to the larger buckets in the distribution. This creates a [cumulative histogram](https://upload.wikimedia.org/wikipedia/commons/5/53/Cumulative_vs_normal_histogram.svg).
+
+Like other Telegraf aggregator plugins, the metric is emitted every period seconds. Bucket counts, however, are not reset between periods and will be non-strictly increasing while Telegraf is running.
+
+### MinMax
+
+Plugin ID: `minmax`
+
+The [MinMax aggregator plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/aggregators/minmax/README.md) aggregates `min` and `max` values of each field it sees, emitting the aggregate every period seconds.
+
+### ValueCounter
+
+Plugin ID: `valuecounter`
+
+The [ValueCounter aggregator plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/aggregators/valuecounter/README.md) counts the occurrence of values in fields and emits the counter once every 'period' seconds.
+
+A use case for the ValueCounter aggregator plugin is when you are processing an HTTP access log with the [Logparser input plugin](/telegraf/v1.9/plugins/inputs/#logparser) and want to count the HTTP status codes.
+
+The fields which will be counted must be configured with the fields configuration directive. When no fields are provided, the plugin will not count any fields.
+The results are emitted in fields, formatted as `originalfieldname_fieldvalue = count`.
+
+ValueCounter only works on fields of the type `int`, `bool`, or `string`. Float fields are dropped to prevent the creation of too many fields.
diff --git a/content/telegraf/v1.9/plugins/inputs.md b/content/telegraf/v1.9/plugins/inputs.md
new file mode 100644
index 000000000..e312c52a3
--- /dev/null
+++ b/content/telegraf/v1.9/plugins/inputs.md
@@ -0,0 +1,1030 @@
+---
+title: Telegraf input plugins
+description: Telegraf input plugins are used with the InfluxData time series platform to collect metrics from the system, services, or third party APIs.
+menu:
+  telegraf_1_9:
+    name: Input
+    weight: 10
+    parent: Plugins
+---
+
+Telegraf input plugins are used with the InfluxData time series platform to collect metrics from the system, services, or third party APIs. All metrics are gathered from the inputs you [enable and configure in the configuration file](/telegraf/v1.9/administration/configuration/).
+ + + +## Usage instructions + +View usage instructions for each service input by running `telegraf --usage `. + + +## Supported Telegraf input plugins + +### ActiveMQ + +Plugin ID: `activemq` + +The [ActiveMQ input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/activemq/README.md) gathers queues, topics, and subscriber metrics using the ActiveMQ Console API. + +### Aerospike + +Plugin ID: `aerospike` + +The [Aerospike input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/aerospike/README.md) queries Aerospike servers and gets node statistics and statistics for all configured namespaces. + +### Amazon CloudWatch Statistics + +Plugin ID: `cloudwatch` + +The [Amazon CloudWatch Statistics input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/cloudwatch/README.md) pulls metric statistics from Amazon CloudWatch. + +### AMQP Consumer + +Plugin ID: `amqp_consumer` + +The [AMQP Consumer input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/amqp_consumer/README.md) provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol +being RabbitMQ. + +### Apache HTTP Server + +Plugin ID: `apache` + +The [Apache HTTP Server input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/apache/README.md) collects server performance information using the `mod_status` module of the Apache HTTP Server. + +Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. +The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. +For information about how to configure your server reference, see the +[module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable). 
+
+### Apache Kafka Consumer
+
+Plugin ID: `kafka_consumer`
+
+The [Apache Kafka Consumer input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/kafka_consumer/README.md) polls a specified Kafka topic and adds messages to InfluxDB.
+Messages are expected in the line protocol format.
+[Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) is used to talk to the Kafka cluster so
+multiple instances of Telegraf can read from the same topic in parallel.
+
+### Apache Solr
+
+Plugin ID: `solr`
+
+The [Apache Solr (`solr`) input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/solr/README.md) collects stats using the MBean Request Handler.
+
+### Apache Tomcat
+
+Plugin ID: `tomcat`
+
+The [Apache Tomcat input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/tomcat/README.md) collects statistics available from the Apache Tomcat manager status page (`http://<host>/manager/status/all?XML=true`). Using `XML=true` returns XML data.
+See the [Apache Tomcat documentation](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html#Server_Status) for details on these statistics.
+
+### Aurora
+
+Plugin ID: `aurora`
+
+The [Aurora input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/aurora/README.md) gathers metrics from [Apache Aurora](https://aurora.apache.org/) schedulers. For monitoring recommendations, see [Monitoring your Aurora cluster](https://aurora.apache.org/documentation/latest/operations/monitoring/).
+
+### Bcache
+
+Plugin ID: `bcache`
+
+The [Bcache input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/bcache/README.md) gets bcache statistics from the `stats_total` directory and `dirty_data` file.
+
+### Beanstalkd
+
+Plugin ID: `beanstalkd`
+
+The [Beanstalkd input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/beanstalkd/README.md) collects server stats as well as tube stats (reported by `stats` and `stats-tube` commands respectively).
+
+### Bond
+
+Plugin ID: `bond`
+
+The [Bond input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/bond/README.md) collects network bond interface status, the status of the bond's slave interfaces, and the failure count of
+the bond's slave interfaces. The plugin collects these metrics from `/proc/net/bonding/*` files.
+
+### Burrow
+
+Plugin ID: `burrow`
+
+The [Burrow input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/burrow/README.md) collects Apache Kafka topic, consumer, and partition status using the [Burrow](https://github.com/linkedin/Burrow) [HTTP Endpoint](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint).
+
+### Ceph Storage
+
+Plugin ID: `ceph`
+
+The [Ceph Storage input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/ceph/README.md) collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
+
+### CGroup
+
+Plugin ID: `cgroup`
+
+The [CGroup input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/cgroup/README.md) captures specific statistics per cgroup.
+
+### Chrony
+
+Plugin ID: `chrony`
+
+The [Chrony input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/chrony/README.md) gets standard chrony metrics and requires the chronyc executable.
+
+### Conntrack
+
+Plugin ID: `conntrack`
+
+The [Conntrack input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/conntrack/README.md) collects stats from Netfilter's conntrack-tools.
+
+The conntrack-tools provide a mechanism for tracking various aspects of network connections as they are processed by netfilter.
+At runtime, conntrack exposes many of those connection statistics within `/proc/sys/net`.
+Depending on your kernel version, these files can be found in either `/proc/sys/net/ipv4/netfilter` or `/proc/sys/net/netfilter` and will be prefixed with either `ip_` or `nf_`.
+This plugin reads the files specified in its configuration and publishes each one as a field, with the prefix normalized to `ip_`.
+
+### Consul
+
+Plugin ID: `consul`
+
+The [Consul input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/consul/README.md) will collect statistics about all health checks registered in Consul.
+It uses the Consul API to query the data.
+It will not report the telemetry but Consul can report those stats already using StatsD protocol, if needed.
+
+### Couchbase
+
+Plugin ID: `couchbase`
+
+The [Couchbase input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/couchbase/README.md) reads per-node and per-bucket metrics from Couchbase.
+
+### CouchDB
+
+Plugin ID: `couchdb`
+
+The [CouchDB input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/couchdb/README.md) gathers metrics of CouchDB using the `_stats` endpoint.
+
+### CPU
+
+Plugin ID: `cpu`
+
+The [CPU input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/cpu/README.md) gathers metrics about cpu usage.
+
+### Disk
+
+Plugin ID: `disk`
+
+The [Disk input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/disk/README.md) gathers metrics about disk usage by mount point.
+
+### DiskIO
+
+Plugin ID: `diskio`
+
+The [DiskIO input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/diskio/README.md) gathers metrics about disk IO by device.
+
+### Disque
+
+Plugin ID: `disque`
+
+The [Disque input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/disque/README.md) gathers metrics from one or more [Disque](https://github.com/antirez/disque) servers.
+
+### DMCache
+
+Plugin ID: `dmcache`
+
+The [DMCache input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/dmcache/README.md) provides a native collection for dmsetup-based statistics for dm-cache.
+
+### DNS Query
+
+Plugin ID: `dns_query`
+
+The [DNS Query (`dns_query`) input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/dns_query/README.md) gathers DNS query times in milliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_(command)).
+
+### Docker
+
+Plugin ID: `docker`
+
+The [Docker input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/docker/README.md) uses the Docker Engine API to gather metrics on running Docker containers. The Docker plugin
+uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client) to gather stats from the
+[Engine API](https://docs.docker.com/engine/api/v1.20/) library documentation.
+
+### Dovecot
+
+Plugin ID: `dovecot`
+
+The [Dovecot input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/dovecot/README.md) uses the dovecot Stats protocol to gather metrics on configured domains. For more information,
+see the [Dovecot documentation](http://wiki2.dovecot.org/Statistics).
+
+### Elasticsearch
+
+Plugin ID: `elasticsearch`
+
+The [Elasticsearch input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/elasticsearch/README.md) queries endpoints to obtain [node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html)
+and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html)
+or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics.
+
+### Exec
+
+Plugin ID: `exec`
+
+The [Exec input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/exec/README.md) parses supported [Telegraf input data formats](/telegraf/v1.9/data_formats/input/) (InfluxDB Line Protocol, JSON, Graphite, Value, Nagios, Collectd, and Dropwizard) into metrics. Each Telegraf metric includes the measurement name, tags, fields, and timestamp.
+
+### Fail2ban
+
+Plugin ID: `fail2ban`
+
+The [Fail2ban input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/fail2ban/README.md) gathers the count of failed and banned IP addresses using [fail2ban](https://www.fail2ban.org/).
+
+### Fibaro
+
+Plugin ID: `fibaro`
+
+The [Fibaro input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/fibaro/README.md) makes HTTP calls to the Fibaro controller API to gather values of hooked devices. Those values could be true (`1`) or false (`0`) for switches, percentage for dimmers, temperature, etc.
+
+### File
+
+Plugin ID: `file`
+
+The [File input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/file/README.md) updates a list of files every interval and parses the contents using the selected input data format.
+
+Files will always be read in their entirety; if you wish to tail/follow a file, use the [tail input plugin](#tail) instead.
+
+### Filecount
+
+Plugin ID: `filecount`
+
+The [Filecount input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/filecount/README.md) counts files in directories that match certain criteria.
+
+### Filestat
+
+Plugin ID: `filestat`
+
+The [Filestat input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/filestat/README.md) gathers metrics about file existence, size, and other stats.
+ +### Fluentd + +Plugin ID: `fluentd` + +The [Fluentd input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/fluentd/README.md) gathers metrics from plugin endpoint provided by in_monitor plugin. This plugin understands +data provided by `/api/plugin.json` resource (`/api/config.json` is not covered). + +### Graylog + +Plugin ID: `graylog` + +The [Graylog input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/graylog/README.md) can collect data from remote Graylog service URLs. This plugin currently supports two +types of endpoints: + +* multiple (e.g., `http://[graylog-server-ip]:12900/system/metrics/multiple`) +* namespace (e.g., `http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}`) + +### HAproxy + +Plugin ID: `haproxy` + +The [HAproxy input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/haproxy/README.md) gathers metrics directly from any running HAproxy instance. It can do so by using CSV +generated by HAproxy status page or from admin sockets. + +### Hddtemp + +Plugin ID: `hddtemp` + +The [Hddtemp input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/hddtemp/README.md) reads data from `hddtemp` daemons. + +### HTTP + +Plugin ID: `http` + +The [HTTP input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/http/README.md) collects metrics from one or more HTTP (or HTTPS) endpoints. The endpoint should have metrics formatted in one of the [supported input data formats](/telegraf/v1.9/data_formats/input/). Each data format has its own unique set of configuration options which can be added to the input configuration. + +### HTTP Listener + +Plugin ID: `http_listener` + +The [HTTP Listener input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/http_listener/README.md) listens for messages sent via HTTP POST. 
Messages are expected in the [InfluxDB +Line Protocol input data format](/telegraf/v1.9/data_formats/input/influx) ONLY (other [Telegraf input data formats](/telegraf/v1.9/data_formats/input/) are not supported). +This plugin allows Telegraf to serve as a proxy or router for the `/write` endpoint of the InfluxDB HTTP API. + +### HTTP Listener v2 + +Plugin ID: `http_listener_v2` + +The [HTTP Listener v2 input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/http_listener_v2/README.md) listens for messages sent via HTTP POST. Messages are expected in the [InfluxDB +Line Protocol input data format](/telegraf/v1.9/data_formats/input/influx) ONLY (other [Telegraf input data formats](/telegraf/v1.9/data_formats/input/) are not supported). +This plugin allows Telegraf to serve as a proxy or router for the `/write` endpoint of the InfluxDB HTTP API. + +### HTTP Response + +Plugin ID: `http_response` + +The [HTTP Response input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/http_response/README.md) gathers metrics for HTTP responses. The measurements and fields include `response_time`, `http_response_code`, and `result_type`. Tags for measurements include `server` and `method`. + +### Icinga2 + +Plugin ID: `icinga2` + +The [Icinga2 input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/icinga2/README.md) gathers status on running services and hosts using the [Icinga2 Remote API](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api). + +### InfluxDB v1.x + +Plugin ID: `influxdb` + +The [InfluxDB v1.x input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/influxdb/README.md) gathers metrics from the exposed InfluxDB v1.x `/debug/vars` endpoint. Using Telegraf to extract these metrics to create a "monitor of monitors" is a best practice and allows you to reduce the overhead associated with +capturing and storing these metrics locally within the `_internal` database for production deployments. 
+[Read more about this approach here.](https://www.influxdata.com/blog/influxdb-debugvars-endpoint/) + +### InfluxDB Listener + +Plugin ID: `influxdb_listener` + +The [InfluxDB Listener input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/influxdb_listener/README.md) listens for requests sent +according to the [InfluxDB HTTP API](/influxdb/latest/guides/writing_data/). The intent of the +plugin is to allow Telegraf to serve as a proxy, or router, for the HTTP `/write` +endpoint of the InfluxDB HTTP API. + +**Note:** This plugin was previously known as `http_listener`. If you wish to +send general metrics via HTTP, use the +[HTTP Listener v2 input plugin](#http-listener-v2) instead. + +The `/write` endpoint supports the `precision` query parameter and can be set +to one of `ns`, `u`, `ms`, `s`, `m`, `h`. All other parameters are ignored and +defer to the output plugins configuration. + +When chaining Telegraf instances using this plugin, `CREATE DATABASE` requests +receive a `200 OK` response with message body `{"results":[]}` but they are not +relayed. The output configuration of the Telegraf instance which ultimately +submits data to InfluxDB determines the destination database. + +### Interrupts + +Plugin ID: `interrupts` + +The [Interrupts input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/interrupts/README.md) gathers metrics about IRQs, including `interrupts` (from `/proc/interrupts`) and `soft_interrupts` (from `/proc/softirqs`). + +### IPMI Sensor + +Plugin ID: `ipmi_sensor` + +The [IPMI Sensor input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/ipmi_sensor/README.md) queries the local machine or remote host sensor statistics using the `ipmitool` utility. + +### Ipset + +Plugin ID: `ipset` + +The [Ipset input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/ipset/README.md) gathers packets and bytes counters from Linux `ipset`. 
It uses the output of the command `ipset save`. Ipsets created without the `counters` option are ignored. + +### IPtables + +Plugin ID: `iptables` + +The [IPtables input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/iptables/README.md) gathers packets and bytes counters for rules within a set of table and chain from the Linux iptables firewall. + +### IPVS + +Plugin ID: `ipvs` + +The [IPVS input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/ipvs/README.md) uses the Linux kernel netlink socket interface to gather metrics about IPVS virtual and real servers. + +### Jenkins + +Plugin ID: `jenkins` + +The [Jenkins input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/jenkins/README.md) gathers information about the nodes and jobs running in a jenkins instance. + +This plugin does not require a plugin on jenkins and it makes use of Jenkins API to retrieve all the information needed. + +### Jolokia2 Agent + +Plugin ID: `jolokia2_agent` + +The [Jolokia2 Agent input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/jolokia2/README.md) reads JMX metrics from one or more [Jolokia](https://jolokia.org/) agent REST endpoints using the + [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html). + +### Jolokia2 Proxy + +Plugin ID: `jolokia2_proxy` + +The [Jolokia2 Proxy input plugin](https://github.com/influxdata/plugins/inputs/jolokia2/README.md) reads JMX metrics from one or more targets by interacting with a [Jolokia](https://jolokia.org/) proxy REST endpoint using the [Jolokia](https://jolokia.org/) [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html). 
+ +### JTI OpenConfig Telemetry + +Plugin ID: `jti_openconfig_telemetry` + +The [JTI OpenConfig Telemetry input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/jti_openconfig_telemetry/README.md) reads Juniper Networks implementation of OpenConfig telemetry data from listed sensors using the Junos Telemetry Interface. Refer to +[openconfig.net](http://openconfig.net/) for more details about OpenConfig and [Junos Telemetry Interface (JTI)](https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-telemetry-interface-oveview.html). + +### Kapacitor + +Plugin ID: `kapacitor` + +The [Kapacitor input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/kapacitor/README.md) will collect metrics from the given Kapacitor instances. + +### Kernel + +Plugin ID: `kernel` + +The [Kernel input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/kernel/README.md) gathers kernel statistics from `/proc/stat`. + +### Kernel VMStat + +Plugin ID: `kernel_vmstat` + +The [Kernel VMStat input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/kernel_vmstat/README.md) gathers kernel statistics from `/proc/vmstat`. + +### Kibana + +Plugin ID: `kibana` + +The [Kibana input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/kibana/README.md) queries the Kibana status API to obtain the health status of Kibana and some useful metrics. + +### Kubernetes + +Plugin ID: `kubernetes` + +>***Note:*** The Kubernetes input plugin is experimental and may cause high cardinality issues with moderate to +large Kubernetes deployments. + +The [Kubernetes input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/kubernetes/README.md) talks to the kubelet API using the `/stats/summary` endpoint to gather metrics about the running pods +and containers for a single host. 
It is assumed that this plugin is running as part of a daemonset within a +Kubernetes installation. This means that Telegraf is running on every node within the cluster. Therefore, you +should configure this plugin to talk to its locally running kubelet. + +### LeoFS + +Plugin ID: `leofs` + +The [LeoFS input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/leofs/README.md) gathers metrics of LeoGateway, LeoManager, and LeoStorage using SNMP. See [System monitoring](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/) in the [LeoFS documentation](https://leo-project.net/leofs/docs/) for more information. + +### Linux Sysctl FS + +Plugin ID: `linux_sysctl_fs` + +The [Linux Sysctl FS input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/linux_sysctl_fs/README.md) provides Linux system level file (`sysctl fs`) metrics. The documentation on these fields can be found at https://www.kernel.org/doc/Documentation/sysctl/fs.txt. + +### Logparser + +Plugin ID: `logparser` + +The [Logparser input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/logparser/README.md) streams and parses the given log files. Currently, it has the capability of parsing "grok" patterns +from log files, which also supports regular expression (regex) patterns. + +### Lustre2 + +Plugin ID: `lustre2` + +Lustre Jobstats allows for RPCs to be tagged with a value, such as a job's ID. This allows for per job statistics. +The [Lustre2 input plugin](https://github.com/influxdata/plugins/inputs/lustre2) collects statistics and tags the data with the `jobid`. + +### Mailchimp + +Plugin ID: `mailchimp` + +The [Mailchimp input plugin](https://github.com/influxdata/plugins/inputs/mailchimp) gathers metrics from the `/3.0/reports` MailChimp API. 
+ +### Mcrouter + +Plugin ID: `mcrouter` + +The [Mcrouter input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/mcrouter/README.md) gathers statistics data from a mcrouter instance. [Mcrouter](https://github.com/facebook/mcrouter) is a memcached protocol router, developed and maintained by Facebook, for scaling memcached (http://memcached.org/) deployments. It's a core component of cache infrastructure at Facebook and Instagram where mcrouter handles almost 5 billion requests per second at peak. + +### Mem + +Plugin ID: `mem` + +The [Mem input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/mem/README.md) collects system memory metrics. For a more complete explanation of the difference between used and actual_used RAM, see [Linux ate my ram](https://www.linuxatemyram.com/). + +### Memcached + +Plugin ID: `memcached` + +The [Memcached input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/memcached/README.md) gathers statistics data from a Memcached server. + +### Mesos + +Plugin ID: `mesos` + +The [Mesos input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/mesos/README.md) gathers metrics from Mesos. For more information, please check the +[Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page. + +### Mesosphere DC/OS + +Plugin ID: `dcos` + +The [Mesosphere DC/OS input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/dcos/README.md) gathers metrics from a DC/OS cluster's [metrics component](https://docs.mesosphere.com/1.10/metrics/). + +### Microsoft SQL Server + +Plugin ID: `sqlserver` + +The [Microsoft SQL Server input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/sqlserver/README.md) provides metrics for your Microsoft SQL Server instance. It currently works with SQL Server +versions 2008+. 
Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. + +### Minecraft + +Plugin ID: `minecraft` + +The [Minecraft input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/minecraft/README.md) uses the RCON protocol to collect statistics from a scoreboard on a Minecraft server. + +### MongoDB + +Plugin ID: `mongodb` + +The [MongoDB input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/mongodb/README.md) collects MongoDB stats exposed by `serverStatus` and few more and create a single +measurement containing values. + +### MQTT Consumer + +Plugin ID: `mqtt_consumer` + +The [MQTT Consumer input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/mqtt_consumer/README.md) reads from specified MQTT topics and adds messages to InfluxDB. Messages are in the +[Telegraf input data formats](/telegraf/v1.9/data_formats/input/). + +### MySQL + +Plugin ID: `mysql` + +The [MySQL input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/mysql/README.md) gathers the statistics data from MySQL servers. + +### NATS Consumer + +Plugin ID: `nats_consumer` + +The [NATS Consumer input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nats_consumer/README.md) reads from specified NATS subjects and adds messages to InfluxDB. Messages are expected in the [Telegraf input data formats](/telegraf/v1.9/data_formats/input/). A Queue Group is used when subscribing to subjects so multiple instances of Telegraf can read from a NATS cluster in parallel. + +### NATS Server Monitoring + +Plugin ID: `nats` + +The [NATS Server Monitoring input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nats/README.md) gathers metrics when using the [NATS Server monitoring server](https://www.nats.io/documentation/server/gnatsd-monitoring/). 
+ +### Net + +Plugin ID: `net` + +The [Net input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/net/NET_README.md) gathers metrics about network interface usage (Linux only). + +### Netstat + +Plugin ID: `netstat` + +The [Netstat input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/net/NETSTAT_README.md) gathers TCP metrics such as established, time-wait and sockets counts by using `lsof`. + +### Network Response + +Plugin ID: `net_response` + +The [Network Response input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/net_response/README.md) tests UDP and TCP connection response time. It can also check response text. + +### NGINX + +Plugin ID: `nginx` + +The [NGINX input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx/README.md) reads NGINX basic status information (`ngx_http_stub_status_module`). + +### NGINX VTS + +Plugin ID: `nginx_vts` + +The [NGINX VTS input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx_vts/README.md) gathers NGINX status using external virtual host traffic status module - https://github.com/vozlt/nginx-module-vts. This is an NGINX module that provides access to virtual host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of NGINX Plus. +For module configuration details, see the [NGINX VTS module documentation](https://github.com/vozlt/nginx-module-vts#synopsis). + +### NGINX Plus + +Plugin ID: `nginx_plus` + +The [NGINX Plus input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx_plus/README.md) is for NGINX Plus, the commercial version of the open source web server NGINX. To use this plugin you will need a license. 
+ +For more information, see [What’s the Difference between Open Source NGINX and NGINX Plus?](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). + +Structures for NGINX Plus have been built based on history of [status module documentation](http://nginx.org/en/docs/http/ngx_http_status_module.html). + +### NGINX Plus API + +Plugin ID: `nginx_plus_api` + +The [NGINX Plus API input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx_plus_api/README.md) gathers advanced status information for NGINX Plus servers. + +### NSQ + +Plugin ID: `nsq` + +The [NSQ input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nsq/README.md) gathers metrics from NSQD instances. + +### NSQ Consumer + +Plugin ID: `nsq_consumer` + +The [NSQ Consumer input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nsq_consumer/README.md) polls a specified NSQD topic and adds messages to InfluxDB. This plugin allows a message to be in any of the supported data_format types. + +### Nstat + +Plugin ID: `nstat` + +The [Nstat input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nstat/README.md) collects network metrics from `/proc/net/netstat`, `/proc/net/snmp`, and `/proc/net/snmp6` files. + +### NTPq + +Plugin ID: `ntpq` + +The [NTPq input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/ntpq/README.md) gets standard NTP query metrics, requires ntpq executable. + +### NVIDIA SMI + +Plugin ID: `nvidia-smi` + +The [NVIDIA SMI input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nvidia_smi/README.md) uses a query on the [NVIDIA System Management Interface (`nvidia-smi`)](https://developer.nvidia.com/nvidia-system-management-interface) binary to pull GPU stats including memory and GPU usage, temperature, and others. 
+ +### OpenLDAP + +Plugin ID: `openldap` + +The [OpenLDAP input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/openldap/README.md) gathers metrics from OpenLDAP's `cn=Monitor` backend. + +### OpenSMTPD + +Plugin ID: `opensmtpd` + +The [OpenSMTPD input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/opensmtpd/README.md) gathers stats from [OpenSMTPD](https://www.opensmtpd.org/), a free implementation of the server-side SMTP protocol. + +### PF + +Plugin ID: `pf` + +The [PF input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/pf/README.md) gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrieve information about +the state table: the number of current entries in the table, and counters for the number of searches, inserts, and +removals to the table. The pf plugin retrieves this information by invoking the `pfstat` command. + +### PgBouncer + +Plugin ID: `pgbouncer` + +The [PgBouncer input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/pgbouncer/README.md) provides metrics for your PgBouncer load balancer. For information about the metrics, see the [PgBouncer documentation](https://pgbouncer.github.io/usage.html). + +### Phusion Passenger + +Plugin ID: `passenger` + +The [Phusion Passenger input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/passenger/README.md) gets Phusion Passenger statistics using their command line utility `passenger-status`. + +### PHP FPM + +Plugin ID: `phpfpm` + +The [PHP FPM input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/phpfpm/README.md) gets phpfpm statistics using either HTTP status page or fpm socket. 
+ +### Ping + +Plugin ID: `ping` + +The [Ping input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/ping/README.md) measures the round-trip for ping commands, response time, and other packet statistics. + +### Postfix + +Plugin ID: `postfix` + +The [Postfix input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/postfix/README.md) reports metrics on the postfix queues. For each of the active, hold, incoming, maildrop, and +deferred [queues](http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue length (number of items), +size (bytes used by items), and age (age of oldest item in seconds). + +### PostgreSQL + +Plugin ID: `postgresql` + +The [PostgreSQL input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/postgresql/README.md) provides metrics for your PostgreSQL database. It currently works with PostgreSQL versions 8.1+. +It uses data from the built-in `pg_stat_database` and `pg_stat_bgwriter` views. The metrics recorded depend on your +version of PostgreSQL. + +### PostgreSQL Extensible + +Plugin ID: `postgresql_extensible` + +This [PostgreSQL Extensible input plugin](https://github.com/influxdata/plugins/inputs/postgresql_extensible) provides metrics for your Postgres database. It has been designed to parse SQL queries in the plugin section of `telegraf.conf` files. + +### PowerDNS + +Plugin ID: `powerdns` + +The [PowerDNS input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/powerdns/README.md) gathers metrics about PowerDNS using UNIX sockets. + +### Processes + +Plugin ID: `processes` + +The [Processes input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/processes/README.md) +gathers info about the total number of processes and groups them by status (zombie, sleeping, running, etc.). 
On Linux, this plugin requires access to `procfs` (`/proc`); on other operating systems, it requires access to execute `ps`. + +### Procstat + +Plugin ID: `procstat` + +The [Procstat input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/procstat/README.md) can be used to monitor system resource usage by an individual process using their `/proc` data. + +Processes can be specified either by `pid` file, by executable name, by command line pattern matching, by username, +by systemd unit name, or by cgroup name/path (in this order of priority). This plugin uses `pgrep` when an executable +name is provided to obtain the `pid`. The Procstat plugin transmits IO, memory, cpu, file descriptor-related +measurements for every process specified. A prefix can be set to isolate individual process specific measurements. + +The Procstat input plugin will tag processes according to how they are specified in the configuration. If a pid file is used, a +"pidfile" tag will be generated. On the other hand, if an executable is used an "exe" tag will be generated. + +### Prometheus Format + +Plugin ID: `prometheus` + +The [Prometheus Format input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/prometheus/README.md) gathers metrics from HTTP servers exposing metrics in Prometheus format. + +### Puppet Agent + +Plugin ID: `puppetagent` + +The [Puppet Agent input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/puppetagent/README.md) collects variables output from the `last_run_summary.yaml` file usually +located in `/var/lib/puppet/state/` during Puppet Agent runs. 
For more information, see [Puppet Monitoring: How to Monitor the Success or Failure of Puppet Runs](https://puppet.com/blog/puppet-monitoring-how-to-monitor-success-or-failure-of-puppet-runs) + +### RabbitMQ + +Plugin ID: `rabbitmq` + +The [RabbitMQ input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/rabbitmq/README.md) reads metrics from RabbitMQ servers via the [Management Plugin](https://www.rabbitmq.com/management.html). + +### Raindrops Middleware + +Plugin ID: `raindrops` + +The [Raindrops Middleware input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/raindrops/README.md) reads from the specified [Raindrops middleware](http://raindrops.bogomips.org/Raindrops/Middleware.html) +URI and adds the statistics to InfluxDB. + +### Redis + +Plugin ID: `redis` + +The [Redis input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/redis/README.md) gathers the results of the INFO Redis command. There are two separate measurements: `redis` +and `redis_keyspace`, the latter is used for gathering database-related statistics. + +Additionally the plugin also calculates the hit/miss ratio (`keyspace_hitrate`) and the elapsed time since the last RDB save (`rdb_last_save_time_elapsed`). + +### RethinkDB + +Plugin ID: `rethinkdb` + +The [RethinkDB input plugin](https://github.com/influxdata/plugins/inputs/rethinkdb) works with RethinkDB 2.3.5+ databases that requires username, password authorization, +and Handshake protocol v1.0. + +### Riak + +Plugin ID: `riak` + +The [Riak input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/riak/README.md) gathers metrics from one or more Riak instances. + +### Salesforce + +Plugin ID: `salesforce` + +The [Salesforce input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/salesforce/README.md) gathers metrics about the limits in your Salesforce organization and the remaining usage. 
+It fetches its data from the limits endpoint of the Salesforce REST API. + +### Sensors + +Plugin ID: `sensors` + +The [Sensors input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/sensors/README.md) collects sensor metrics with the sensors executable from the `lm-sensor` package. + +### SMART + +Plugin ID: `smart` + +The [SMART input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/smart/README.md) gets metrics using the command line utility `smartctl` for SMART (Self-Monitoring, Analysis +and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) +and solid-state drives (SSDs), which include most modern ATA/SATA, SCSI/SAS and NVMe disks. The plugin detects and +reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures. +See [smartmontools](https://www.smartmontools.org/). + +### SNMP + +Plugin ID: `snmp` + +The [SNMP input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/snmp/README.md) gathers metrics from SNMP agents. + +### Socket Listener + +Plugin ID: `socket_listener` + +The [Socket Listener input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/socket_listener/README.md) listens for messages from streaming (TCP, UNIX) or datagram (UDP, unixgram) protocols. Messages are expected in the +[Telegraf Input Data Formats](/telegraf/v1.9/data_formats/input/). + +### StatsD + +Plugin ID: `statsd` + +The [StatsD input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/statsd/README.md) is a special type of plugin which runs a backgrounded `statsd` listener service while Telegraf is running. +StatsD messages are formatted as described in the original [etsy statsd](https://github.com/etsy/statsd/blob/master/docs/metric_types.md) implementation. 
+ +### Swap + +Plugin ID: `swap` + +Supports: Linux only. + +The [Swap input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/swap/README.md) gathers metrics about swap memory usage. For more information about Linux swap spaces, see [All about Linux swap space](https://www.linux.com/news/all-about-linux-swap-space) + +### Syslog + +Plugin ID: `syslog` + +The [Syslog input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/syslog/README.md) listens for syslog messages transmitted over +[UDP](https://tools.ietf.org/html/rfc5426) or [TCP](https://tools.ietf.org/html/rfc5425). Syslog messages should be formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424). + +### Sysstat + +Plugin ID: `sysstat` + +The [Sysstat input plugin](https://github.com/influxdata/plugins/inputs/sysstat) collects [sysstat](https://github.com/sysstat/sysstat) system metrics with the sysstat +collector utility `sadc` and parses the created binary data file with the `sadf` utility. + +### System + +Plugin ID: `system` + +The [System input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/system/README.md) gathers general stats on system load, uptime, and number of users logged in. It is basically equivalent to the UNIX `uptime` command. + +### Tail + +Plugin ID: `tail` + +The [Tail input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/tail/README.md) "tails" a log file and parses each log message. + +### Teamspeak 3 + +Plugin ID: `teamspeak` + +The [Teamspeak 3 input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/teamspeak/README.md) uses the Teamspeak 3 ServerQuery interface of the Teamspeak server to collect statistics of one or more virtual servers. 
+ +### Telegraf v1.x + +Plugin ID: `internal` + +The [Telegraf v1.x input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/internal/README.md) collects metrics about the Telegraf v1.x agent itself. +Note that some metrics are aggregates across all instances of one type of plugin. + +### Temp + +Plugin ID: `temp` + +The [Temp input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/temp/README.md) collects temperature data from sensors. + +### Tengine Web Server + +Plugin ID: `tengine` + +The [Tengine Web Server input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/tengine/README.md) gathers status metrics from the [Tengine Web Server](http://tengine.taobao.org/) using the [Reqstat module](http://tengine.taobao.org/document/http_reqstat.html). + +### Trig + +Plugin ID: `trig` + +The [Trig input plugin](https://github.com/influxdata/plugins/inputs/trig) inserts sine and cosine waves for demonstration purposes. + +### Twemproxy + +Plugin ID: `twemproxy` + +The [Twemproxy input plugin](https://github.com/influxdata/plugins/inputs/twemproxy) gathers data from Twemproxy instances, processes Twemproxy server statistics, processes pool data, and processes backend server (Redis/Memcached) statistics. + +### Unbound + +Plugin ID: `unbound` + +The [Unbound input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/unbound/README.md) gathers statistics from [Unbound](https://www.unbound.net/), a validating, recursive, and caching DNS resolver. + +### Varnish + +Plugin ID: `varnish` + +The [Varnish input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/varnish/README.md) gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/). 
+ +### VMware vSphere + +Plugin ID: `vsphere` + +The [VMware vSphere input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/vsphere/README.md) uses the vSphere API to gather metrics from multiple vCenter servers (clusters, hosts, VMs, and data stores). For more information on the available performance metrics, see [Common vSphere Performance Metrics](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/vsphere/METRICS.md). + +### Webhooks + +Plugin ID: `webhooks` + +The [Webhooks input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/webhooks/README.md) starts an HTTPS server and registers multiple webhook listeners. + +#### Available webhooks + +* [Filestack](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/webhooks/filestack/README.md) +* [GitHub](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/webhooks/github/README.md) +* [Mandrill](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/webhooks/mandrill/README.md) +* [Papertrail](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/webhooks/papertrail/README.md) +* [Particle.io](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/webhooks/particle/README.md) +* [Rollbar](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/webhooks/rollbar/README.md) + +#### Add new webhooks +If you need a webhook that is not supported, consider [adding a new webhook](https://github.com/influxdata/telegraf/tree/release-1.9/plugins/inputs/webhooks#adding-new-webhooks-plugin). + + +### Windows Performance Counters + +Plugin ID: `win_perf_counters` + +Supports: Windows + +The way the [Windows Performance Counters input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/win_perf_counters/README.md) works is that on load of Telegraf, the plugin will be handed configuration +from Telegraf. +This configuration is parsed and then tested for validity, such as whether the Object, Instance, and Counter exist. 
+If it does not match at startup, it will not be fetched. +Exceptions to this are in cases where you query for all instances `""`. +By default the plugin does not return `_Total` when it is querying for all (`*`) as this is redundant. + +### Windows Services + +Plugin ID: `win_services` + +Supports: Windows + +The [Windows Services input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/win_services/README.md) reports Windows services info. + +### Wireless + +Plugin ID: `wireless` + +Supports: Linux only + +The [Wireless input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/wireless/README.md) gathers metrics about wireless link quality by reading the `/proc/net/wireless` file. This plugin currently supports Linux only. + +### X.509 Certificate + +Plugin ID: `x509_cert` + +The [X.509 Certificate input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/x509_cert/README.md) provides information about X.509 certificates accessible using the local file or network connection. + +### ZFS + +Plugin ID: `zfs` + +Supports: FreeBSD, Linux + +The [ZFS input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/zfs/README.md) provides metrics from your ZFS filesystems. It supports ZFS on Linux and FreeBSD. +It gets ZFS statistics from `/proc/spl/kstat/zfs` on Linux and from `sysctl` and `zpool` on FreeBSD. + +### Zipkin + +Plugin ID: `zipkin` + +The [Zipkin input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/zipkin/README.md) implements the Zipkin HTTP server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures. + +> ***Note:*** This plugin is experimental. Its data schema may be subject to change based on its main usage cases and the evolution of the OpenTracing standard. 
+ +### Zookeeper + +Plugin ID: `zookeeper` + +The [Zookeeper (`zookeeper`) input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/zookeeper/README.md) collects variables output from the `mntr` command [Zookeeper Admin](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html). + +## Deprecated Telegraf input plugins + +### Cassandra + +Plugin ID: `cassandra` + +> DEPRECATED as of version 1.7. The [Cassandra input plugin](https://github.com/influxdata/telegraf/tree/release-1.6/plugins/inputs/cassandra) collects Cassandra 3 / JVM metrics exposed as MBean attributes through the jolokia REST endpoint. +All metrics are collected for each server configured. + +### HTTP JSON + +Plugin ID: `httpjson` + +> DEPRECATED as of version 1.6; use the [HTTP input plugin](#http). + +The [HTTP JSON input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/httpjson/README.md) collects data from HTTP URLs which respond with JSON. +It flattens the JSON and finds all numeric values, treating them as floats. + +### HTTP Listener + +Plugin ID: `http_listener` + +The [HTTP Listener input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/http_listener/README.md) listens for messages sent via HTTP POST. Messages are expected in the [InfluxDB +Line Protocol input data format](/telegraf/v1.9/data_formats/input/influx) ONLY (other [Telegraf input data formats](/telegraf/v1.9/data_formats/input/) are not supported). +This plugin allows Telegraf to serve as a proxy or router for the `/write` endpoint of the InfluxDB HTTP API. + +> DEPRECATED as of version 1.9. Use either [HTTP Listener v2](#http-listener-v2) or the [InfluxDB Listener](#influxdb-v1-x) + + +### Jolokia + +Plugin ID: `jolokia` + +> DEPRECATED as of version 1.5; use the [Jolokia2 input plugin](#jolokia2-agent). 
 + +### SNMP Legacy + +Plugin ID: `snmp_legacy` + +> The [SNMP Legacy input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/snmp_legacy/README.md) is DEPRECATED. Use the [SNMP input plugin](https://github.com/influxdata/telegraf/tree/release-1.6/plugins/inputs/snmp). + +The SNMP Legacy input plugin gathers metrics from SNMP agents. + +### TCP Listener + +Plugin ID: `tcp_listener` + +> The [TCP Listener input plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/tcp_listener/README.md) is DEPRECATED as of version 1.3; use the [Socket Listener input plugin](#socket-listener). + +### UDP Listener + +Plugin ID: `udp_listener` + +> DEPRECATED as of version 1.3; use the [Socket Listener input plugin](#socket-listener). diff --git a/content/telegraf/v1.9/plugins/outputs.md b/content/telegraf/v1.9/plugins/outputs.md new file mode 100644 index 000000000..44ae46e0a --- /dev/null +++ b/content/telegraf/v1.9/plugins/outputs.md @@ -0,0 +1,209 @@ +--- +title: Telegraf output plugins +description: Use Telegraf output plugins to transform, decorate, and filter metrics. Supported output plugins include Datadog, Elasticsearch, Graphite, InfluxDB, Kafka, MQTT, Prometheus Client, Riemann, and Wavefront. +menu: + telegraf_1_9: + name: Output + weight: 20 + parent: Plugins +--- +Telegraf allows users to specify multiple output sinks in the configuration file. + +## Supported Telegraf output plugins + +### Amazon CloudWatch + +Plugin ID: `cloudwatch` + +The [Amazon CloudWatch output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/cloudwatch/README.md) sends metrics to Amazon CloudWatch. + +### Amazon Kinesis + +Plugin ID: `kinesis` + +The [Amazon Kinesis output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/kinesis/README.md) is an experimental plugin that is still in the early stages of development. It will batch up all of the points into one `PUT` request to Kinesis. 
This should save the number of API requests by a considerable level. + +### Amon + +Plugin ID: `amon` + +The [Amon output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/amon/README.md) writes metrics to an [Amon server](https://github.com/amonapp/amon). For details on the Amon Agent, see [Monitoring Agent](https://docs.amon.cx/agent/). This plugin requires an `apikey` and an `amoninstance` URL. + +If the point value being sent cannot be converted to a float64 value, the metric is skipped. + +Metrics are grouped by converting any `_` characters to `.` in the Point Name. + +### AMQP + +Plugin ID: `amqp` + +The [AMQP output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/amqp/README.md) writes to an AMQP 0-9-1 exchange; a prominent implementation of the Advanced Message Queuing Protocol (AMQP) is [RabbitMQ](https://www.rabbitmq.com/). + +Metrics are written to a topic exchange using `tag`, defined in the configuration file as `RoutingTag`, as a routing key. + +### Apache Kafka + +Plugin ID: `kafka` + +The [Apache Kafka output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/kafka/README.md) writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html) acting as a Kafka Producer. + +### CrateDB + +Plugin ID: `cratedb` + +The [CrateDB output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/cratedb/README.md) writes to [CrateDB](https://crate.io/), a real-time SQL database for machine data and IoT, using its [PostgreSQL protocol](https://crate.io/docs/crate/reference/protocols/postgres.html). + +### Datadog + +Plugin ID: `datadog` + +The [Datadog output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/datadog/README.md) writes to the [Datadog Metrics API](http://docs.datadoghq.com/api/#metrics) and requires an `apikey` which can be obtained [here](https://app.datadoghq.com/account/settings#api) for the account. 
+ +### Discard + +Plugin ID: `discard` + +The [Discard output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/discard/README.md) simply drops all metrics that are sent to it. It is only meant to be used for testing purposes. + +### Elasticsearch + +Plugin ID: `elasticsearch` + +The [Elasticsearch output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/elasticsearch/README.md) writes to Elasticsearch via HTTP using [Elastic](http://olivere.github.io/elastic/). Currently it only supports Elasticsearch 5.x series. + +### File + +Plugin ID: `file` + +The [File output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/file/README.md) writes Telegraf metrics to files. + +### Graphite + +Plugin ID: `graphite` + +The [Graphite output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/graphite/README.md) writes to [Graphite](http://graphite.readthedocs.org/en/latest/index.html) via raw TCP. + +### Graylog + +Plugin ID: `graylog` + +The [Graylog output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/graylog/README.md) writes to a Graylog instance using the `gelf` format. + +### HTTP + +Plugin ID: `http` + +The [HTTP output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/http/README.md) sends metrics in a HTTP message encoded using one of the output data formats. For `data_formats` that support batching, metrics are sent in batch format. + +### InfluxDB v1.x + +Plugin ID: `influxdb` + +The [InfluxDB v1.x output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/influxdb/README.md) writes to InfluxDB using HTTP or UDP. 
+ +### InfluxDB v2 + +Plugin ID: `influxdb_v2` + +The [InfluxDB v2 output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/influxdb_v2/README.md) writes metrics to the [InfluxDB 2.0](https://github.com/influxdata/platform) HTTP service. + +### Instrumental + +Plugin ID: `instrumental` + +The [Instrumental output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/instrumental/README.md) writes to the [Instrumental Collector API](https://instrumentalapp.com/docs/tcp-collector) and requires a Project-specific API token. + +Instrumental accepts stats in a format very close to Graphite, with the only difference being that the type of stat (gauge, increment) is the first token, separated from the metric itself by whitespace. The increment type is only used if the metric comes in as a counter through [[input.statsd]]. + +### Librato + +Plugin ID: `librato` + +The [Librato output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/librato/README.md) writes to the [Librato Metrics API](http://dev.librato.com/v1/metrics#metrics) and requires an `api_user` and `api_token` which can be obtained [here](https://metrics.librato.com/account/api_tokens) for the account. + +### Microsoft Azure Application Insights + +Plugin ID: `application_insights` + +The [Microsoft Azure Application Insights output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/application_insights/README.md) writes Telegraf metrics to [Application Insights (Microsoft Azure)](https://azure.microsoft.com/en-us/services/application-insights/). + +### Microsoft Azure Monitor + +Plugin ID: `azure_monitor` + +>**Note:** The Azure Monitor custom metrics service is currently in preview and not available in a subset of Azure regions. 
 + +The [Microsoft Azure Monitor output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/azure_monitor/README.md) sends custom metrics to [Microsoft Azure Monitor](https://azure.microsoft.com/en-us/services/monitor/). Azure Monitor has a metric resolution of one minute. To handle this in Telegraf, the Azure Monitor output plugin automatically aggregates metrics into one-minute buckets, which are then sent to Azure Monitor on every flush interval. + +For a Microsoft blog posting on using Telegraf with Microsoft Azure Monitor, see [Collect custom metrics for a Linux VM with the InfluxData Telegraf Agent](https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/metrics-store-custom-linux-telegraf). + +The metrics from each input plugin will be written to a separate Azure Monitor namespace, prefixed with `Telegraf/` by default. The field name for each metric is written as the Azure Monitor metric name. All field values are written as a summarized set that includes `min`, `max`, `sum`, and `count`. Tags are written as a dimension on each Azure Monitor metric. + +### MQTT Producer + +Plugin ID: `mqtt` + +The [MQTT Producer output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/mqtt/README.md) writes to the MQTT server using [supported output data formats](/telegraf/v1.9/data_formats/output/). + +### NATS Output + +Plugin ID: `nats` + +The [NATS Output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/nats/README.md) writes to a (list of) specified NATS instance(s). + +### NSQ + +Plugin ID: `nsq` + +The [NSQ output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/nsq/README.md) writes to a specified NSQD instance, usually local to the producer. It requires a server name and a topic name. 
 + +### OpenTSDB + +Plugin ID: `opentsdb` + +The [OpenTSDB output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/opentsdb/README.md) writes to an OpenTSDB instance using either the telnet or HTTP mode. + +Using the HTTP API is the recommended way of writing metrics since OpenTSDB 2.0. To use HTTP mode, set `useHttp` to true in config. You can also control how many metrics are sent in each HTTP request by setting `batchSize` in config. See http://opentsdb.net/docs/build/html/api_http/put.html for details. + +### Prometheus Client + +Plugin ID: `prometheus_client` + +The [Prometheus Client output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/prometheus_client/README.md) starts a [Prometheus](https://prometheus.io/) Client, which exposes all metrics on `/metrics` (default) to be polled by a Prometheus server. + +### Riemann + +Plugin ID: `riemann` + +The [Riemann output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/riemann/README.md) writes to [Riemann](http://riemann.io/) using TCP or UDP. + +### Socket Writer + +Plugin ID: `socket_writer` + +The [Socket Writer output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/socket_writer/README.md) writes to a UDP, TCP, or UNIX socket. It can output data in any of the [supported output formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md). + +### Stackdriver + +Plugin ID: `stackdriver` + +The [Stackdriver output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/stackdriver/README.md) writes to the [Google Cloud Stackdriver API](https://cloud.google.com/monitoring/api/v3/) +and requires [Google Cloud authentication](https://cloud.google.com/docs/authentication/getting-started) with Google Cloud using either a service account or user credentials. 
For details on pricing, see the [Stackdriver documentation](https://cloud.google.com/stackdriver/pricing). + +Requires `project` to specify where Stackdriver metrics will be delivered to. + +Metrics are grouped by the `namespace` variable and metric key, for example `custom.googleapis.com/telegraf/system/load5`. + +### Wavefront + +Plugin ID: `wavefront` + +The [Wavefront output plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/wavefront/README.md) writes to a Wavefront proxy, in Wavefront data format over TCP. + +## Deprecated Telegraf output plugins + +### Riemann Legacy + +Plugin ID: `riemann_legacy` + +The [Riemann Legacy output plugin](https://github.com/influxdata/telegraf/tree/release-1.9/plugins/outputs/riemann_legacy) will be deprecated in a future release, see https://github.com/influxdata/telegraf/issues/1878 for more details & discussion. diff --git a/content/telegraf/v1.9/plugins/processors.md b/content/telegraf/v1.9/plugins/processors.md new file mode 100644 index 000000000..ad89d2626 --- /dev/null +++ b/content/telegraf/v1.9/plugins/processors.md @@ -0,0 +1,103 @@ +--- +title: Telegraf processor plugins +description: Use Telegraf processor plugins in the InfluxData time series platform to process metrics and emit results based on the values processed. +menu: + telegraf_1_9: + name: Processor + identifier: processors + weight: 40 + parent: Plugins +--- + +Processor plugins process metrics as they pass through and immediately emit results based on the values they process. + + +## Supported Telegraf processor plugins + + +### Converter + +Plugin ID: `converter` + +The [Converter processor plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/processors/converter/README.md) is used to change the type of tag or field values. In addition to changing field types, it can convert between fields and tags. Values that cannot be converted are dropped. 
 + +### Enum + +Plugin ID: `enum` + +The [Enum processor plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/processors/enum/README.md) allows the configuration of value mappings for metric fields. The main use case for this is to rewrite status codes such as `red`, `amber`, and `green` by numeric values such as `0`, `1`, `2`. The plugin supports string and bool types for the field values. Multiple fields can be configured with separate value mappings for each field. Default mapping values can be configured to be used for all values, which are not contained in the `value_mappings`. The processor supports explicit configuration of a destination field. By default the source field is overwritten. + +### Override + +Plugin ID: `override` + +The [Override processor plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/processors/override/README.md) allows overriding all modifications that are supported by input plugins and aggregator plugins: + +* `name_override` +* `name_prefix` +* `name_suffix` +* tags + +All metrics passing through this processor will be modified accordingly. Select the metrics to modify using the standard measurement filtering options. + +Values of `name_override`, `name_prefix`, `name_suffix`, and already present tags with conflicting keys will be overwritten. Absent tags will be created. + +Use cases of this plugin encompass ensuring certain tags or naming conventions are adhered to irrespective of input plugin configurations, e.g., by `taginclude`. + +### Parser + +Plugin ID: `parser` + +The [Parser processor plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/processors/parser/README.md) parses defined fields containing the specified data format and creates new metrics based on the contents of the field. 
 + +### Printer + +Plugin ID: `printer` + +The [Printer processor plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/processors/printer/README.md) simply prints every metric passing through it. + +### Regex + +Plugin ID: `regex` + +The [Regex processor plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/processors/regex/README.md) transforms tag and field values using a regular expression (regex) pattern. If the `result_key` parameter is present, it can produce new tags and fields from existing ones. + +### Rename + +Plugin ID: `rename` + +The [Rename processor plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/processors/rename/README.md) renames InfluxDB measurements, fields, and tags. + +### Strings + +Plugin ID: `strings` + +The [Strings processor plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/processors/strings/README.md) maps certain Go string functions onto InfluxDB measurement, tag, and field values. Values can be modified in place or stored in another key. + +Implemented functions are: + +* `lowercase` +* `uppercase` +* `trim` +* `trim_left` +* `trim_right` +* `trim_prefix` +* `trim_suffix` + +Note that in this implementation these are processed in the order that they appear above. You can specify the `measurement`, `tag` or `field` that you want processed in each section and optionally a `dest` if you want the result stored in a new tag or field. You can specify lots of transformations on data with a single strings processor. + +### TopK + +Plugin ID: `topk` + +The [TopK processor plugin](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/processors/topk/README.md) is a filter designed to get the top series over a period of time. It can be tweaked to do its top `K` computation over a period of time, so spikes can be smoothed out. + +This processor goes through the following steps when processing a batch of metrics: + +1. 
Groups metrics in buckets using their tags and name as key. +2. Aggregates each of the selected fields for each bucket by the selected aggregation function (sum, mean, etc.). +3. Orders the buckets by one of the generated aggregations, returns all metrics in the top `K` buckets, then reorders the buckets by the next of the generated aggregations, returns all metrics in the top `K` buckets, etc, etc, etc, until it runs out of fields. + +The plugin makes sure not to duplicate metrics. + +Note that depending on the amount of metrics on each computed bucket, more than `K` metrics may be returned.