From f73709e52dde2a37595b3256f3fd4b0e55c8f62a Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Thu, 30 Jul 2020 10:34:24 -0600 Subject: [PATCH] ported telegraf 1.15 and kapacitor 1.5 --- content/kapacitor/v1.5/_index.md | 26 + .../v1.5/about_the_project/_index.md | 31 + .../kapacitor/v1.5/about_the_project/cla.md | 10 + .../v1.5/about_the_project/contributing.md | 10 + .../v1.5/about_the_project/license.md | 10 + .../releasenotes-changelog.md | 578 ++++ .../kapacitor/v1.5/administration/_index.md | 10 + .../v1.5/administration/configuration.md | 935 +++++++ .../kapacitor/v1.5/administration/security.md | 446 +++ .../administration/subscription-management.md | 147 + .../v1.5/administration/upgrading.md | 473 ++++ .../kapacitor/v1.5/event_handlers/_index.md | 133 + .../v1.5/event_handlers/aggregate.md | 84 + .../kapacitor/v1.5/event_handlers/alerta.md | 197 ++ .../kapacitor/v1.5/event_handlers/discord.md | 308 ++ .../kapacitor/v1.5/event_handlers/email.md | 187 ++ content/kapacitor/v1.5/event_handlers/exec.md | 109 + .../kapacitor/v1.5/event_handlers/hipchat.md | 200 ++ .../kapacitor/v1.5/event_handlers/kafka.md | 195 ++ content/kapacitor/v1.5/event_handlers/log.md | 107 + .../v1.5/event_handlers/microsoftteams.md | 177 ++ content/kapacitor/v1.5/event_handlers/mqtt.md | 201 ++ .../v1.5/event_handlers/opsgenie/v1.md | 181 ++ .../v1.5/event_handlers/opsgenie/v2.md | 184 ++ .../v1.5/event_handlers/pagerduty/v1.md | 157 ++ .../v1.5/event_handlers/pagerduty/v2.md | 164 ++ content/kapacitor/v1.5/event_handlers/post.md | 361 +++ .../kapacitor/v1.5/event_handlers/publish.md | 78 + .../kapacitor/v1.5/event_handlers/pushover.md | 165 ++ .../kapacitor/v1.5/event_handlers/sensu.md | 158 ++ .../kapacitor/v1.5/event_handlers/slack.md | 297 ++ .../kapacitor/v1.5/event_handlers/snmptrap.md | 163 ++ content/kapacitor/v1.5/event_handlers/talk.md | 144 + content/kapacitor/v1.5/event_handlers/tcp.md | 98 + .../kapacitor/v1.5/event_handlers/telegram.md | 321 +++ .../v1.5/event_handlers/victorops.md | 171 ++ content/kapacitor/v1.5/guides/_index.md | 47 + .../v1.5/guides/anomaly_detection.md | 742 +++++ .../v1.5/guides/continuous_queries.md | 201 ++ .../v1.5/guides/event-handler-setup.md | 440 +++ .../guides/hierarchical-alert-suppression.md | 82 + .../kapacitor/v1.5/guides/join_backfill.md | 261 ++ .../kapacitor/v1.5/guides/live_leaderboard.md | 291 ++ .../kapacitor/v1.5/guides/load_directory.md | 181 ++ .../v1.5/guides/reference_scripts.md | 17 + .../v1.5/guides/scheduled-downtime.md | 207 ++ content/kapacitor/v1.5/guides/socket_udf.md | 655 +++++ .../v1.5/guides/two-measurement-alert.md | 131 + content/kapacitor/v1.5/introduction/_index.md | 16 + .../v1.5/introduction/getting-started.md | 524 ++++ .../v1.5/introduction/install-docker.md | 433 +++ .../v1.5/introduction/installation.md | 131 + content/kapacitor/v1.5/nodes/_index.md | 64 + content/kapacitor/v1.5/nodes/alert_node.md | 2005 +++++++++++++ content/kapacitor/v1.5/nodes/barrier_node.md | 934 ++++++ content/kapacitor/v1.5/nodes/batch_node.md | 202 ++ .../v1.5/nodes/change_detect_node.md | 868 ++++++ content/kapacitor/v1.5/nodes/combine_node.md | 951 +++++++ content/kapacitor/v1.5/nodes/default_node.md | 903 ++++++ content/kapacitor/v1.5/nodes/delete_node.md | 901 ++++++ .../kapacitor/v1.5/nodes/derivative_node.md | 926 ++++++ .../v1.5/nodes/ec2_autoscale_node.md | 1039 +++++++ content/kapacitor/v1.5/nodes/eval_node.md | 993 +++++++ content/kapacitor/v1.5/nodes/flatten_node.md | 961 +++++++ content/kapacitor/v1.5/nodes/from_node.md | 1119 ++++++++ 
content/kapacitor/v1.5/nodes/group_by_node.md | 929 ++++++ content/kapacitor/v1.5/nodes/http_out_node.md | 884 ++++++ .../kapacitor/v1.5/nodes/http_post_node.md | 977 +++++++ .../v1.5/nodes/influx_d_b_out_node.md | 324 +++ .../kapacitor/v1.5/nodes/influx_q_l_node.md | 911 ++++++ content/kapacitor/v1.5/nodes/join_node.md | 1159 ++++++++ .../v1.5/nodes/k8s_autoscale_node.md | 1101 ++++++++ .../v1.5/nodes/kapacitor_loopback_node.md | 255 ++ content/kapacitor/v1.5/nodes/log_node.md | 900 ++++++ content/kapacitor/v1.5/nodes/no_op_node.md | 867 ++++++ content/kapacitor/v1.5/nodes/query_node.md | 1112 ++++++++ content/kapacitor/v1.5/nodes/sample_node.md | 881 ++++++ content/kapacitor/v1.5/nodes/shift_node.md | 880 ++++++ content/kapacitor/v1.5/nodes/sideload_node.md | 933 ++++++ .../kapacitor/v1.5/nodes/state_count_node.md | 898 ++++++ .../v1.5/nodes/state_duration_node.md | 922 ++++++ content/kapacitor/v1.5/nodes/stats_node.md | 909 ++++++ content/kapacitor/v1.5/nodes/stream_node.md | 202 ++ .../v1.5/nodes/swarm_autoscale_node.md | 1039 +++++++ content/kapacitor/v1.5/nodes/u_d_f_node.md | 913 ++++++ content/kapacitor/v1.5/nodes/union_node.md | 896 ++++++ content/kapacitor/v1.5/nodes/where_node.md | 876 ++++++ content/kapacitor/v1.5/nodes/window_node.md | 967 +++++++ content/kapacitor/v1.5/reference/spec.md | 84 + content/kapacitor/v1.5/tick/_index.md | 26 + content/kapacitor/v1.5/tick/expr.md | 361 +++ content/kapacitor/v1.5/tick/introduction.md | 152 + content/kapacitor/v1.5/tick/syntax.md | 1111 ++++++++ .../kapacitor/v1.5/troubleshooting/_index.md | 12 + .../frequently-asked-questions.md | 136 + content/kapacitor/v1.5/working/_index.md | 19 + content/kapacitor/v1.5/working/alerts.md | 190 ++ content/kapacitor/v1.5/working/api.md | 2492 +++++++++++++++++ content/kapacitor/v1.5/working/cli_client.md | 1077 +++++++ .../kapacitor/v1.5/working/custom_alert.md | 13 + .../kapacitor/v1.5/working/custom_output.md | 555 ++++ .../kapacitor/v1.5/working/kapa-and-chrono.md | 468 ++++ .../v1.5/working/scraping-and-discovery.md | 104 + .../kapacitor/v1.5/working/template_tasks.md | 411 +++ .../v1.5/working/using_alert_topics.md | 235 ++ content/telegraf/v1.15/_index.md | 27 + .../v1.15/about_the_project/_index.md | 26 + .../telegraf/v1.15/about_the_project/cla.md | 10 + .../v1.15/about_the_project/contributing.md | 10 + .../v1.15/about_the_project/license.md | 10 + .../release-notes-changelog.md | 2469 ++++++++++++++++ .../telegraf/v1.15/administration/_index.md | 21 + .../v1.15/administration/configuration.md | 423 +++ .../administration/enterprise-plugins.md | 18 + .../v1.15/administration/troubleshooting.md | 89 + .../v1.15/administration/windows_service.md | 48 + content/telegraf/v1.15/concepts/_index.md | 21 + .../concepts/aggregator_processor_plugins.md | 62 + content/telegraf/v1.15/concepts/glossary.md | 103 + content/telegraf/v1.15/concepts/metrics.md | 28 + content/telegraf/v1.15/data_formats/_index.md | 21 + .../v1.15/data_formats/input/_index.md | 46 + .../v1.15/data_formats/input/collectd.md | 48 + .../telegraf/v1.15/data_formats/input/csv.md | 111 + .../v1.15/data_formats/input/dropwizard.md | 179 ++ .../v1.15/data_formats/input/graphite.md | 55 + .../telegraf/v1.15/data_formats/input/grok.md | 226 ++ .../v1.15/data_formats/input/influx.md | 27 + .../telegraf/v1.15/data_formats/input/json.md | 224 ++ .../v1.15/data_formats/input/logfmt.md | 42 + .../v1.15/data_formats/input/nagios.md | 29 + .../v1.15/data_formats/input/value.md | 44 + .../v1.15/data_formats/input/wavefront.md | 28 + 
.../v1.15/data_formats/output/_index.md | 35 + .../v1.15/data_formats/output/carbon2.md | 60 + .../v1.15/data_formats/output/graphite.md | 58 + .../v1.15/data_formats/output/influx.md | 41 + .../v1.15/data_formats/output/json.md | 89 + .../v1.15/data_formats/output/nowmetric.md | 90 + .../v1.15/data_formats/output/splunkmetric.md | 147 + .../v1.15/data_formats/template-patterns.md | 145 + content/telegraf/v1.15/guides/_index.md | 12 + content/telegraf/v1.15/guides/using_http.md | 116 + content/telegraf/v1.15/introduction/_index.md | 22 + .../v1.15/introduction/downloading.md | 12 + .../v1.15/introduction/getting-started.md | 135 + .../v1.15/introduction/installation.md | 443 +++ content/telegraf/v1.15/plugins/_index.md | 25 + content/telegraf/v1.15/plugins/aggregators.md | 14 + content/telegraf/v1.15/plugins/inputs.md | 17 + content/telegraf/v1.15/plugins/outputs.md | 14 + content/telegraf/v1.15/plugins/plugin-list.md | 45 + content/telegraf/v1.15/plugins/processors.md | 15 + layouts/partials/header.html | 2 +- layouts/shortcodes/telegraf/verify.md | 32 + 155 files changed, 56635 insertions(+), 1 deletion(-) create mode 100644 content/kapacitor/v1.5/_index.md create mode 100644 content/kapacitor/v1.5/about_the_project/_index.md create mode 100644 content/kapacitor/v1.5/about_the_project/cla.md create mode 100644 content/kapacitor/v1.5/about_the_project/contributing.md create mode 100644 content/kapacitor/v1.5/about_the_project/license.md create mode 100644 content/kapacitor/v1.5/about_the_project/releasenotes-changelog.md create mode 100644 content/kapacitor/v1.5/administration/_index.md create mode 100644 content/kapacitor/v1.5/administration/configuration.md create mode 100644 content/kapacitor/v1.5/administration/security.md create mode 100644 content/kapacitor/v1.5/administration/subscription-management.md create mode 100644 content/kapacitor/v1.5/administration/upgrading.md create mode 100644 content/kapacitor/v1.5/event_handlers/_index.md create mode 100644 content/kapacitor/v1.5/event_handlers/aggregate.md create mode 100644 content/kapacitor/v1.5/event_handlers/alerta.md create mode 100644 content/kapacitor/v1.5/event_handlers/discord.md create mode 100644 content/kapacitor/v1.5/event_handlers/email.md create mode 100644 content/kapacitor/v1.5/event_handlers/exec.md create mode 100644 content/kapacitor/v1.5/event_handlers/hipchat.md create mode 100644 content/kapacitor/v1.5/event_handlers/kafka.md create mode 100644 content/kapacitor/v1.5/event_handlers/log.md create mode 100644 content/kapacitor/v1.5/event_handlers/microsoftteams.md create mode 100644 content/kapacitor/v1.5/event_handlers/mqtt.md create mode 100644 content/kapacitor/v1.5/event_handlers/opsgenie/v1.md create mode 100644 content/kapacitor/v1.5/event_handlers/opsgenie/v2.md create mode 100644 content/kapacitor/v1.5/event_handlers/pagerduty/v1.md create mode 100644 content/kapacitor/v1.5/event_handlers/pagerduty/v2.md create mode 100644 content/kapacitor/v1.5/event_handlers/post.md create mode 100644 content/kapacitor/v1.5/event_handlers/publish.md create mode 100644 content/kapacitor/v1.5/event_handlers/pushover.md create mode 100644 content/kapacitor/v1.5/event_handlers/sensu.md create mode 100644 content/kapacitor/v1.5/event_handlers/slack.md create mode 100644 content/kapacitor/v1.5/event_handlers/snmptrap.md create mode 100644 content/kapacitor/v1.5/event_handlers/talk.md create mode 100644 content/kapacitor/v1.5/event_handlers/tcp.md create mode 100644 content/kapacitor/v1.5/event_handlers/telegram.md create 
mode 100644 content/kapacitor/v1.5/event_handlers/victorops.md create mode 100644 content/kapacitor/v1.5/guides/_index.md create mode 100644 content/kapacitor/v1.5/guides/anomaly_detection.md create mode 100644 content/kapacitor/v1.5/guides/continuous_queries.md create mode 100644 content/kapacitor/v1.5/guides/event-handler-setup.md create mode 100644 content/kapacitor/v1.5/guides/hierarchical-alert-suppression.md create mode 100644 content/kapacitor/v1.5/guides/join_backfill.md create mode 100644 content/kapacitor/v1.5/guides/live_leaderboard.md create mode 100644 content/kapacitor/v1.5/guides/load_directory.md create mode 100644 content/kapacitor/v1.5/guides/reference_scripts.md create mode 100644 content/kapacitor/v1.5/guides/scheduled-downtime.md create mode 100644 content/kapacitor/v1.5/guides/socket_udf.md create mode 100644 content/kapacitor/v1.5/guides/two-measurement-alert.md create mode 100644 content/kapacitor/v1.5/introduction/_index.md create mode 100644 content/kapacitor/v1.5/introduction/getting-started.md create mode 100644 content/kapacitor/v1.5/introduction/install-docker.md create mode 100644 content/kapacitor/v1.5/introduction/installation.md create mode 100644 content/kapacitor/v1.5/nodes/_index.md create mode 100644 content/kapacitor/v1.5/nodes/alert_node.md create mode 100644 content/kapacitor/v1.5/nodes/barrier_node.md create mode 100644 content/kapacitor/v1.5/nodes/batch_node.md create mode 100644 content/kapacitor/v1.5/nodes/change_detect_node.md create mode 100644 content/kapacitor/v1.5/nodes/combine_node.md create mode 100644 content/kapacitor/v1.5/nodes/default_node.md create mode 100644 content/kapacitor/v1.5/nodes/delete_node.md create mode 100644 content/kapacitor/v1.5/nodes/derivative_node.md create mode 100644 content/kapacitor/v1.5/nodes/ec2_autoscale_node.md create mode 100644 content/kapacitor/v1.5/nodes/eval_node.md create mode 100644 content/kapacitor/v1.5/nodes/flatten_node.md create mode 100644 content/kapacitor/v1.5/nodes/from_node.md create mode 100644 content/kapacitor/v1.5/nodes/group_by_node.md create mode 100644 content/kapacitor/v1.5/nodes/http_out_node.md create mode 100644 content/kapacitor/v1.5/nodes/http_post_node.md create mode 100644 content/kapacitor/v1.5/nodes/influx_d_b_out_node.md create mode 100644 content/kapacitor/v1.5/nodes/influx_q_l_node.md create mode 100644 content/kapacitor/v1.5/nodes/join_node.md create mode 100644 content/kapacitor/v1.5/nodes/k8s_autoscale_node.md create mode 100644 content/kapacitor/v1.5/nodes/kapacitor_loopback_node.md create mode 100644 content/kapacitor/v1.5/nodes/log_node.md create mode 100644 content/kapacitor/v1.5/nodes/no_op_node.md create mode 100644 content/kapacitor/v1.5/nodes/query_node.md create mode 100644 content/kapacitor/v1.5/nodes/sample_node.md create mode 100644 content/kapacitor/v1.5/nodes/shift_node.md create mode 100644 content/kapacitor/v1.5/nodes/sideload_node.md create mode 100644 content/kapacitor/v1.5/nodes/state_count_node.md create mode 100644 content/kapacitor/v1.5/nodes/state_duration_node.md create mode 100644 content/kapacitor/v1.5/nodes/stats_node.md create mode 100644 content/kapacitor/v1.5/nodes/stream_node.md create mode 100644 content/kapacitor/v1.5/nodes/swarm_autoscale_node.md create mode 100644 content/kapacitor/v1.5/nodes/u_d_f_node.md create mode 100644 content/kapacitor/v1.5/nodes/union_node.md create mode 100644 content/kapacitor/v1.5/nodes/where_node.md create mode 100644 content/kapacitor/v1.5/nodes/window_node.md create mode 100644 
content/kapacitor/v1.5/reference/spec.md create mode 100644 content/kapacitor/v1.5/tick/_index.md create mode 100644 content/kapacitor/v1.5/tick/expr.md create mode 100644 content/kapacitor/v1.5/tick/introduction.md create mode 100644 content/kapacitor/v1.5/tick/syntax.md create mode 100644 content/kapacitor/v1.5/troubleshooting/_index.md create mode 100644 content/kapacitor/v1.5/troubleshooting/frequently-asked-questions.md create mode 100644 content/kapacitor/v1.5/working/_index.md create mode 100644 content/kapacitor/v1.5/working/alerts.md create mode 100644 content/kapacitor/v1.5/working/api.md create mode 100644 content/kapacitor/v1.5/working/cli_client.md create mode 100644 content/kapacitor/v1.5/working/custom_alert.md create mode 100644 content/kapacitor/v1.5/working/custom_output.md create mode 100644 content/kapacitor/v1.5/working/kapa-and-chrono.md create mode 100644 content/kapacitor/v1.5/working/scraping-and-discovery.md create mode 100644 content/kapacitor/v1.5/working/template_tasks.md create mode 100644 content/kapacitor/v1.5/working/using_alert_topics.md create mode 100644 content/telegraf/v1.15/_index.md create mode 100644 content/telegraf/v1.15/about_the_project/_index.md create mode 100644 content/telegraf/v1.15/about_the_project/cla.md create mode 100644 content/telegraf/v1.15/about_the_project/contributing.md create mode 100644 content/telegraf/v1.15/about_the_project/license.md create mode 100644 content/telegraf/v1.15/about_the_project/release-notes-changelog.md create mode 100644 content/telegraf/v1.15/administration/_index.md create mode 100644 content/telegraf/v1.15/administration/configuration.md create mode 100644 content/telegraf/v1.15/administration/enterprise-plugins.md create mode 100644 content/telegraf/v1.15/administration/troubleshooting.md create mode 100644 content/telegraf/v1.15/administration/windows_service.md create mode 100644 content/telegraf/v1.15/concepts/_index.md create mode 100644 content/telegraf/v1.15/concepts/aggregator_processor_plugins.md create mode 100644 content/telegraf/v1.15/concepts/glossary.md create mode 100644 content/telegraf/v1.15/concepts/metrics.md create mode 100644 content/telegraf/v1.15/data_formats/_index.md create mode 100644 content/telegraf/v1.15/data_formats/input/_index.md create mode 100644 content/telegraf/v1.15/data_formats/input/collectd.md create mode 100644 content/telegraf/v1.15/data_formats/input/csv.md create mode 100644 content/telegraf/v1.15/data_formats/input/dropwizard.md create mode 100644 content/telegraf/v1.15/data_formats/input/graphite.md create mode 100644 content/telegraf/v1.15/data_formats/input/grok.md create mode 100644 content/telegraf/v1.15/data_formats/input/influx.md create mode 100644 content/telegraf/v1.15/data_formats/input/json.md create mode 100644 content/telegraf/v1.15/data_formats/input/logfmt.md create mode 100644 content/telegraf/v1.15/data_formats/input/nagios.md create mode 100644 content/telegraf/v1.15/data_formats/input/value.md create mode 100644 content/telegraf/v1.15/data_formats/input/wavefront.md create mode 100644 content/telegraf/v1.15/data_formats/output/_index.md create mode 100644 content/telegraf/v1.15/data_formats/output/carbon2.md create mode 100644 content/telegraf/v1.15/data_formats/output/graphite.md create mode 100644 content/telegraf/v1.15/data_formats/output/influx.md create mode 100644 content/telegraf/v1.15/data_formats/output/json.md create mode 100644 content/telegraf/v1.15/data_formats/output/nowmetric.md create mode 100644 
content/telegraf/v1.15/data_formats/output/splunkmetric.md create mode 100644 content/telegraf/v1.15/data_formats/template-patterns.md create mode 100644 content/telegraf/v1.15/guides/_index.md create mode 100644 content/telegraf/v1.15/guides/using_http.md create mode 100644 content/telegraf/v1.15/introduction/_index.md create mode 100644 content/telegraf/v1.15/introduction/downloading.md create mode 100644 content/telegraf/v1.15/introduction/getting-started.md create mode 100644 content/telegraf/v1.15/introduction/installation.md create mode 100644 content/telegraf/v1.15/plugins/_index.md create mode 100644 content/telegraf/v1.15/plugins/aggregators.md create mode 100644 content/telegraf/v1.15/plugins/inputs.md create mode 100644 content/telegraf/v1.15/plugins/outputs.md create mode 100644 content/telegraf/v1.15/plugins/plugin-list.md create mode 100644 content/telegraf/v1.15/plugins/processors.md create mode 100644 layouts/shortcodes/telegraf/verify.md
diff --git a/content/kapacitor/v1.5/_index.md b/content/kapacitor/v1.5/_index.md
new file mode 100644
index 000000000..561e02303
--- /dev/null
+++ b/content/kapacitor/v1.5/_index.md
@@ -0,0 +1,26 @@
+---
+title: Kapacitor 1.5 documentation
+
+menu:
+  kapacitor:
+    name: v1.5
+    identifier: kapacitor_1_5
+    weight: 1
+---
+
+Kapacitor is an open source data processing framework that makes it easy to create
+alerts, run ETL jobs, and detect anomalies.
+Kapacitor is the final piece of the [TICK stack](https://influxdata.com/time-series-platform/).
+
+## Key features
+
+Here are some of the features that Kapacitor currently supports that make it a
+great choice for data processing:
+
+* Process both streaming data and batch data.
+* Query data from InfluxDB on a schedule, and receive data via the
+[line protocol](/influxdb/v1.4/write_protocols/line/) and any other method InfluxDB supports.
+* Perform any transformation currently possible in [InfluxQL](/influxdb/v1.7/query_language/spec/).
+* Store transformed data back in InfluxDB.
+* Add custom user-defined functions to detect anomalies.
+* Integrate with HipChat, OpsGenie, Alerta, Sensu, PagerDuty, Slack, and more.
diff --git a/content/kapacitor/v1.5/about_the_project/_index.md b/content/kapacitor/v1.5/about_the_project/_index.md
new file mode 100644
index 000000000..7ded82588
--- /dev/null
+++ b/content/kapacitor/v1.5/about_the_project/_index.md
@@ -0,0 +1,31 @@
+---
+title: About the project
+aliases:
+  - kapacitor/v1.5/contributing/
+menu:
+  kapacitor_1_5_ref:
+    name: About the project
+    weight: 1
+---
+
+Kapacitor is open source and we welcome contributions from the community.
+
+If you want Kapacitor to be able to output to your own endpoint, see this [How To](/kapacitor/v1.5/working/custom_output/).
+
+## [Release Notes/Changelog](/kapacitor/v1.5/about_the_project/releasenotes-changelog/)
+
+## [Contributing](https://github.com/influxdata/kapacitor/blob/master/CONTRIBUTING.md)
+
+## [CLA](https://influxdata.com/community/cla/)
+
+## [Licenses](https://github.com/influxdata/kapacitor/blob/master/LICENSE)
+
+## Third Party Software
+InfluxData products contain third party software, which means the copyrighted, patented, or otherwise legally protected
+software of third parties that is incorporated in InfluxData products.
+
+Third party suppliers make no representation nor warranty with respect to such third party software or any portion thereof.
+Third party suppliers assume no liability for any claim that might arise with respect to such third party software, nor for a
+customer’s use of or inability to use the third party software.
+
+The [list of third party software components, including references to associated licenses and other materials](https://github.com/influxdata/kapacitor/blob/master/LICENSE_OF_DEPENDENCIES.md) is maintained on a version-by-version basis.
diff --git a/content/kapacitor/v1.5/about_the_project/cla.md b/content/kapacitor/v1.5/about_the_project/cla.md
new file mode 100644
index 000000000..84d7be27e
--- /dev/null
+++ b/content/kapacitor/v1.5/about_the_project/cla.md
@@ -0,0 +1,10 @@
+---
+title: CLA
+
+menu:
+  kapacitor_1_5_ref:
+    name: CLA
+    weight: 30
+    parent: About the project
+    url: https://influxdb.com/community/cla.html
+---
diff --git a/content/kapacitor/v1.5/about_the_project/contributing.md b/content/kapacitor/v1.5/about_the_project/contributing.md
new file mode 100644
index 000000000..183d0a581
--- /dev/null
+++ b/content/kapacitor/v1.5/about_the_project/contributing.md
@@ -0,0 +1,10 @@
+---
+title: Contributing
+
+menu:
+  kapacitor_1_5_ref:
+    name: Contributing
+    weight: 10
+    parent: About the project
+    url: https://github.com/influxdata/kapacitor/blob/master/CONTRIBUTING.md
+---
diff --git a/content/kapacitor/v1.5/about_the_project/license.md b/content/kapacitor/v1.5/about_the_project/license.md
new file mode 100644
index 000000000..4117ee6d4
--- /dev/null
+++ b/content/kapacitor/v1.5/about_the_project/license.md
@@ -0,0 +1,10 @@
+---
+title: License
+
+menu:
+  kapacitor_1_5_ref:
+    name: License
+    weight: 40
+    parent: About the project
+    url: https://github.com/influxdata/kapacitor/blob/master/LICENSE
+---
diff --git a/content/kapacitor/v1.5/about_the_project/releasenotes-changelog.md b/content/kapacitor/v1.5/about_the_project/releasenotes-changelog.md
new file mode 100644
index 000000000..7a86b652a
--- /dev/null
+++ b/content/kapacitor/v1.5/about_the_project/releasenotes-changelog.md
@@ -0,0 +1,578 @@
+---
+title: Release Notes/Changelog
+menu:
+  kapacitor_1_5_ref:
+    parent: About the project
+---
+
+## v1.5.6 [2020-07-17]
+
+### Features
+
+- Add [Microsoft Teams event handler](/kapacitor/v1.5/event_handlers/microsoftteams/), thanks @mmindenhall!
+- Add [Discord event handler](/kapacitor/v1.5/event_handlers/discord/), thanks @mattnotmitt!
+- Add [support for TLS 1.3](/kapacitor/v1.5/administration/configuration/#transport-layer-security-tls-settings).
+
+### Bug fixes
+
+- Fix UDF agent Python 3.0 issues, thanks @elohmeier!
+- Add `scraper_test` package to fix discovery service lost configuration (`discovery.Config`), thanks @flisky!
+- Use `systemd` for Amazon Linux 2.
+- Correct issue with `go vet` invocation in `.hooks/pre-commit` file that caused the hook to fail, thanks @mattnotmitt!
+- Update `build.py` to support `arm64`, thanks @povlhp!
+- Fix panic when setting a zero interval for ticker, which affected deadman and stats nodes.
+- Fix a panic on int div-by-zero and return an error instead.
+- Fix issue that caused Kapacitor to ignore the `pushover().userKey('')` TICKscript operation.
+
+## v1.5.5 [2020-04-20]
+
+### Breaking changes
+
+- Update release checksums (used to verify release bits haven't been tampered with) from MD5 (Message Digest, 128-bit digest) to SHA-256 (Secure Hash Algorithm 2, 256-bit digest).
+
+### Bug fixes
+
+- Update the Kafka client to ensure errors are added to Kapacitor logs.
+
+## v1.5.4 [2020-01-16]
+
+### Features
+
+- Add the ability to use templates when specifying an MQTT (message queue telemetry transport) topic.
+- Upgrade to support Python 3.0 for user-defined functions (UDFs).
+
+### Bug fixes
+
+- Upgrade the Kafka library to set the timestamp correctly.
+- Upgrade to Go 1.13, fixing various `go vet` issues.
+
+## v1.5.3 [2019-06-18]
+
+{{% warn %}}
+### Authentication and shared secret
+If using Kapacitor v1.5.3 or newer and InfluxDB with [authentication enabled](/influxdb/v1.7/administration/authentication_and_authorization/),
+set the `[http].shared-secret` option in your `kapacitor.conf` to the shared secret of your InfluxDB instances.
+
+```toml
+# ...
+[http]
+  # ...
+  shared-secret = "youramazingsharedsecret"
+```
+
+If this option is not set, is set to an empty string, or does not match InfluxDB's shared secret,
+the integration with InfluxDB will fail and Kapacitor will not start.
+Kapacitor will output an error similar to:
+
+```
+kapacitord[4313]: run: open server: open service *influxdb.Service: failed to link subscription on startup: signature is invalid
+```
+{{% /warn %}}
+
+#### Important update [2019-07-11]
+- Some customers have reported a high number of CLOSE_WAIT connections.
+  Upgrade to this release to resolve this issue.
+
+### Features
+- Add ability to skip SSL verification with an alert post node.
+- Add TLS configuration options.
+
+### Bug fixes
+
+- Use default transport consistently.
+- Fix deadlock in barrier node when delete is used.
+- Make RPM create files with correct ownership on install.
+- Delete group stats when a group is deleted.
+- Avoid extra allocation when building GroupID.
+
+## v1.5.2 [2018-12-12]
+
+### Features
+
+- Add barrier node support to JoinNode.
+- Add ability to expire groups using the BarrierNode.
+- Add alert/persist-topics to config.
+- Add multiple field support to the ChangeDetectNode.
+- Add links to PagerDuty v2 alerts.
+- Add additional metadata to Sensu alerts.
+
+### Bug fixes
+
+- Fix join not catching up fast enough after a pause in the data stream.
+
+## v1.5.1 [2018-08-06]
+
+### Bug fixes
+
+- `pagerduty2` should use `routingKey` rather than `serviceKey`.
+- Fix KafkaTopic not working from TICKscript.
+- Improve Kafka alert throughput.
+
+## v1.5.0 [2018-05-17]
+
+### Features
+
+- Add alert inhibitors that allow an alert to suppress events from other matching alerts.
+- Config format updated to allow for more than one Slack configuration.
+- Added a new Kapacitor node changeDetect that emits a value for each time a series field changes.
+- Add recoverable field to JSON alert response to indicate whether the alert will auto-recover.
+- Update OpsGenie integration to use the v2 API.
+  To upgrade to using the new API, simply update your configuration and TICKscripts to use opsgenie2 instead of opsgenie.
+  If your `opsgenie` configuration uses the `recovery_url` option, for `opsgenie2` you will need to change it to the `recovery_action` option.
+  This is because the new v2 API is not structured with static URLs, and so only the action can be defined and not the entire URL.
+- Add https-private-key option to httpd config.
+- Add `.quiet` to all nodes to silence any errors reported by the node.
+- Add Kafka event handler.
+
+### Bug fixes
+
+- Fix Kapacitor ticks generating a hash instead of their actual given name.
+- Fix deadlock in load service when task has an error.
+- Support PagerDuty API v2.
+- Fix bug where you could not delete a topic handler with the same name as its topic.
+- Adjust PagerDuty v2 service-test names and capture detailed error messages.
+- Fix Kafka configuration.
+
+## v1.4.1 [2018-03-13]
+
+### Bug fixes
+
+- Fix bug where task type was invalid when using var for stream/batch.
+
+## v1.4.0 [2017-12-08]
+
+### Release notes
+
+Kapacitor v1.4.0 adds many new features, highlighted here:
+
+- Load directory service for adding topic handlers, tasks, and templates from `dir`.
+- Structured logging with logging API endpoints that can be used to tail logs for specified tasks.
+- Autoscale support for Docker Swarm and AWS EC2.
+- Sideload data into your TICKscript streams from external sources.
+- Fully-customizable HTTP Post body for the alert Post handler and the HTTP Post node.
+
+### Breaking changes
+
+#### Change over internal API to use message passing semantics.
+
+The `Combine` and `Flatten` nodes previously operated (erroneously) across batch boundaries: this has been fixed.
+
+### Features
+
+- Added service for loading topic handlers, tasks, and templates from `dir`.
+- Topic handler file format modified to include TopicID and HandlerID.
+- TICKscript now allows task descriptions exclusively through a TICKscript.
+- Task types (batch or stream) no longer need to be specified.
+- `dbrp` expressions were added to TICKscript.
+- Added support for AWS EC2 autoscaling services.
+- Added support for Docker Swarm autoscaling services.
+- Added `BarrierNode` to emit `BarrierMessage` periodically.
+- Added `Previous` state.
+- Added support to persist replay status after it finishes.
+- Added `alert.post` and `https_post` timeouts to ensure cleanup of hung connections.
+- Added subscription modes to InfluxDB subscriptions.
+- Added linear fill support for `QueryNode`.
+- Added MQTT alert handler.
+- Added built-in functions for converting timestamps to integers.
+- Added `bools` field types to UDFs.
+- Added stateless `now()` function to get the current local time.
+- Added support for timeout, tags, and service templates in the Alerta AlertNode.
+- Added support for custom HTTP Post bodies via a template system.
+- Added support allowing for the addition of the HTTP status code as a field when using HTTP Post.
+- Added `logfmt` support and refactored logging.
+- Added support for exposing logs via the API. API is released as a technical preview.
+- Added support for `{{ .Duration }}` on Alert Message property.
+- Added support for [JSON lines](https://en.wikipedia.org/wiki/JSON_Streaming#Line-delimited_JSON) for streaming HTTP logs.
+- Added new node `Sideload` that allows loading data from files into the stream of data. Data can be loaded using a hierarchy.
+- Promote Alert API to stable v1 path.
+- Change `WARN` level logs to `INFO` level.
+- Updated Go version to 1.9.2.
+
+### Bug fixes
+
+- Fixed issue where log API checked the wrong header for the desired content type.
+- Fixed VictorOps "data" field being a string instead of actual JSON.
+- Fixed panic with `MQTT.toml` configuration generation.
+- Fix oddly-generated TOML for MQTT & HTTPpost.
+- Address Idle Barrier dropping all messages when source has clock offset.
+- Address crash of Kapacitor on Windows x64 when starting a recording.
+- Allow for `.yml` file extensions in `define-topic-handler`.
+- Fix HTTP server error logging.
+- Fixed bugs with stopping a running UDF agent.
+- Fixed unclear error messages for missing fields that are arguments to functions.
+- Fixed bad PagerDuty test that required server info.
+- Added SNMP sysUpTime to SNMP Trap service.
+- Fixed panic on recording replay with HTTPPostHandler.
+- Fixed Kubernetes in-cluster master API DNS resolution.
+- Remove the pidfile after the server has exited.
+- Fix Logs API writing multiple HTTP headers.
+- Fixed missing dependency in RPM package.
+- Force tar owner/group to be `root`.
+- Fixed install/remove of Kapacitor on non-systemd Debian/Ubuntu systems.
+- Fixed packaging to not enable services on RHEL systems.
+- Fixed issues with recursive symlinks on systemd systems.
+- Fixed invalid default MQTT config.
+
+## v1.3.3 [2017-08-11]
+
+### Bug fixes
+
+- Expose pprof without authentication, if enabled.
+
+## v1.3.2 [2017-08-08]
+
+### Bug fixes
+
+- Use details field from alert node in PagerDuty.
+
+## v1.3.1 [2017-06-02]
+
+### Bug fixes
+
+- Proxy from environment for HTTP request to Slack.
+- Fix derivative node preserving fields from previous point in stream tasks.
+
+## v1.3.0 [2017-05-22]
+
+### Release Notes
+
+This release has two major features.
+
+1. Addition of scraping and discovery for Prometheus-style data collection.
+2. Updates to the alert topic system.
+
+Here is a quick example of how to configure Kapacitor to scrape discovered targets.
+First, configure a discoverer; here we use the file-discovery discoverer.
+Next, configure a scraper to use that discoverer.
+
+```
+# Configure file discoverer
+[[file-discovery]]
+  enabled = true
+  id = "discover_files"
+  refresh-interval = "10s"
+  ##### This will look for prometheus json files
+  ##### File format is here https://prometheus.io/docs/operating/configuration/#%3Cfile_sd_config%3E
+  files = ["/tmp/prom/*.json"]
+
+# Configure scraper
+[[scraper]]
+  enabled = true
+  name = "node_exporter"
+  discoverer-id = "discover_files"
+  discoverer-service = "file-discovery"
+  db = "prometheus"
+  rp = "autogen"
+  type = "prometheus"
+  scheme = "http"
+  metrics-path = "/metrics"
+  scrape-interval = "2s"
+  scrape-timeout = "10s"
+```
+
+Add the above snippet to your `kapacitor.conf` file.
+
+Create the below snippet as the file `/tmp/prom/localhost.json`:
+
+```
+[{
+  "targets": ["localhost:9100"]
+}]
+```
+
+Start the Prometheus `node_exporter` locally.
+
+Now, start Kapacitor and it will discover the `localhost:9100` `node_exporter` target and begin scraping it for metrics.
+For more details on the scraping and discovery systems, see the full documentation [here](/kapacitor/v1.3/pull_metrics/scraping-and-discovery/).
+
+The second major feature in this release is a set of changes to the alert topic system.
+The previous release introduced this new system as a technical preview; with this release, the alerting service has been simplified.
+Alert handlers now only have a single action and belong to a single topic.
+
+The handler definition has been simplified as a result.
+Here are some example alert handlers using the new structure:
+
+```yaml
+id: my_handler
+kind: pagerDuty
+options:
+  serviceKey: XXX
+```
+
+```yaml
+id: aggregate_by_1m
+kind: aggregate
+options:
+  interval: 1m
+  topic: aggregated
+```
+
+```yaml
+id: publish_to_system
+kind: publish
+options:
+  topics: [ system ]
+```
+
+To define a handler now you must specify which topic the handler belongs to.
+For example, to define the above aggregate handler on the system topic, use this command:
+
+```sh
+kapacitor define-handler system aggregate_by_1m.yaml
+```
+
+For more details on the alerting system, see the full documentation [here](https://docs.influxdata.com/kapacitor/v1.3/alerts).
+
+### Breaking changes
+
+#### Fixed inconsistency with JSON data from alerts.
+
+The alert handlers Alerta, Log, OpsGenie, PagerDuty, Post, and VictorOps allow extra opaque data to be attached to alert notifications.
+That opaque data was inconsistent and this change fixes that.
+Depending on how that data was consumed, this could result in a breaking change.
+Since the original behavior was inconsistent, we decided it would be best to fix the issue now and make it consistent for all future builds.
+Specifically, in the JSON result data, the old key `Series` is now always `series`, and the old key `Err` is now always `error`, instead of only for some of the outputs.
+
+#### Refactor the alerting service.
+
+The change is completely breaking for the technical preview alerting service, a.k.a. the new alert topic handler features.
+The change boils down to simplifying how you define and interact with topics.
+Alert handlers now only ever have a single action and belong to a single topic.
+An automatic migration from old to new handler definitions will be performed during startup.
+See the updated API docs.
+
+#### Add generic error counters to every node type.
+
+Renamed `query_errors` to `errors` in batch node.
+Renamed `eval_errors` to `errors` in eval node.
+
+#### The UDF agent Go API has changed.
+
+The changes now make it so that the agent package is self-contained.
+
+#### A bug was fixed around missing fields in the derivative node.
+
+The behavior of the node changes slightly in order to provide a consistent fix to the bug.
+The breaking change is that now the time of the points returned is from the right-hand (current) point,
+instead of the left-hand (previous) point.
+
+### Features
+
+- Allow Sensu handler to be specified.
+- Added type signatures to Kapacitor functions.
+- Added `isPresent` operator for verifying whether a value is present (part of [#1284](https://github.com/influxdata/kapacitor/pull/1284)).
+- Added Kubernetes scraping support.
+- Added `groupBy exclude` and added `dropOriginalFieldName` to `flatten`.
+- Added KapacitorLoopback node to be able to send data from a task back into Kapacitor.
+- Added headers to alert POST requests.
+- Added TLS configuration in Slack service for Mattermost compatibility.
+- Added generic HTTP Post node.
+- Expose server-specific information in alert templates.
+- Added Pushover integration.
+- Added `working_cardinality` stat to each node type that tracks the number of groups per node.
+- Added StateDuration node.
+- Default HipChat URL should be blank.
+- Add API endpoint for performing Kapacitor database backups.
+- Added source for Sensu alert as parameter.
+- Added discovery and scraping services for metrics collection (pull model).
+- Updated Go version to 1.7.5.
+
+### Bug fixes
+
+- Fixed broken ENV var configuration overrides for the Kubernetes section.
+- Copy batch points slice before modification, fixes potential panics and data corruption.
+- Use the Prometheus metric name as the measurement name by default for scrape data.
+- Fixed possible deadlock for scraper configuration updating.
+- Fixed panic with concurrent writes to same points in state tracking nodes.
+- Simplified static-discovery configuration.
+- Fixed panic in InfluxQL node with missing field.
+- Fixed missing working_cardinality stats on stateDuration and stateCount nodes.
+- Fixed panic in scraping TargetManager.
+- Use ProxyFromEnvironment for all outgoing HTTP traffic.
+- Fixed bug where batch queries would be missing all fields after the first nil field.
+- Fix case-sensitivity for Telegram `parseMode` value.
+- Fix pprof debug endpoint.
+- Fixed hang in configuration API when updating a configuration section.
+  Now if the service update process takes too long, the request will time out and return an error.
+  Previously the request would block forever.
+- Make the Alerta auth token prefix configurable and default it to Bearer.
+- Fixed logrotate file to correctly rotate error log.
+- Fixed bug with alert duration being incorrect after restoring alert state.
+- Fixed bug parsing dbrp values with quotes.
+- Fixed panic on loading replay files without a file extension.
+- Fixed bug in Default Node not updating batch tags and groupID.
+  Also, an empty string on a tag value is now a sufficient condition for the default conditions to be applied.
+  See [#1233](https://github.com/influxdata/kapacitor/pull/1233) for more information.
+- Fixed dot view syntax to use xlabels and not create invalid quotes.
+- Fixed corruption of recordings list after deleting all recordings.
+- Fixed missing "vars" key when listing tasks.
+- Fixed bug where aggregates would not be able to change type.
+- Fixed panic when the process cannot stat the data dir.
+
+## v1.2.0 [2017-01-23]
+
+### Release Notes
+
+A new system for working with alerts has been introduced.
+This alerting system allows you to configure topics for alert events and then configure handlers for various topics.
+This way alert generation is decoupled from alert handling.
+
+Existing TICKscripts will continue to work without modification.
+
+To use this new alerting system, remove any explicit alert handlers from your TICKscript and specify a topic.
+Then configure the handlers for the topic.
+
+```
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy('host')
+    |alert()
+        // Specify the topic for the alert
+        .topic('cpu')
+        .info(lambda: "value" > 60)
+        .warn(lambda: "value" > 70)
+        .crit(lambda: "value" > 80)
+        // No handlers are configured in the script, they are instead defined on the topic via the API.
+```
+
+The API exposes endpoints to query the state of each alert and endpoints for configuring alert handlers.
+See the [API docs](https://docs.influxdata.com/kapacitor/latest/api/api/) for more details.
+The Kapacitor CLI has been updated with commands for defining alert handlers.
+
+This release introduces a new feature where you can window based off the number of points instead of their time.
+For example:
+
+```
+stream
+    |from()
+        .measurement('my-measurement')
+    // Emit window for every 10 points with 100 points per window.
+    |window()
+        .periodCount(100)
+        .everyCount(10)
+    |mean('value')
+    |alert()
+        .crit(lambda: "mean" > 100)
+        .slack()
+        .channel('#alerts')
+```
+
+With this change, alert nodes will have an anonymous topic created for them.
+This topic is managed like all other topics, preserving state, etc., across restarts.
+As a result, existing alert nodes will now remember the state of alerts after restarts and after disabling/enabling a task.
+
+>NOTE: The new alerting features are being released under technical preview.
+This means breaking changes may be made in later releases until the feature is considered complete.
+See the [API docs on technical preview](https://docs.influxdata.com/kapacitor/v1.2/api/api/#technical-preview) for specifics of how this affects the API.
+
+### Features
+
+- Add new query property for aligning group by intervals to start times.
+- Add new alert API, with support for configuring handlers and topics.
+- Move Alerta API token to header and add option to skip TLS verification.
+- Add SNMP trap service for alerting.
+- Add fillPeriod option to Window node, so that the first emit waits until the period has elapsed before emitting.
+- Now when the Window node's `every` value is zero, the window will be emitted immediately for each new point.
+- Preserve alert state across restarts and disable/enable actions.
+- You can now window based on count in addition to time.
+- Enable Markdown in Slack attachments.
+
+### Bug fixes
+
+- Fix issue with the Union node buffering more points than necessary.
+- Fix panic during close of failed startup when connecting to InfluxDB.
+- Fix panic during replays.
+- Fix logrotate.d ignoring the Kapacitor configuration due to a bad file mode.
+- Fix panic during failed aggregate results.
+
+## v1.1.1 [2016-12-02]
+
+### Release Notes
+
+No changes to Kapacitor, only upgrading to Go 1.7.4 for security patches.
+
+## v1.1.0 [2016-10-07]
+
+### Release Notes
+
+The new K8sAutoscale node allows you to automatically scale Kubernetes deployments driven by any metrics Kapacitor consumes.
+For example, to scale a deployment `myapp` based off requests per second:
+
+```
+// The target requests per second per host
+var target = 100.0
+
+stream
+    |from()
+        .measurement('requests')
+        .where(lambda: "deployment" == 'myapp')
+    // Compute the moving average of the last 5 minutes
+    |movingAverage('requests', 5*60)
+        .as('mean_requests_per_second')
+    |k8sAutoscale()
+        .resourceName('app')
+        .kind('deployments')
+        .min(4)
+        .max(100)
+        // Compute the desired number of replicas based on target.
+        .replicas(lambda: int(ceil("mean_requests_per_second" / target)))
+```
+
+New API endpoints have been added to be able to configure InfluxDB clusters and alert handlers dynamically without needing to restart the Kapacitor daemon.
+Along with the ability to dynamically configure a service, API endpoints have been added to test the configurable services.
+See the [API docs](https://docs.influxdata.com/kapacitor/latest/api/api/) for more details.
+
+>NOTE: The `connect_errors` stat from the query node was removed since the client changed; all errors are now counted in the `query_errors` stat.
+
+### Features
+
+- Add a Kubernetes autoscaler node. You can now autoscale your Kubernetes deployments via Kapacitor.
+- Add new API endpoint for dynamically overriding sections of the configuration.
+- Upgrade to using Go 1.7.
+- Add API endpoints for testing service integrations.
+- Add support for Slack icon emojis and custom usernames.
+- Bring Kapacitor up to parity with available InfluxQL functions in 1.1.
+
+### Bug fixes
+
+- Fix bug where keeping a list of fields that were not referenced in the eval expressions would cause an error.
+- Fix the number of subscriptions statistic.
+- Fix inconsistency with InfluxDB by adding configuration option to set a default retention policy.
+- Sort and dynamically adjust column width in CLI output.
+- Add missing strLength function.
+
+## v1.0.2 [2016-10-06]
+
+### Bug fixes
+
+- Fix bug where errors to save cluster/server ID files were ignored.
+- Create data_dir on startup if it does not exist.
+
+## v1.0.1 [2016-09-26]
+
+### Features
+
+- Add TCP alert handler.
+- Add ability to set alert message as a field.
+- Add `.create` property to InfluxDBOut node, which when set will create the database and retention policy on task start.
+- Allow duration / duration in TICKscript.
+- Add support for string manipulation functions.
+- Add ability to set specific HTTP port and hostname per configured InfluxDB cluster.
+
+### Bug fixes
+
+- Fixed typo in the default configuration file.
+- Change `|log()` output to JSON format so it has a self-documenting structure.
+- Fix issue with TMax and the Holt-Winters method.
+- Fix bug with TMax and group by time.
+
+## v1.0.0 [2016-09-02]
+
+### Release Notes
+
+First release of Kapacitor v1.0.0.
diff --git a/content/kapacitor/v1.5/administration/_index.md b/content/kapacitor/v1.5/administration/_index.md
new file mode 100644
index 000000000..c1a86acc0
--- /dev/null
+++ b/content/kapacitor/v1.5/administration/_index.md
@@ -0,0 +1,10 @@
+---
+title: Administration
+
+menu:
+  kapacitor_1_5:
+    name: Administration
+    weight: 80
+---
+
+## [Upgrading to Kapacitor 1.5](/kapacitor/v1.5/administration/upgrading/)
diff --git a/content/kapacitor/v1.5/administration/configuration.md b/content/kapacitor/v1.5/administration/configuration.md
new file mode 100644
index 000000000..79b97bdc1
--- /dev/null
+++ b/content/kapacitor/v1.5/administration/configuration.md
@@ -0,0 +1,935 @@
+---
+title: Configuring Kapacitor
+
+menu:
+  kapacitor_1_5:
+    weight: 10
+    parent: Administration
+---
+
+* [Startup](#startup)
+* [Kapacitor configuration file](#the-kapacitor-configuration-file)
+* [Kapacitor environment variables](#kapacitor-environment-variables)
+* [Configuring with the HTTP API](#configuring-with-the-http-api)
+
+Basic installation and startup of the Kapacitor service is covered in
+[Getting started with Kapacitor](/kapacitor/v1.5/introduction/getting-started/).
+The basic principles of working with Kapacitor described there should be understood before continuing here.
+This document presents Kapacitor configuration in greater detail.
+
+Kapacitor service properties are configured using key-value pairs organized
+into groups.
+Any property key can be located by following its path in the configuration file (for example, `[http].https-enabled` or `[slack].channel`).
+Values for configuration keys are declared in the configuration file.
+On POSIX systems this file is located by default at `/etc/kapacitor/kapacitor.conf`.
+On Windows systems a sample configuration file can be found in the same directory as `kapacitord.exe`.
+The location of this file can be defined at startup with the `-config` argument.
+The path to the configuration file can also be declared using the environment variable `KAPACITOR_CONFIG_PATH`.
+Values declared in this file can be overridden by environment variables beginning with the token `KAPACITOR_`.
+Some values can also be dynamically altered using the HTTP API when the key `[config-override].enabled` is set to `true`.
+
+Four primary mechanisms for configuring different aspects of the Kapacitor service are available, listed here in descending order by which they may be overridden:
+
+* The configuration file.
+* Environment variables.
+* The HTTP API (for optional services and the InfluxDB connection).
+* Command line arguments (for changing hostname and logging).
+
+> ***Note:*** Setting the property `skip-config-overrides` in the configuration file to `true` will disable configuration overrides at startup.
+
+## Startup
+
+To specify how to load and run the Kapacitor daemon, set the following command line options:
+
+* `-config`: Path to the configuration file.
+* `-hostname`: Hostname that will override the hostname specified in the configuration file.
+* `-pidfile`: File where the process ID will be written.
+* `-log-file`: File where logs will be written.
+* `-log-level`: Threshold for writing messages to the log file. Valid values include `debug`, `info`, `warn`, and `error`.
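+
+For example, a startup invocation combining these options might look like the
+following sketch (the hostname and file paths here are illustrative; substitute
+your own):
+
+```sh
+kapacitord \
+  -config /etc/kapacitor/kapacitor.conf \
+  -hostname kapacitor.example.com \
+  -pidfile /var/run/kapacitor/kapacitor.pid \
+  -log-file /var/log/kapacitor/kapacitor.log \
+  -log-level info
+```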
+
+### Systemd
+
+On POSIX systems, when the Kapacitor daemon starts as part of `systemd`, environment variables can be set in the file `/etc/default/kapacitor`.
+
+1. To start Kapacitor as part of `systemd`, do one of the following:
+
+    - ```sh
+      $ sudo systemctl enable kapacitor
+      ```
+
+    - ```sh
+      $ sudo systemctl enable kapacitor --now
+      ```
+
+2. Define where the PID file and log file will be written:
+
+    a. Add a line like the following into the `/etc/default/kapacitor` file:
+
+    ```sh
+    KAPACITOR_OPTS="-pidfile=/home/kapacitor/kapacitor.pid -log-file=/home/kapacitor/logs/kapacitor.log"
+    ```
+
+    b. Restart Kapacitor:
+
+    ```sh
+    sudo systemctl restart kapacitor
+    ```
+
+The environment variable `KAPACITOR_OPTS` is one of a few special variables used
+by Kapacitor at startup.
+For more information on working with environment variables,
+see [Kapacitor environment variables](#kapacitor-environment-variables)
+below.
+
+## Kapacitor configuration file
+
+The default configuration can be displayed using the `config` command of the Kapacitor daemon.
+
+```bash
+kapacitord config
+```
+
+A sample configuration file is also available in the Kapacitor code base.
+The most current version can be accessed on [GitHub](https://github.com/influxdata/kapacitor/blob/master/etc/kapacitor/kapacitor.conf).
+
+Use the Kapacitor HTTP API to get current configuration settings and values that can be changed while the Kapacitor service is running. See [Retrieving the current configuration](/kapacitor/v1.5/working/api/#retrieving-the-current-configuration).
+
+### TOML
+
+The configuration file is based on [TOML](https://github.com/toml-lang/toml).
+Important configuration properties are identified by case-sensitive keys
+to which values are assigned.
+Key-value pairs are grouped into tables whose identifiers are delineated by brackets.
+Tables can also be grouped into table arrays.
+
+The most common value types found in the Kapacitor configuration file include
+the following:
+
+* **String** (declared in double quotes)
+  - Examples: `host = "localhost"`, `id = "myconsul"`, `refresh-interval = "30s"`.
+* **Integer**
+  - Examples: `port = 80`, `timeout = 0`, `udp-buffer = 1000`.
+* **Float**
+  - Example: `threshold = 0.0`.
+* **Boolean**
+  - Examples: `enabled = true`, `global = false`, `no-verify = false`.
+* **Array**
+  - Examples: `my_database = [ "default", "longterm" ]`, `urls = ["http://localhost:8086"]`.
+* **Inline Table**
+  - Example: `basic-auth = { username = "my-user", password = "my-pass" }`.
+
+Table grouping identifiers are declared within brackets.
+For example, `[http]`, `[deadman]`, `[kubernetes]`.
+
+An array of tables is declared within double brackets.
+For example, `[[influxdb]]`, `[[mqtt]]`, `[[dns]]`.
+
+### Organization
+
+Most keys are declared in the context of a table grouping, but the basic properties of the Kapacitor system are defined in the root context of the configuration file.
+The four basic properties of the Kapacitor service include:
+
+* `hostname`: String declaring the DNS hostname where the Kapacitor daemon runs.
+* `data_dir`: String declaring the file system directory where core Kapacitor data is stored.
+* `skip-config-overrides`: Boolean indicating whether or not to skip configuration overrides.
+* `default-retention-policy`: String declaring the default retention policy to be used on the InfluxDB database.
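+
+Together, these root-level keys form a short block at the top of `kapacitor.conf`,
+before any table grouping. A minimal sketch, assuming the stock defaults from a
+package install (adjust the values for your environment):
+
+```toml
+# Root-level properties of the Kapacitor service
+hostname = "localhost"
+data_dir = "/var/lib/kapacitor"
+skip-config-overrides = false
+default-retention-policy = ""
+```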
+
+Table groupings and arrays of tables follow the basic properties and include essential and optional features,
+including specific alert handlers and mechanisms for service discovery and data scraping.
+
+### Essential tables
+
+#### HTTP
+
+The Kapacitor service requires an HTTP connection. Important
+HTTP properties, such as a bind address and the path to an HTTPS certificate,
+are defined in the `[http]` table.
+
+**Example: The HTTP grouping**
+
+```toml
+...
+[http]
+  # HTTP API Server for Kapacitor
+  # This server is always on,
+  # it serves both as a write endpoint
+  # and as the API endpoint for all other
+  # Kapacitor calls.
+  bind-address = ":9092"
+  log-enabled = true
+  write-tracing = false
+  pprof-enabled = false
+  https-enabled = false
+  https-certificate = "/etc/ssl/influxdb-selfsigned.pem"
+  ### Use a separate private key location.
+  # https-private-key = ""
+...
+```
+
+#### Transport Layer Security (TLS) settings
+
+If the TLS configuration settings are not specified, Kapacitor supports all of the cipher suite IDs listed and all of the TLS versions implemented in the [Constants section of the Go `crypto/tls` package documentation](https://golang.org/pkg/crypto/tls/#pkg-constants), depending on the version of Go used to build Kapacitor.
+Use the `SHOW DIAGNOSTICS` command to see the version of Go used to build Kapacitor.
+
+##### `ciphers = [ "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256" ]`
+
+Determines the available set of cipher suites. For a list of available ciphers, which depends on the version of Go, see https://golang.org/pkg/crypto/tls/#pkg-constants.
+You can use the query `SHOW DIAGNOSTICS` to see the version of Go used to build Kapacitor.
+If not specified, uses the default settings from Go's crypto/tls package.
+
+##### `min-version = "tls1.3"`
+
+Minimum version of the TLS protocol that will be negotiated. Valid values include: `tls1.0`, `tls1.1`, `tls1.2`, and `tls1.3`. If not specified, uses the default settings from the [Go `crypto/tls` package](https://golang.org/pkg/crypto/tls/#pkg-constants).
+In this example, `tls1.3` specifies the minimum version as TLS 1.3.
+
+##### `max-version = "tls1.3"`
+
+Maximum version of the TLS protocol that will be negotiated. Valid values include: `tls1.0`, `tls1.1`, `tls1.2`, and `tls1.3`. If not specified, uses the default settings from the [Go `crypto/tls` package](https://golang.org/pkg/crypto/tls/#pkg-constants).
+
+##### Recommended configuration for "modern compatibility"
+
+InfluxData recommends configuring your Kapacitor server's TLS settings for "modern compatibility" — this provides a higher level of security and assumes that backward compatibility is not required.
+Our recommended TLS configuration settings for `ciphers`, `min-version`, and `max-version` are based on Mozilla's "modern compatibility" TLS server configuration described in [Security/Server Side TLS](https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility).
+
+InfluxData's recommended TLS settings for "modern compatibility" are specified in the following configuration settings example.
+
+```toml
+ciphers = [ "TLS_AES_128_GCM_SHA256",
+  "TLS_AES_256_GCM_SHA384",
+  "TLS_CHACHA20_POLY1305_SHA256"
+]
+
+min-version = "tls1.3"
+
+max-version = "tls1.3"
+```
+
+> ***Important:*** The order of the cipher suite IDs in the `ciphers` setting determines which algorithms are selected by priority. The TLS `min-version` and the `max-version` settings in the example above restrict support to TLS 1.3.
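+
+With `https-enabled = true` and settings like the above in place, one way to
+confirm which protocol versions the server accepts is a handshake test from the
+outside. A sketch using the stock `:9092` bind address (assumes the `openssl`
+command-line tool, version 1.1.1 or later for TLS 1.3 support, is installed):
+
+```sh
+# With the modern-compatibility settings above, a TLS 1.2 handshake
+# should be rejected while a TLS 1.3 handshake succeeds.
+openssl s_client -connect localhost:9092 -tls1_2
+openssl s_client -connect localhost:9092 -tls1_3
+```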
+
+#### Config override
+
+The `[config-override]` table contains only one key, which enables or disables the ability to
+override certain values through the HTTP API. It is enabled by default.
+
+**Example: The Config Override grouping**
+
+```toml
+...
+[config-override]
+  # Enable/Disable the service for overriding configuration via the HTTP API.
+  enabled = true
+...
+```
+
+#### Logging
+
+The Kapacitor service uses logging to monitor and inspect its behavior.
+The path to the log and the log threshold is defined in the `[logging]` table.
+
+**Example: The Logging grouping**
+
+```toml
+...
+[logging]
+  # Destination for logs
+  # Can be a path to a file or 'STDOUT', 'STDERR'.
+  file = "/var/log/kapacitor/kapacitor.log"
+  # Logging level can be one of:
+  # DEBUG, INFO, WARN, ERROR, or OFF
+  level = "INFO"
+...
+```
+
+#### Load
+
+Starting with Kapacitor 1.4, the Kapacitor service includes a feature
+that enables the loading of TICKscript tasks when the service loads.
+The path to these scripts is defined in this table.
+
+**Example: The Load grouping**
+
+```toml
+...
+[load]
+  # Enable/Disable the service for loading tasks/templates/handlers
+  # from a directory
+  enabled = true
+  # Directory where task/template/handler files are set
+  dir = "/etc/kapacitor/load"
+...
+```
+
+#### Replay
+
+The Kapacitor client application can record data streams and batches for testing
+tasks before they are enabled.
+This table contains one key which declares the path to the directory where the replay files are to be stored.
+
+**Example: The Replay grouping**
+
+```toml
+...
+[replay]
+  # Where to store replay files, aka recordings.
+  dir = "/var/lib/kapacitor/replay"
+...
+```
+
+#### Task
+
+Prior to Kapacitor 1.4, tasks were written to a special task database.
+This table and its associated keys are _deprecated_ and should only be used for
+migration purposes.
+
+#### Storage
+
+The Kapacitor service stores its configuration and other information in the key-value [Bolt](https://github.com/boltdb/bolt) database.
+The location of this database on the file system is defined in the storage table
+grouping.
+
+**Example: The Storage grouping**
+
+```toml
+...
+[storage]
+  # Where to store the Kapacitor boltdb database
+  boltdb = "/var/lib/kapacitor/kapacitor.db"
+...
+```
+
+#### Deadman
+
+Kapacitor provides a deadman's switch alert, which can be configured globally
+in this table grouping.
+See the [Deadman](/kapacitor/v1.5/nodes/alert_node/#deadman) helper function topic in the AlertNode documentation.
+
+For a deadman's switch to work, it needs a threshold below which the switch will
+be triggered, a polling interval, and an ID and message to pass to the alert handler.
+
+**Example: The Deadman grouping**
+
+```toml
+...
+[deadman]
+  # Configure a deadman's switch
+  # Globally configure deadman's switches on all tasks.
+  # NOTE: for this to be of use you must also globally configure at least one alerting method.
+  global = false
+  # Threshold, if globally configured the alert will be triggered if the throughput in points/interval is <= threshold.
+  threshold = 0.0
+  # Interval, if globally configured the frequency at which to check the throughput.
+  interval = "10s"
+  # Id: the alert Id, NODE_NAME will be replaced with the name of the node being monitored.
+  id = "node 'NODE_NAME' in task '{{ .TaskName }}'"
+  # The message of the alert. INTERVAL will be replaced by the interval.
+  message = "{{ .ID }} is {{ if eq .Level \"OK\" }}alive{{ else }}dead{{ end }}: {{ index .Fields \"collected\" | printf \"%0.3f\" }} points/INTERVAL."
+...
+```
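+
+The same switch can also be applied per task in a TICKscript with the `deadman`
+helper function. A minimal sketch (the measurement, threshold, and interval here
+are illustrative):
+
+```
+// Trigger a critical alert if throughput falls to
+// 100 points per 10s or less for this stream.
+var data = stream
+    |from()
+        .measurement('cpu')
+
+data
+    |deadman(100.0, 10s)
+```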
+  message = "{{ .ID }} is {{ if eq .Level \"OK\" }}alive{{ else }}dead{{ end }}: {{ index .Fields \"collected\" | printf \"%0.3f\" }} points/INTERVAL."
+...
+```
+
+#### InfluxDB
+
+Kapacitor's main purpose is processing data between nodes within an InfluxDB Enterprise cluster or between multiple clusters.
+You must define at least one `[[influxdb]]` table array configuration for an InfluxDB connection.
+Multiple InfluxDB table array configurations can be specified,
+but one InfluxDB table array configuration must be flagged as the `default`.
+
+**Example: An InfluxDB connection grouping**
+
+{{% note %}}
+To use Kapacitor with an InfluxDB instance that requires authentication,
+it must authenticate using an InfluxDB user with **read and write** permissions.
+{{% /note %}}
+
+```toml
+...
+[[influxdb]]
+  # Connect to an InfluxDB cluster
+  # Kapacitor can subscribe, query and write to this cluster.
+  # Using InfluxDB is not required and can be disabled.
+  enabled = true
+  default = true
+  name = "localhost"
+  urls = ["http://localhost:8086"]
+  username = ""
+  password = ""
+  timeout = 0
+  # Absolute path to pem encoded CA file.
+  # A CA can be provided without a key/cert pair
+  # ssl-ca = "/etc/kapacitor/ca.pem"
+  # Absolute paths to pem encoded key and cert files.
+  # ssl-cert = "/etc/kapacitor/cert.pem"
+  # ssl-key = "/etc/kapacitor/key.pem"
+
+  # Do not verify the TLS/SSL certificate.
+  # This is insecure.
+  insecure-skip-verify = false
+
+  # Maximum time to try and connect to InfluxDB during startup
+  startup-timeout = "5m"
+
+  # Turn off all subscriptions
+  disable-subscriptions = false
+
+  # Subscription mode is either "cluster" or "server"
+  subscription-mode = "server"
+
+  # Which protocol to use for subscriptions
+  # one of 'udp', 'http', or 'https'.
+  subscription-protocol = "http"
+
+  # Subscriptions resync time interval
+  # Useful if you want to subscribe to newly created databases
+  # without restarting Kapacitor
+  subscriptions-sync-interval = "1m0s"
+
+  # Override the global hostname option for this InfluxDB cluster.
+  # Useful if the InfluxDB cluster is in a separate network and
+  # needs special configuration to connect back to this Kapacitor instance.
+  # Defaults to `hostname` if empty.
+  kapacitor-hostname = ""
+
+  # Override the global http port option for this InfluxDB cluster.
+  # Useful if the InfluxDB cluster is in a separate network and
+  # needs special configuration to connect back to this Kapacitor instance.
+  # Defaults to the port from `[http] bind-address` if 0.
+  http-port = 0
+
+  # Host part of a bind address for UDP listeners.
+  # For example if a UDP listener is using port 1234
+  # and `udp-bind = "hostname_or_ip"`,
+  # then the UDP port will be bound to `hostname_or_ip:1234`
+  # The default empty value will bind to all addresses.
+  udp-bind = ""
+  # Subscriptions use the UDP network protocol.
+  # The following options are for the created UDP listeners for each subscription.
+  # Number of packets to buffer when reading packets off the socket.
+  udp-buffer = 1000
+  # The size in bytes of the OS read buffer for the UDP socket.
+  # A value of 0 indicates use the OS default.
+  udp-read-buffer = 0
+
+  [influxdb.subscriptions]
+    # Set of databases and retention policies to subscribe to.
+    # If empty will subscribe to all, minus the list in
+    #   influxdb.excluded-subscriptions
+    #
+    # Format
+    #   db_name = <list of retention policy names>
+    #
+    # Example:
+    #   my_database = [ "default", "longterm" ]
+  [influxdb.excluded-subscriptions]
+    # Set of databases and retention policies to exclude from the subscriptions.
+    # If influxdb.subscriptions is empty it will subscribe to all
+    # except databases listed here.
+    #
+    # Format
+    #   db_name = <list of retention policy names>
+    #
+    # Example:
+    #   my_database = [ "default", "longterm" ]
+...
+```
+
+#### Internals
+
+Kapacitor includes internal services that can be enabled or disabled and
+that have properties that need to be defined.
+
+##### HTTP Post
+
+The HTTP Post service configuration is commented out by default. It is used for
+POSTing alerts to an HTTP endpoint.
+
+##### Reporting
+
+Kapacitor will send usage statistics back to InfluxData.
+This feature can be disabled or enabled in the `[reporting]` table grouping.
+
+**Example: Reporting configuration**
+```toml
+...
+[reporting]
+  # Send usage statistics
+  # every 12 hours to Enterprise.
+  enabled = true
+  url = "https://usage.influxdata.com"
+...
+```
+
+##### Stats
+
+Internal statistics about Kapacitor can also be emitted to an InfluxDB database.
+The collection frequency and the database to which the statistics are emitted
+can be configured in the `[stats]` table grouping.
+
+**Example: Stats configuration**
+
+```toml
+...
+[stats]
+  # Emit internal statistics about Kapacitor.
+  # To consume these stats create a stream task
+  # that selects data from the configured database
+  # and retention policy.
+  #
+  # Example:
+  #  stream|from().database('_kapacitor').retentionPolicy('autogen')...
+  #
+  enabled = true
+  stats-interval = "10s"
+  database = "_kapacitor"
+  retention-policy = "autogen"
+...
+```
+
+##### Alert
+
+Kapacitor includes global alert configuration options that apply to all alerts
+created by the [alertNode](/kapacitor/v1.5/nodes/alert_node/).
+
+```toml
+[alert]
+  # Persisting topics can become an I/O bottleneck under high load.
+  # This setting disables them entirely.
+  persist-topics = false
+```
+
+#### Optional table groupings
+
+Optional table groupings are disabled by default and relate to specific features that can be leveraged by TICKscript nodes or used to discover and scrape information from remote locations.
+In the default configuration, these optional table groupings may be commented out or include a key `enabled` set to `false` (i.e., `enabled = false`).
+A feature defined by an optional table should be enabled whenever a relevant node or a handler for a relevant node is required by a task, or when an input source is needed.
+
+For example, if alerts are to be sent via email, then the SMTP service should
+be enabled and configured in the `[smtp]` properties table.
+
+**Example: Enabling SMTP**
+
+```toml
+...
+[smtp]
+  # Configure an SMTP email server
+  # Will use TLS and authentication if possible
+  # Only necessary for sending emails from alerts.
+  enabled = true
+  host = "192.168.1.24"
+  port = 25
+  username = "schwartz.pudel"
+  password = "f4usT!1808"
+  # From address for outgoing mail
+  from = "kapacitor@test.org"
+  # List of default To addresses.
+  to = ["heinrich@urfaust.versuch.de","valentin@urfaust.versuch.de","wagner@urfaust.versuch.de"]
+
+  # Skip TLS certificate verify when connecting to SMTP server
+  no-verify = false
+  # Close idle connections after timeout
+  idle-timeout = "30s"
+
+  # If true then all alerts will be sent via Email
+  # without explicitly marking them in the TICKscript.
+  global = false
+  # Only applies if global is true.
+  # Sets all alerts in state-changes-only mode,
+  # meaning alerts will only be sent if the alert state changes.
+  state-changes-only = false
+...
+```
+
+Optional features include supported alert handlers, Docker services, user defined functions, input services, and discovery services.
+
+##### Supported event handlers
+
+Event handlers manage communications from Kapacitor to third party services or
+across Internet standard messaging protocols.
+They are activated through chaining methods on the [Alert](/kapacitor/v1.5/nodes/alert_node/) node.
+
+Most of the handler configurations include common properties.
+Every handler has the property `enabled`. They also need an endpoint to which
+messages can be sent.
+Endpoints may include single properties (e.g., `url` and `addr`) or property pairs (e.g., `host` and `port`).
+Most also include an authentication mechanism such as a `token` or a pair of properties like `username` and `password`.
+A sample SMTP configuration is shown in the SMTP example above.
+
+Specific properties are included directly in the configuration file and
+discussed along with the specific handler information in the [Alert](/kapacitor/v1.5/nodes/alert_node/)
+document.
+
+The following handlers are currently supported:
+
+* [Alerta](/kapacitor/v1.5/event_handlers/alerta/): Sending alerts to Alerta.
+* [Discord](/kapacitor/v1.5/event_handlers/discord/): Sending alerts to Discord.
+* [Email](/kapacitor/v1.5/event_handlers/email/): Sending alerts by email.
+* [HipChat](/kapacitor/v1.5/event_handlers/hipchat/): Sending alerts to the HipChat service.
+* [Kafka](/kapacitor/v1.5/event_handlers/kafka/): Sending alerts to an Apache Kafka cluster.
+* [MQTT](/kapacitor/v1.5/event_handlers/mqtt/): Publishing alerts to an MQTT broker.
+* [OpsGenie](/kapacitor/v1.5/event_handlers/opsgenie/v2/): Sending alerts to the OpsGenie service.
+* [PagerDuty](/kapacitor/v1.5/event_handlers/pagerduty/v2/): Sending alerts to the PagerDuty service.
+* [Pushover](/kapacitor/v1.5/event_handlers/pushover/): Sending alerts to the Pushover service.
+* [Sensu](/kapacitor/v1.5/event_handlers/sensu/): Sending alerts to Sensu.
+* [Slack](/kapacitor/v1.5/event_handlers/slack/): Sending alerts to Slack.
+* [SNMP Trap](/kapacitor/v1.5/event_handlers/snmptrap/): Posting to SNMP traps.
+* [Talk](/kapacitor/v1.5/event_handlers/talk/): Sending alerts to the Talk service.
+* [Telegram](/kapacitor/v1.5/event_handlers/telegram/): Sending alerts to Telegram.
+* [VictorOps](/kapacitor/v1.5/event_handlers/victorops/): Sending alerts to the VictorOps service.
+
+##### Docker services
+
+Kapacitor can be used to trigger changes in Docker clusters. This
+is activated by the [SwarmAutoScale](/kapacitor/v1.5/nodes/swarm_autoscale_node/)
+and the [K8sAutoScale](/kapacitor/v1.5/nodes/k8s_autoscale_node/) nodes.
+
+The following service configurations corresponding to these chaining methods can
+be found in the configuration file:
+
+ * [Swarm](/kapacitor/v1.5/nodes/swarm_autoscale_node/)
+
+ **Example: The Docker Swarm configuration**
+
+ ```toml
+ ...
+ [[swarm]]
+   # Enable/Disable the Docker Swarm service.
+   # Needed by the swarmAutoscale TICKscript node.
+   enabled = false
+   # Unique ID for this Swarm cluster
+   # NOTE: This is not the ID generated by Swarm rather a user defined
+   # ID for this cluster since Kapacitor can communicate with multiple clusters.
+   id = ""
+   # List of URLs for Docker Swarm servers.
+   servers = ["http://localhost:2376"]
+   # TLS/SSL Configuration for connecting to secured Docker daemons
+   ssl-ca = ""
+   ssl-cert = ""
+   ssl-key = ""
+   insecure-skip-verify = false
+ ...
+ ```
+ * [Kubernetes](/kapacitor/v1.5/nodes/k8s_autoscale_node/)
+
+ **Example: The Kubernetes configuration**
+
+ ```toml
+ ...
+ [kubernetes]
+   # Enable/Disable the kubernetes service.
+   # Needed by the k8sAutoscale TICKscript node.
+   enabled = false
+   # There are several ways to connect to the kubernetes API servers:
+   #
+   # Via the proxy, start the proxy via the `kubectl proxy` command:
+   #   api-servers = ["http://localhost:8001"]
+   #
+   # From within the cluster itself, in which case
+   # kubernetes secrets and DNS services are used
+   # to determine the needed configuration.
+   #   in-cluster = true
+   #
+   # Direct connection, in which case you need to know
+   # the URL of the API servers, the authentication token and
+   # the path to the ca cert bundle.
+   # These values can be found using the `kubectl config view` command.
+   #   api-servers = ["http://192.168.99.100:8443"]
+   #   token = "..."
+   #   ca-path = "/path/to/kubernetes/ca.crt"
+   #
+   # Kubernetes can also serve as a discoverer for scrape targets.
+   # In that case the type of resources to discover must be specified.
+   # Valid values are: "node", "pod", "service", and "endpoint".
+   #   resource = "pod"
+ ...
+ ```
+
+##### User defined functions (UDFs)
+
+Kapacitor can be used to plug in a user defined function
+([UDF](/kapacitor/v1.5/nodes/u_d_f_node/)), which can then be leveraged as
+a chaining method in a TICKscript.
+A user defined function is indicated by the declaration of a new grouping table with the following identifier: `[udf.functions.<udf_name>]`.
+A UDF configuration requires a path to an executable, identified by the following properties:
+
+* `prog`: A string indicating the path to the executable.
+* `args`: An array of string arguments to be passed to the executable.
+* `timeout`: A timeout monitored when waiting for communications from the executable.
+
+The UDF can also include a group of environment variables declared in a table
+identified by the string `udf.functions.<udf_name>.env`.
+
+**Example: Configuring a User Defined Function**
+
+```toml
+...
+[udf]
+# Configuration for UDFs (User Defined Functions)
+[udf.functions]
+  ...
+  # Example python UDF.
+  # Use in TICKscript like:
+  #   stream.pyavg()
+  #           .field('value')
+  #           .size(10)
+  #           .as('m_average')
+  #
+  [udf.functions.pyavg]
+    prog = "/usr/bin/python2"
+    args = ["-u", "./udf/agent/examples/moving_avg.py"]
+    timeout = "10s"
+    [udf.functions.pyavg.env]
+      PYTHONPATH = "./udf/agent/py"
+...
+```
+
+Additional examples can be found directly in the default configuration file.
+
+##### Input methods
+
+Kapacitor can receive and process data from sources other than InfluxDB, and the results of this processing can then be written to an InfluxDB database.
+
+Currently, the following two sources external to InfluxDB are supported:
+
+* **Collectd**: The POSIX daemon `collectd` for collecting system, network, and service performance data.
+* **OpenTSDB**: The Open Time Series Database (OpenTSDB) and its daemon `tsd`.
+
+Configuration of connections to third party input sources requires properties such as:
+
+* `bind-address`: Address at which Kapacitor will receive data.
+* `database`: Database to which Kapacitor will write data.
+* `retention-policy`: Retention policy for that database.
+* `batch-size`: Number of datapoints to buffer before writing.
+* `batch-pending`: Number of batches that may be pending in memory.
+* `batch-timeout`: Length of time to wait before writing the batch. If
+the batch size has not been reached, then a short batch will be written.
+
+Each input source has additional properties specific to its configuration. They
+follow the same configurations for these services used in
+[InfluxDB](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml).
+
+**Example: Collectd configuration**
+
+```toml
+...
+[collectd]
+  enabled = false
+  bind-address = ":25826"
+  database = "collectd"
+  retention-policy = ""
+  batch-size = 1000
+  batch-pending = 5
+  batch-timeout = "10s"
+  typesdb = "/usr/share/collectd/types.db"
+...
+```
+
+**Example: OpenTSDB configuration**
+
+```toml
+...
+[opentsdb]
+  enabled = false
+  bind-address = ":4242"
+  database = "opentsdb"
+  retention-policy = ""
+  consistency-level = "one"
+  tls-enabled = false
+  certificate = "/etc/ssl/influxdb.pem"
+  batch-size = 1000
+  batch-pending = 5
+  batch-timeout = "1s"
+...
+```
+
+**User Datagram Protocol (UDP)**
+
+As demonstrated in the [Live Leaderboard](/kapacitor/v1.5/guides/live_leaderboard/)
+guide and the [Scores](https://github.com/influxdb/kapacitor/tree/master/examples/scores)
+example, Kapacitor can be configured to accept raw data from a UDP connection.
+
+This is configured much like other input services.
+
+**Example: UDP configuration**
+
+```toml
+...
+[[udp]]
+  enabled = true
+  bind-address = ":9100"
+  database = "game"
+  retention-policy = "autogen"
+...
+```
+
+#### Service discovery and metric scraping
+
+When the number and addresses of the hosts and services for which Kapacitor
+should collect information are not known at the time of configuring or booting
+the Kapacitor service, they can be determined, and the data collected, at runtime
+with the help of discovery services.
+This process is known as metric _scraping and discovery_.
+For more information, see [Scraping and Discovery](/kapacitor/v1.5/pull_metrics/scraping-and-discovery/).
+
+For scraping and discovery to work, one or more scrapers must be configured. One
+scraper can be bound to one discovery service.
+
+**Example: Scraper configuration**
+
+```toml
+...
+[[scraper]]
+  enabled = false
+  name = "myscraper"
+  # Specify the id of a discoverer service specified below
+  discoverer-id = "goethe-ec2"
+  # Specify the type of discoverer service being used.
+  discoverer-service = "ec2"
+  db = "prometheus_raw"
+  rp = "autogen"
+  type = "prometheus"
+  scheme = "http"
+  metrics-path = "/metrics"
+  scrape-interval = "1m0s"
+  scrape-timeout = "10s"
+  username = "schwartz.pudel"
+  password = "f4usT!1808"
+  bearer-token = ""
+  ssl-ca = ""
+  ssl-cert = ""
+  ssl-key = ""
+  ssl-server-name = ""
+  insecure-skip-verify = false
+...
+```
+
+The example above is illustrative only.
+
+##### Discovery services
+
+Kapacitor currently supports 12 discovery services.
+Each of these has an `id` property by which it will be bound to a scraper.
+
+Configuration entries are prepared by default for the following discovery
+services:
+
+* Azure
+* Consul
+* DNS
+* EC2
+* File Discovery
+* GCE
+* Marathon
+* Nerve
+* ServerSet
+* Static Discovery
+* Triton
+* UDP
+
+**Example: EC2 Discovery Service configuration**
+
+```toml
+...
+[[ec2]]
+  enabled = false
+  id = "goethe-ec2"
+  region = "us-east-1"
+  access-key = "ABCD1234EFGH5678IJKL"
+  secret-key = "1nP00dl3N01rM4Su1v1Ju5qU3ch3ZM01"
+  profile = "mph"
+  refresh-interval = "1m0s"
+  port = 80
+...
+```
+
+The above example is illustrative.
+
+## Kapacitor environment variables
+
+Kapacitor can use environment variables for high-level properties or to
+override properties in the configuration file.
+
+### Environment variables not in configuration file
+
+These variables are not found in the configuration file.
+
+* `KAPACITOR_OPTS`: Found in the `systemd` startup script and used to pass
+command line options to `kapacitord` started by `systemd`.
+* `KAPACITOR_CONFIG_PATH`: Sets the path to the configuration file.
+* `KAPACITOR_URL`: Used by the client application `kapacitor` to locate
+the `kapacitord` service.
+* `KAPACITOR_UNSAFE_SSL`: A Boolean used by the client application `kapacitor`
+to skip verification of the `kapacitord` certificate when connecting over SSL.
+
+### Mapping properties to environment variables
+
+Kapacitor-specific environment variables begin with the token `KAPACITOR`
+followed by an underscore (`_`).
+Properties then follow their path through the configuration file tree with each node in the tree separated by an underscore.
+Dashes in configuration file identifiers are replaced with underscores.
+Table groupings in table arrays are identified by integer tokens.
+
+Examples:
+
+* `KAPACITOR_SKIP_CONFIG_OVERRIDES`: Could be used to set the value for
+`skip-config-overrides`.
+* `KAPACITOR_INFLUXDB_0_URLS_0`: Could be used to set the value of the
+first URL item in the URLS array in the first InfluxDB property grouping table,
+i.e. `[influxdb][0].[urls][0]`.
+* `KAPACITOR_STORAGE_BOLTDB`: Could be used to set the path to the boltdb
+database used for storage, i.e. `[storage].boltdb`.
+* `KAPACITOR_HTTPPOST_0_HEADERS_Authorization`: Could be used to set the
+value of the `authorization` header for the first HTTPPost configuration (`[httppost][0].headers.{authorization:"some_value"}`).
+* `KAPACITOR_KUBERNETES_ENABLED`: Could be used to enable the Kubernetes
+configuration service (`[kubernetes].enabled`).
+
+## Configuring with the HTTP API
+
+The Kapacitor [HTTP API](/kapacitor/v1.5/working/api/) can also be used to override
+certain parts of the configuration.
+This can be useful when a property may contain security sensitive information that should not be left in plain view in the file system, or when you need to reconfigure a service without restarting Kapacitor.
+To view which parts of the configuration are available,
+pull the JSON file at the `/kapacitor/v1/config` endpoint
+(e.g., http://localhost:9092/kapacitor/v1/config).
+
+Working with the HTTP API to override configuration properties is presented in
+detail in the [Configuration](/kapacitor/v1.5/working/api/#overriding-configurations) section
+of the HTTP API document.
+In order for overrides over the HTTP API to work,
+the `[config-override].enabled` property must be set to `true`.
+
+Generally, specific sections of the configuration can be viewed as JSON files by
+GETting them from the context path built by their identifier from the `config`
+endpoint.
+For example, to get the table groupings of InfluxDB properties,
+use the context `/kapacitor/v1/config/influxdb`.
+Security-sensitive fields such as passwords, keys, and security tokens are redacted when using GET.
+
+Properties can be altered by POSTing a JSON document to the endpoint.
+The JSON document must contain a `set` field with a map of the properties to override and
+their new values.
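+
+Such a document can be POSTed inline with `curl`. The following is a sketch only, assuming Kapacitor's HTTP API at its default address and `[config-override].enabled` set to `true`:
+
+```bash
+# POST a 'set' override that enables the SMTP handler.
+# Kapacitor applies the change without a restart.
+curl -kv -d '{ "set": { "enabled": true } }' http://localhost:9092/kapacitor/v1/config/smtp/
+```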
+
+**Example: JSON file for enabling the SMTP configuration**
+
+```json
+{
+  "set":{
+    "enabled": true
+  }
+}
+```
+
+By POSTing this document to the `/kapacitor/v1/config/smtp/` endpoint, the SMTP
+service can be enabled.
+
+Property overrides can be removed with the `delete` field in the JSON document.
+
+**Example: JSON file for removing an SMTP override**
+
+```json
+{
+  "delete":[
+    "enabled"
+  ]
+}
+```
+By POSTing this document to the `/kapacitor/v1/config/smtp/` endpoint, the SMTP
+override is removed and Kapacitor reverts to the behavior defined in the
+configuration file.
diff --git a/content/kapacitor/v1.5/administration/security.md b/content/kapacitor/v1.5/administration/security.md
new file mode 100644
index 000000000..10913bcb4
--- /dev/null
+++ b/content/kapacitor/v1.5/administration/security.md
@@ -0,0 +1,446 @@
+---
+title: Security
+
+menu:
+  kapacitor_1_5:
+    weight: 12
+    parent: Administration
+---
+
+# Contents
+
+* [Overview](#overview)
+* [Secure InfluxDB and Kapacitor](#secure-influxdb-and-kapacitor)
+* [Kapacitor Security](#kapacitor-security)
+* [Secure Kapacitor and Chronograf](#secure-kapacitor-and-chronograf)
+
+# Overview
+
+This document covers the basics of securing the open-source distribution of
+Kapacitor. For information about security with Enterprise Kapacitor, see the
+[Enterprise Kapacitor](/enterprise_kapacitor/v1.5/) documentation.
+
+When seeking to secure Kapacitor, it is assumed that the Kapacitor server will be
+communicating with an already secured InfluxDB server. It will also make its
+tasks and alerts available to a Chronograf installation.
+
+The following discussion will cover configuring Kapacitor to communicate with a
+[secure InfluxDB server](#secure-influxdb-and-kapacitor), enabling
+[TLS in Kapacitor](#kapacitor-security), and connecting a TLS-enabled
+Kapacitor server to [Chronograf](#secure-kapacitor-and-chronograf).
+
+Authentication and Authorization are not fully implemented in the open-source
+Kapacitor distribution, but are available as a feature of Enterprise Kapacitor.
+
+## Secure InfluxDB and Kapacitor
+
+InfluxDB can secure its communications with TLS on the transport layer and
+with authentication into the database. How to enable TLS and authentication
+and authorization in InfluxDB is covered in the InfluxDB documentation, in the
+sections [HTTPS Setup](/influxdb/v1.4/administration/https_setup/) and
+[Authentication and Authorization](/influxdb/v1.4/query_language/authentication_and_authorization),
+respectively.
+
+Kapacitor configuration supports both HTTPS communications and Authentication
+with InfluxDB. Parameters can be set directly in the configuration file, as
+environment variables, or over Kapacitor's HTTP API.
+
+An overview of Kapacitor configuration is provided in the
+[Configuration](/kapacitor/v1.5/administration/configuration/) document.
+
+### Kapacitor and InfluxDB HTTPS
+
+To activate a TLS connection, the `urls` strings in the `influxdb` server
+configuration will need to contain the `https` protocol. Furthermore, either a
+PEM-encoded public key and certificate pair or a PEM-encoded CA file will need
+to be specified.
+
+When testing with a **self-signed certificate**, it is also important to switch off
+certificate verification with the property `insecure-skip-verify`. Failure to do
+so will result in x509 certificate errors as follows:
+
+```
+ts=2018-02-19T13:26:11.437+01:00 lvl=error msg="failed to connect to InfluxDB, retrying..." service=influxdb cluster=localhost err="Get https://localhost:8086/ping: x509: certificate is valid for lenovo-TP02, not localhost"
+```
+
+> **Important** – Please note that in a production environment with a standard CA certificate, `insecure-skip-verify` needs to be switched off.
+
+In the configuration file these values are set according to the following example.
+
+**Example 1 – TLS Configuration Properties for InfluxDB – kapacitor.conf**
+```toml
+[[influxdb]]
+  # Connect to an InfluxDB cluster
+  # Kapacitor can subscribe, query and write to this cluster.
+  # Using InfluxDB is not required and can be disabled.
+  enabled = true
+  default = true
+  name = "localhost"
+  urls = ["https://localhost:8086"]
+  timeout = 0
+  # Absolute path to pem encoded CA file.
+  # A CA can be provided without a key/cert pair
+  # ssl-ca = "/etc/ssl/influxdata-selfsigned-incl-pub-key.pem"
+  # Absolute paths to pem encoded key and cert files.
+  ssl-cert = "/etc/ssl/influxdb-selfsigned.crt"
+  ssl-key = "/etc/ssl/influxdb-selfsigned.key"
+...
+  insecure-skip-verify = false
+...
+  subscription-protocol = "https"
+...
+```
+The relevant properties in Example 1 are:
+
+* `urls` – note the protocol is `https` and _not_ `http`.
+* `ssl-cert` and `ssl-key` – to indicate the location of the certificate and key files.
+* `insecure-skip-verify` – for testing with a self-signed certificate, set this to `true`; otherwise it should be `false`, especially in production environments.
+* `subscription-protocol` – to declare the correct protocol for subscription communications. For example, if Kapacitor is to run on HTTP, this should be set to `"http"`; if Kapacitor is to run on HTTPS, it should be set to `"https"`.
+
+Note that when a CA file contains the certificate and key together, the property
+`ssl-ca` can be used in place of `ssl-cert` and `ssl-key`.
+
+As environment variables these properties can be set as follows:
+
+**Example 2 – TLS Configuration Properties for InfluxDB – ENVARS**
+```
+KAPACITOR_INFLUXDB_0_URLS_0="https://localhost:8086"
+KAPACITOR_INFLUXDB_0_SSL_CERT="/etc/ssl/influxdb-selfsigned.crt"
+KAPACITOR_INFLUXDB_0_SSL_KEY="/etc/ssl/influxdb-selfsigned.key"
+KAPACITOR_INFLUXDB_0_INSECURE_SKIP_VERIFY=true
+KAPACITOR_INFLUXDB_0_SUBSCRIPTION_PROTOCOL="https"
+```
+When using Systemd to manage the Kapacitor daemon, the above parameters can be
+stored in the file `/etc/default/kapacitor`.
+
+#### Kapacitor to InfluxDB TLS configuration over HTTP API
+
+These properties can also be set using the HTTP API. To get the current
+`InfluxDB` part of the Kapacitor configuration, use the following `curl` command:
+
+```
+curl -ks http://localhost:9092/kapacitor/v1/config/influxdb | python -m json.tool > kapacitor-influxdb.conf
+```
+
+This results in the following file:
+
+**Example 3 – The InfluxDB part of the Kapacitor configuration**
+
+```json
+{
+    "elements": [
+        {
+            "link": {
+                "href": "/kapacitor/v1/config/influxdb/localhost",
+                "rel": "self"
+            },
+            "options": {
+                "default": true,
+                "disable-subscriptions": false,
+                "enabled": true,
+                "excluded-subscriptions": {
+                    "_kapacitor": [
+                        "autogen"
+                    ]
+                },
+                "http-port": 0,
+                "insecure-skip-verify": false,
+                "kapacitor-hostname": "",
+                "name": "localhost",
+                "password": true,
+                "ssl-ca": "",
+                "ssl-cert": "/etc/ssl/influxdb-selfsigned.crt",
+                "ssl-key": "/etc/ssl/influxdb-selfsigned.key",
+                "startup-timeout": "5m0s",
+                "subscription-mode": "cluster",
+                "subscription-protocol": "https",
+                "subscriptions": {},
+                "subscriptions-sync-interval": "1m0s",
+                "timeout": "0s",
+                "udp-bind": "",
+                "udp-buffer": 1000,
+                "udp-read-buffer": 0,
+                "urls": [
+                    "https://localhost:8086"
+                ],
+                "username": "admin"
+            },
+            "redacted": [
+                "password"
+            ]
+        }
+    ],
+    "link": {
+        "href": "/kapacitor/v1/config/influxdb",
+        "rel": "self"
+    }
+}
+```
+
+Properties can be updated by _POSTing_ a JSON document containing the field `"set"`
+followed by the properties to be modified.
+
+For example, the following command switches off the `insecure-skip-verify` property.
+
+```
+curl -kv -d '{ "set": { "insecure-skip-verify": false } }' http://localhost:9092/kapacitor/v1/config/influxdb/
+...
+upload completely sent off: 43 out of 43 bytes
+< HTTP/1.1 204 No Content
+< Content-Type: application/json; charset=utf-8
+< Request-Id: 189e9abb-157b-11e8-866a-000000000000
+< X-Kapacitor-Version: 1.5.1~n201802140813
+< Date: Mon, 19 Feb 2018 13:45:07 GMT
+<
+* Connection #0 to host localhost left intact
+```
+
+Similar commands:
+
+* To change the URLS:
+
+`curl -kv -d '{ "set": { "urls": [ "https://lenovo-TP02:8086" ]} }' https://localhost:9092/kapacitor/v1/config/influxdb/`
+
+* To set the `subscription-protocol`:
+
+`curl -kv -d '{ "set": { "subscription-protocol": "https" } }' https://localhost:9092/kapacitor/v1/config/influxdb/`
+
+* To set the path to the CA Certificate:
+
+`curl -kv -d '{ "set": { "ssl-ca": "/etc/ssl/influxdata-selfsigned-incl-pub-key.pem" } }' https://localhost:9092/kapacitor/v1/config/influxdb/`
+
+Other properties can be set in a similar fashion.
+
+### Kapacitor and InfluxDB Authentication
+
+An additional security mechanism available in InfluxDB is Authentication and
+Authorization. Kapacitor can be configured to communicate with InfluxDB using
+a username:password pair. These properties can be set in the configuration
+file, as environment variables, or over the HTTP API.
+
+**Example 4 – InfluxDB Authentication Parameters – kapacitor.conf**
+
+```toml
+[[influxdb]]
+  # Connect to an InfluxDB cluster
+  # Kapacitor can subscribe, query and write to this cluster.
+  # Using InfluxDB is not required and can be disabled.
+  enabled = true
+  default = true
+  name = "localhost"
+  urls = ["https://localhost:8086"]
+  username = "admin"
+  password = "changeit"
+  timeout = 0
+...
+```
+
+The relevant parameters in Example 4 are `username` and `password`.
+
+These can also be set as environment variables.
+
+**Example 5 – InfluxDB Authentication Parameters – ENVARS**
+
+```
+KAPACITOR_INFLUXDB_0_USERNAME="admin"
+KAPACITOR_INFLUXDB_0_PASSWORD="changeit"
+```
+
+When using Systemd to manage the Kapacitor daemon, the above parameters can be
+stored in the file `/etc/default/kapacitor`.
+
+Alternatively, they can be set or updated over the HTTP API.
+
+```
+$ curl -kv -d '{ "set": { "username": "foo", "password": "bar" } }' https://localhost:9092/kapacitor/v1/config/influxdb/
+```
+
+## Kapacitor Security
+
+Open-source Kapacitor offers TLS for encrypting communications to the HTTP API.
+
+### Kapacitor over TLS
+
+This feature can be enabled in the `[http]` group of the configuration.
+Activation requires simply setting the property `https-enabled` to `true` and
+then providing a path to a certificate with the property `https-certificate`.
+If your certificate's private key is separate, specify the path to the private key
+using the `https-private-key` property.
+
+The following example shows how this is done in the `kapacitor.conf` file.
+
+**Example 6 – Enabling TLS in kapacitor.conf**
+
+```toml
+[http]
+  # HTTP API Server for Kapacitor
+  # This server is always on,
+  # it serves both as a write endpoint
+  # and as the API endpoint for all other
+  # Kapacitor calls.
+  bind-address = ":9092"
+  log-enabled = true
+  write-tracing = false
+  pprof-enabled = false
+  https-enabled = true
+  https-certificate = "/etc/ssl/influxdata-selfsigned.crt"
+  https-private-key = "/etc/ssl/influxdata-selfsigned.key"
+```
+
+These values can also be set as environment variables as shown in the next example.
+
+**Example 7 – Enabling TLS as ENVARS**
+
+```
+KAPACITOR_HTTP_HTTPS_ENABLED=true
+KAPACITOR_HTTP_HTTPS_CERTIFICATE="/etc/ssl/influxdata-selfsigned.crt"
+KAPACITOR_HTTP_HTTPS_PRIVATE_KEY="/etc/ssl/influxdata-selfsigned.key"
+```
+
+However, they _cannot_ be set over the HTTP API.
+
+Please remember that when Kapacitor is running on HTTPS, this needs to be
+reflected in the `subscription-protocol` property for the `[[influxdb]]` group
+of the Kapacitor configuration. See [Example 1](#example-1) above. The value of
+this property needs to be set to `https`. Failure to do so will result in
+a `TLS handshake error` with the message `oversized record received with
+length 21536` in the Kapacitor log as shown here:
+
+```
+ts=2018-02-19T13:23:49.684+01:00 lvl=error msg="2018/02/19 13:23:49 http: TLS handshake error from 127.0.0.1:49946: tls: oversized record received with length 21536\n" service=http service=httpd_server_errors
+```
+
+If for any reason TLS is switched off, this property needs to be reset to `http`.
+Failure to do so will result in the inability of InfluxDB to push subscribed
+data to Kapacitor, with a message in the InfluxDB log like the following:
+
+```
+mar 05 17:02:40 algonquin influxd[32520]: [I] 2018-03-05T16:02:40Z Post https://localhost:9092/write?consistency=&db=telegraf&precision=ns&rp=autogen: http: server gave HTTP response to HTTPS client service=subscriber
+```
+
+#### Kapacitor command-line client with HTTPS
+
+Once HTTPS has been enabled, the Kapacitor command line client will need to be
+supplied the `-url` argument in order to connect. If a self-signed or other
+certificate is used, which has not been added to the system certificate store,
+an additional argument `-skipVerify` will also need to be provided.
+
+```
+$ kapacitor -url https://localhost:9092 -skipVerify list tasks
+ID                                                 Type      Status    Executing Databases and Retention Policies
+chronograf-v1-3586109e-8b7d-437a-80eb-a9c50d00ad53 stream    enabled   true      ["telegraf"."autogen"]
+```
+
+### Kapacitor Authentication and Authorization
+
+The following applies to the open-source distribution of Kapacitor. While it is
+possible to add parameters such as `username`, `password` and `auth-enabled` to
+the section `[http]` of the configuration file, `kapacitor.conf`, and while the
+Kapacitor server will then expect a username and password to be supplied when
+connecting, the authorization and authentication handler in the open-source
+distribution does not enforce checks against a user-store, nor does it verify
+access permissions to resources using an Access Control List (ACL).
+
+A true authentication and authorization handler is available only in the
+Enterprise Kapacitor distribution.
+
+### Note on HTTP API Configuration and Restarting Kapacitor
+
+Please be aware that when configuration values are set using the HTTP API,
+these values will persist in the Kapacitor database even after restart. To
+switch off these overrides on restart, set the property `skip-config-overrides`
+to `true` either in the configuration file (`kapacitor.conf`) or as an
+environment variable (`KAPACITOR_SKIP_CONFIG_OVERRIDES`).
+
+When troubleshooting connection issues after restart, check the HTTP API, for example
+at http://localhost:9092/kapacitor/v1/config.
+This can be especially useful if Kapacitor to InfluxDB communications do not
+seem to be respecting values seen in the file `kapacitor.conf` or in environment
+variables.
+
+## Secure Kapacitor and Chronograf
+
+With Kapacitor configured with HTTPS/TLS enabled, many users will want to add
+Kapacitor to their connection configuration in Chronograf. The primary
+requirement for this to work is to have the base signing certificate installed
+on the host where the Chronograf service is running. With most operating systems
+this should already be the case.
+
+When working with a **self-signed** certificate, this means installing the
+self-signed certificate into the system.
+
+### Install a Self-Signed Certificate on Debian
+
+As an example of installing a self-signed certificate to the system, in
+Debian/Ubuntu any certificate can be copied to the directory
+`/usr/local/share/ca-certificates/` and then the certificate store can be rebuilt.
+
+```
+$ sudo cp /etc/ssl/influxdb-selfsigned.crt /usr/local/share/ca-certificates/
+$ sudo update-ca-certificates
+Updating certificates in /etc/ssl/certs...
+1 added, 0 removed; done.
+Running hooks in /etc/ca-certificates/update.d...
+
+Replacing debian:influxdb-selfsigned.pem
+done.
+done.
+```
+
+If a self-signed or other certificate has been added to the system, the
+Chronograf service needs to be restarted to gather the new certificate
+information.
+
+```
+$ sudo systemctl restart chronograf.service
+```
+
+### Adding a Kapacitor Connection in Chronograf
+
+The following instructions apply to the Chronograf UI. If Chronograf has been
+installed, it can be found by default at port 8888 (e.g. http://localhost:8888).
+
+1) In the left side navigation bar, open the **Configuration** page.
+This will show all available InfluxDB connections. In the row containing the
+InfluxDB connection for which a Kapacitor connection is to be added, click the
+link **Add Kapacitor Connection**. This will load the Add a New Kapacitor
+Connection page.
+
+**Image 1 – Adding a Kapacitor Connection**
+
+2) In the **Connection Details** group, fill in such details as a name for the
+connection and click the **Connect** button.
+
+**Image 2 – Kapacitor Connection Details**
+
+3) If the certificate is installed on the system, a success notification will
+appear.
+
+**Image 3 – Kapacitor Connection Success**
+
+If an error notification is returned, check the Chronograf log for proxy errors.
+For example:
+
+```
+mar 06 13:53:07 lenovo-tp02 chronograf[12079]: 2018/03/06 13:53:07 http: proxy error: x509: certificate is valid for locahlost, not localhost
+```
+
+4) Tabbed forms for editing and adding Kapacitor Handler Endpoints will also
+appear. In wider screens they will be to the right of the Connection Details
+group. In narrower screens they will be below the Connection Details group.
+
+**Image 4 – Configure Kapacitor Handler Endpoints**
+
+At this point Kapacitor can be used to generate alerts and TICKscripts through
+Chronograf. These features are available through the **Alerting** item in the
+left navigation bar.
diff --git a/content/kapacitor/v1.5/administration/subscription-management.md b/content/kapacitor/v1.5/administration/subscription-management.md
new file mode 100644
index 000000000..ae6b0eaf5
--- /dev/null
+++ b/content/kapacitor/v1.5/administration/subscription-management.md
@@ -0,0 +1,147 @@
+---
+title: Manage Kapacitor subscriptions
+description: Kapacitor subscribes to InfluxDB and receives all data as it is written to InfluxDB. This article walks through how Kapacitor subscriptions work, how to configure them, and how to manage them.
+menu:
+  kapacitor_1_5:
+    name: Manage subscriptions
+    parent: Administration
+    weight: 100
+---
+
+Kapacitor is tightly integrated with InfluxDB through the use of [InfluxDB subscriptions](/influxdb/latest/administration/subscription-management/),
+local or remote endpoints to which all data written to InfluxDB is copied.
+Kapacitor subscribes to InfluxDB allowing it to capture, manipulate, and act on your data.
+
+## How Kapacitor subscriptions work
+Kapacitor allows you to manipulate and act on data as it is written into InfluxDB.
+Rather than querying InfluxDB for data *(except when using the [BatchNode](/kapacitor/v1.5/nodes/batch_node/))*,
+all data is copied to your Kapacitor server or cluster through an InfluxDB subscription.
+This reduces the query load on InfluxDB and isolates overhead associated with data
+manipulation to your Kapacitor server or cluster.
+
+On startup, Kapacitor will check for a subscription in InfluxDB with a name matching the Kapacitor server or cluster ID.
+This ID is stored inside of `/var/lib/kapacitor/`.
+If the ID file doesn't exist on startup, Kapacitor will create one.
+If a subscription matching the Kapacitor ID doesn't exist in InfluxDB, Kapacitor
+will create a new subscription in InfluxDB.
+This process ensures that when Kapacitor stops, it will reconnect to the same subscription
+on restart as long as the contents of `/var/lib/kapacitor/` remain intact.
+
+_The directory in which Kapacitor stores its ID can be configured with the
+[`data-dir` root configuration option](/kapacitor/v1.5/administration/configuration/#organization)
+in the `kapacitor.conf`._
+
+> #### Kapacitor IDs in containerized or ephemeral filesystems
+> In containerized environments, filesystems are considered ephemeral and typically
+> do not persist between container stops and restarts.
+> If `/var/lib/kapacitor/` is not persisted, Kapacitor will create a new InfluxDB subscription
+> on startup, resulting in unnecessary "duplicate" subscriptions.
+> You will then need to manually [drop the unnecessary subscriptions](/influxdb/latest/administration/subscription-management/#remove-subscriptions).
+>
+> To avoid this, InfluxData recommends that you persist the `/var/lib/kapacitor` directory.
+> Many persistence strategies are available and which to use depends on your
+> specific architecture and containerization technology.
+
+## Configure Kapacitor subscriptions
+Kapacitor subscription configuration options are available under the `[[influxdb]]` section in the [`kapacitor.conf`](/kapacitor/v1.5/administration/configuration/).
+Below is an example of subscription-specific configuration options followed by a description of each.
+
+_**Example Kapacitor subscription configuration**_
+```toml
+[[influxdb]]
+
+  # ...
+
+  disable-subscriptions = false
+  subscription-mode = "server"
+  subscription-protocol = "http"
+  subscriptions-sync-interval = "1m0s"
+
+  # ...
+
+  [influxdb.subscriptions]
+    my_database1 = [ "default", "longterm" ]
+  [influxdb.excluded-subscriptions]
+    my_database2 = [ "default", "shortterm" ]
+```
+
+### `disable-subscriptions`
+Set to `true` to disable all subscriptions.
+
+### `subscription-mode`
+Defines the subscription mode of Kapacitor.
+Available options:
+
+- `"server"`
+- `"cluster"` _(See warning below)_
+
+{{% warn %}}
+The default setting for `subscription-mode` is `cluster`, however, this should
+not be used with [Kapacitor Enterprise](/enterprise_kapacitor/).
+Multi-node Kapacitor Enterprise clusters should only use the `server` subscription-mode,
+otherwise subscription data will not be received.
+{{% /warn %}}

+### `subscription-protocol`
+Defines which protocol to use for subscriptions.
+Available options:
+
+- `"udp"`
+- `"http"`
+- `"https"`
+
+### `[influxdb.subscriptions]`
+Defines a set of databases and retention policies to subscribe to.
+If empty, Kapacitor will subscribe to all databases and retention policies except for those listed in
+[`[influxdb.excluded-subscriptions]`](#influxdb-excluded-subscriptions).
+
+```toml
+[influxdb.subscriptions]
+  # Pattern:
+  db_name = <list of retention policies>
+
+  # Example:
+  my_database = [ "default", "longterm" ]
+```
+
+### `[influxdb.excluded-subscriptions]`
+Defines a set of databases and retention policies to exclude from subscriptions.
+
+```toml
+[influxdb.excluded-subscriptions]
+  # Pattern:
+  db_name = <list of retention policies>
+
+  # Example:
+  my_database = [ "default", "longterm" ]
+```
+
+> Only one of `[influxdb.subscriptions]` or `[influxdb.excluded-subscriptions]`
+> need be defined. They essentially fulfill the same purpose in different ways,
+> but specific use cases do lend themselves to one or the other.
+
+## Troubleshooting
+
+### View the Kapacitor server or cluster ID
+There are two ways to view your Kapacitor server or cluster ID:
+
+1. View the contents of `/var/lib/kapacitor/server.id` or `/var/lib/kapacitor/cluster.id`.
+
+   _The location of ID files depends on your operating system and the
+   [`data-dir`](/kapacitor/v1.5/administration/configuration/#organization)
+   setting in your `kapacitor.conf`._
+
+2. Run the following command:
+
+   ```bash
+   kapacitor stats general
+   ```
+
+   The server and cluster IDs are included in the output.
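+
+   The output looks roughly like the following (the values here are illustrative only):
+
+   ```bash
+   $ kapacitor stats general
+   ClusterID:                    ef3b3f9d-0997-4c0b-95a5-53b8f8d13a4b
+   ServerID:                     90582c9c-2e25-4654-903e-0acfc48fb5da
+   Host:                         localhost
+   Tasks:                        5
+   Enabled Tasks:                2
+   Subscriptions:                3
+   Version:                      1.5.1
+   ```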
+
+### Duplicate Kapacitor subscriptions
+Duplicate Kapacitor subscriptions are often caused by the contents of `/var/lib/kapacitor`
+not persisting between restarts as described [above](#kapacitor-ids-in-containerized-or-ephemeral-filesystems).
+The solution is to ensure the contents of this directory are persisted.
+Any duplicate Kapacitor subscriptions already created will need to be [manually removed](/influxdb/latest/administration/subscription-management/#remove-subscriptions).
diff --git a/content/kapacitor/v1.5/administration/upgrading.md b/content/kapacitor/v1.5/administration/upgrading.md
new file mode 100644
index 000000000..8ba97eb08
--- /dev/null
+++ b/content/kapacitor/v1.5/administration/upgrading.md
@@ -0,0 +1,473 @@
+---
+title: Upgrading to Kapacitor v1.5
+aliases:
+  - kapacitor/v1.5/introduction/upgrading/
+menu:
+  kapacitor_1_5:
+    weight: 30
+    parent: Administration
+---
+
+## Contents
+1. [Overview](#overview)
+2. [Stopping the Kapacitor service](#stopping-the-kapacitor-service)
+3. [Backup configuration and data](#backup-configuration-and-data)
+4. [Debian package upgrade](#debian-package-upgrade)
+5. [RPM package upgrade](#rpm-package-upgrade)
+6. [Upgrade with .zip or .tar.gz](#upgrade-with-zip-or-tar-gz)
+7. [Verifying the restart](#verifying-the-restart)
+
+## Overview
+
+How Kapacitor was installed will determine how Kapacitor should be upgraded.
+
+The application may have been installed directly using the package management mechanisms of the OS, or it may have been installed by unpacking the `.zip` or `.tar.gz` distributions.
+This document will cover upgrading Kapacitor from release 1.3.1 to release 1.5 on Linux (Ubuntu 16.04 and CentOS 7.3).
+It presents some specifics of upgrading using the `.deb` package, some similar specifics of upgrading using the `.rpm` package, and then more generally upgrading using the `.tar.gz` binary distribution.
+The binary package upgrade should serve as an example offering hints as to how to upgrade using the binary distributions on other operating systems, for example on Windows using the `.zip` file.
+On other operating systems the general steps presented here will be roughly the same.
+
+Before proceeding with the Kapacitor upgrade, please ensure that InfluxDB and Telegraf (if used) have been upgraded to a release compatible with the latest release of Kapacitor. In this example we will use:
+
+ * InfluxDB 1.5.2
+ * Telegraf 1.6
+ * Kapacitor 1.5
+
+For instructions on upgrading InfluxDB, please see the [InfluxDB upgrade](/influxdb/latest/administration/upgrading/) documentation. For instructions on upgrading Telegraf, please see the [Telegraf upgrade](/telegraf/latest/administration/upgrading/) documentation.
+
+For information about what is new in the latest Kapacitor release, view the [Changelog](/kapacitor/v1.5/about_the_project/releasenotes-changelog/).
+
+In general the steps for upgrading Kapacitor are as follows:
+
+ 1. Download a copy of the latest Kapacitor install package or binary distribution from the [InfluxData download site](https://portal.influxdata.com/downloads).
+
+    **Important note** - When upgrading Kapacitor, simply download the package using `wget`. Do not proceed directly with the installation/upgrade until the following instructions and recommendations have been understood and put to use.
+
+ 1. Stop the running Kapacitor service.
+ 1. Backup the configuration file (e.g. `/etc/kapacitor/kapacitor.conf` - n.b. the default location).
+ 1. (Optional) Back up a copy of the contents of the Kapacitor data directory (e.g. `/var/lib/kapacitor/*` - n.b. the default location).
+ 1. Perform the upgrade.
+ 1. If during the upgrade the current configuration was not preserved, manually migrate the values in the backup configuration file to the new one.
+ 1. Restart the Kapacitor service.
+ 1. Verify the restart in the log files and by testing existing tasks.
+
+## Stopping the Kapacitor service
+
+No matter how Kapacitor was installed, it is assumed that Kapacitor is configured to run as a service using `systemd`.
+
+Through `systemctl`, check to see if the Kapacitor service is running.
+
+```bash
+$ sudo systemctl status kapacitor.service
+● kapacitor.service - Time series data processing engine.
+   Loaded: loaded (/lib/systemd/system/kapacitor.service; enabled; vendor preset: enabled)
+   Active: inactive (dead) since Po 2017-08-21 14:06:18 CEST; 2s ago
+     Docs: https://github.com/influxdb/kapacitor
+  Process: 27741 ExecStart=/usr/bin/kapacitord -config /etc/kapacitor/kapacitor.conf $KAPACITOR_OPTS (code=exited, status=0/SUCCESS)
+ Main PID: 27741 (code=exited, status=0/SUCCESS)
+```
+
+The value for the `Active` field shown above should be set to 'inactive'.
+
+If instead this value happens to be `active (running)`, the service can be stopped using `systemctl`.
+
+*Example - Stopping the service*
+```bash
+sudo systemctl stop kapacitor.service
+```
+
+## Backup configuration and data
+
+Whenever upgrading, no matter the upgrade approach, it can pay to be a bit paranoid and to back up essential files and data. The Kapacitor configuration file, located at `/etc/kapacitor/kapacitor.conf` by default, is most important when upgrading Kapacitor. In addition, you may want to back up your Kapacitor database, replays, and id files in `/var/lib/kapacitor`.
+
+## Debian package upgrade
+
+Check to see if Kapacitor was installed as a Debian package.
+
+```bash
+$ dpkg --list | grep "kapacitor"
+ii  kapacitor      1.3.1-1      amd64      Time series data processing engine
+```
+
+If the line `ii kapacitor...` is returned, it is safe to continue the upgrade using the Debian package and the instructions in this section. If nothing is returned, please consult the [Upgrade with .zip or .tar.gz section below](#upgrade-with-zip-or-tar-gz) for a general example on how to proceed.
+
+### Package upgrade
+
+Kapacitor can now be upgraded using the Debian package manager:
+
+*Example - upgrade with dpkg*
+
+```
+$ sudo dpkg -i kapacitor_1.5.1_amd64.deb
+(Reading database ... 283418 files and directories currently installed.)
+Preparing to unpack kapacitor_1.5.1_amd64.deb ...
+Unpacking kapacitor (1.5.1-1) over (1.3.1-1) ...
+Removed symlink /etc/systemd/system/kapacitor.service.
+Removed symlink /etc/systemd/system/multi-user.target.wants/kapacitor.service.
+Setting up kapacitor (1.5.1-1) ...
+```
+
+During the upgrade the package manager will detect any differences between the current configuration file and the new configuration file included in the installation package. The package manager prompts the user to choose how to deal with this conflict. The default behavior is to preserve the existing configuration file. This is generally the safest choice, but it can mean losing visibility of new features provided in the more recent release.
+
+*Example - Prompt on configuration file conflict*
+```
+Configuration file '/etc/kapacitor/kapacitor.conf'
+ ==> Modified (by you or by a script) since installation.
+ ==> Package distributor has shipped an updated version.
+   What would you like to do about it ?  Your options are:
+    Y or I  : install the package maintainer's version
+    N or O  : keep your currently-installed version
+      D     : show the differences between the versions
+      Z     : start a shell to examine the situation
+ The default action is to keep your current version.
+*** kapacitor.conf (Y/I/N/O/D/Z) [default=N] ?
+```
+
+### Migrate configuration file values
+
+If during the upgrade the configuration file was overwritten, open the new configuration file in an editor such as `nano` or `vim`, and from the backup copy of the old configuration file update the values of all changed keys - for example the InfluxDB fields for `username`, `password`, `urls` and the paths to `ssl-cert` and `ssl-key`. Depending on the installation, there will most likely be more than just these.
+
+### Restart Kapacitor
+
+Restart is best handled through `systemctl`.
+
+```bash
+sudo systemctl restart kapacitor.service
+```
+
+Note that `restart` is used here instead of `start`, in the event that Kapacitor was not shut down properly.
+
+For tips on verifying the restart, see the [Verifying the Restart](#verifying-the-restart) section below.
+
+## RPM package upgrade
+
+Check to see if Kapacitor was installed as an RPM package.
+
+*Example - checking for Kapacitor installation*
+```
+# yum list installed kapacitor
+Loaded plugins: fastestmirror
+Loading mirror speeds from cached hostfile
+ * base: ftp.sh.cvut.cz
+ * extras: ftp.fi.muni.cz
+ * updates: ftp.sh.cvut.cz
+Installed Packages
+kapacitor.x86_64                          1.3.1-1                          installed
+```
+If the line `kapacitor.x86_64...1.3.1-1...installed` is returned, it is safe to continue the upgrade using the RPM package and the instructions in this section. If instead the message `Error: No matching Packages to list` was returned, please consult the [Upgrade with .zip or .tar.gz section below](#upgrade-with-zip-or-tar-gz) for a general example on how to proceed.
+
+### Package upgrade
+
+Please note that the following example commands are run as user `root`. To use them directly, please log in as the `root` user or prefix them with `sudo`.
+
+Kapacitor can now be upgraded using `yum localupdate` from the directory into which the installation packages were downloaded:
+
+*Example - yum localupdate*
+```
+# yum -y localupdate kapacitor-1.5.1.x86_64.rpm
+Loaded plugins: fastestmirror
+Examining kapacitor-1.5.1.x86_64.rpm: kapacitor-1.5.1-1.x86_64
+Marking kapacitor-1.5.1.x86_64.rpm as an update to kapacitor-1.3.1-1.x86_64
+Resolving Dependencies
+--> Running transaction check
+---> Package kapacitor.x86_64 0:1.3.1-1 will be updated
+---> Package kapacitor.x86_64 0:1.5.1-1 will be an update
+--> Finished Dependency Resolution
+
+Dependencies Resolved
+
+=============================================================================================================================================================
+ Package                     Arch                     Version                     Repository                                  Size
+=============================================================================================================================================================
+Updating:
+ kapacitor                   x86_64                   1.5.1-1                     /kapacitor-1.5.1.x86_64                     90 M
+
+Transaction Summary
+=============================================================================================================================================================
+Upgrade  1 Package
+
+Total size: 90 M
+Downloading packages:
+Running transaction check
+Running transaction test
+Transaction test succeeded
+Running transaction
+  Updating   : kapacitor-1.5.1-1.x86_64                                                                                      1/2
+warning: /etc/kapacitor/kapacitor.conf created as /etc/kapacitor/kapacitor.conf.rpmnew
+Failed to execute operation: Too many levels of symbolic links
+warning: %post(kapacitor-1.5.1-1.x86_64) scriptlet failed, exit status 1
+Non-fatal POSTIN scriptlet failure in rpm package kapacitor-1.5.1-1.x86_64
+  Cleanup    : kapacitor-1.3.1-1.x86_64                                                                                      2/2
+Removed symlink /etc/systemd/system/multi-user.target.wants/kapacitor.service.
+Removed symlink /etc/systemd/system/kapacitor.service.
+Created symlink from /etc/systemd/system/kapacitor.service to /usr/lib/systemd/system/kapacitor.service.
+Created symlink from /etc/systemd/system/multi-user.target.wants/kapacitor.service to /usr/lib/systemd/system/kapacitor.service.
+  Verifying  : kapacitor-1.5.1-1.x86_64                                                                                      1/2
+  Verifying  : kapacitor-1.3.1-1.x86_64                                                                                      2/2
+
+Updated:
+  kapacitor.x86_64 0:1.5.1-1
+
+Complete!
+
+```
+
+If after running `yum localupdate` the console messages are the same as above, it is safe to continue with managing the configuration files.
+
+### Migrate configuration file values
+
+In the example from the previous section, a warning concerning the `kapacitor.conf` file may have been observed. The original configuration file has been preserved and the new configuration file has been created with the extension `.rpmnew`. To use the new configuration file, rename the current configuration file to `kapacitor.conf.121` and the new configuration file to `kapacitor.conf`. Using `vim` or `nano`, manually migrate the old values from `kapacitor.conf.121` or from a backup copy into the new copy of `kapacitor.conf`.
+
+### Restart Kapacitor
+
+Restart is best handled through `systemctl`.
+
+```bash
+systemctl restart kapacitor.service
+```
+
+Note that `restart` is used here instead of `start`, in the event that Kapacitor was not shut down properly.
+
+For tips on verifying the restart, see the [Verifying the Restart](#verifying-the-restart) section below.
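+
+After the restart, re-running the earlier `yum` query is a simple sanity check that the new version is the one installed (a sketch; output abbreviated):
+
+```bash
+# The installed version should now read 1.5.1-1.
+yum list installed kapacitor
+```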
+
+## Upgrade with .zip or .tar.gz
+
+How Kapacitor has been installed using the binary distribution (.zip, .tgz) is open to a certain number of variables depending on the specific OS, organizational preferences and other factors. The package contents may have been simply unpacked in a `/home/` directory. They may have been copied into the system directories suggested by the package file structure. Or they may have been leveraged using another file system strategy. The following discussion presents one hypothetical installation. The steps are presentational and should, with a little bit of creative thinking, be adaptable to other types of installation.
+
+### A hypothetical installation
+The following presentation will use a hypothetical installation, where all InfluxData products have been unpacked and are running from the directory `/opt/influxdata`. Please note that it is recommended that InfluxData products should be installed using the system specific install packages (e.g. `.deb`, `.rpm`) whenever possible; however, on other systems, for which there is no current installation package, the binary distribution (`.zip`, `.tar.gz`) can be used.
+
+*Example - the InfluxData directory*
+```
+$ ls -l /opt/influxdata/
+total 20
+lrwxrwxrwx 1 influxdb  influxdb    33 srp 22 12:51 influxdb -> /opt/influxdata/influxdb-1.2.4-1/
+drwxr-xr-x 5 influxdb  influxdb  4096 kvě  8 22:16 influxdb-1.2.4-1
+lrwxrwxrwx 1 kapacitor kapacitor   34 srp 22 12:52 kapacitor -> /opt/influxdata/kapacitor-1.3.1-1/
+drwxr-xr-x 6 kapacitor kapacitor 4096 srp 22 10:56 kapacitor-1.3.1-1
+drwxr-xr-x 2 influxdb  influxdb  4096 srp 22 13:52 ssl
+drwxrwxr-x 5 telegraf  telegraf  4096 úno  1  2017 telegraf
+```
+In the above example it can be seen that for the InfluxDB server and the Kapacitor application, a generic directory has been created using a symbolic link to the directory for the specific product release.
+
+Elsewhere in the file system, configuration and lib directories have been pointed into these locations using additional symbolic links.
+
+*Example - symbolic links from /etc*
+```
+...
+$ ls -l `find /etc -maxdepth 1 -type l -print`
+lrwxrwxrwx 1 root root 38 srp 22 12:56 /etc/influxdb -> /opt/influxdata/influxdb/etc/influxdb/
+lrwxrwxrwx 1 root root 40 srp 22 12:57 /etc/kapacitor -> /opt/influxdata/kapacitor/etc/kapacitor/
+lrwxrwxrwx 1 root root 38 srp 22 12:57 /etc/telegraf -> /opt/influxdata/telegraf/etc/telegraf/
+...
+```
+
+*Example - symbolic links from /usr/lib*
+```
+$ ls -l `find /usr/lib -maxdepth 1 -type l -print`
+lrwxrwxrwx 1 root root 42 srp 22 13:31 /usr/lib/influxdb -> /opt/influxdata/influxdb/usr/lib/influxdb/
+lrwxrwxrwx 1 root root 44 srp 22 13:33 /usr/lib/kapacitor -> /opt/influxdata/kapacitor/usr/lib/kapacitor/
+...
+lrwxrwxrwx 1 root root 42 srp 22 13:32 /usr/lib/telegraf -> /opt/influxdata/telegraf/usr/lib/telegraf/
+```
+
+*Example - symbolic links from /usr/bin*
+```
+ ls -l `find /usr/bin -maxdepth 1 -type l -print`
+ ...
+lrwxrwxrwx 1 root root 39 srp 22 14:40 /usr/bin/influx -> /opt/influxdata/influxdb/usr/bin/influx
+lrwxrwxrwx 1 root root 40 srp 22 14:40 /usr/bin/influxd -> /opt/influxdata/influxdb/usr/bin/influxd
+...
+lrwxrwxrwx 1 root root 43 srp 22 14:04 /usr/bin/kapacitor -> /opt/influxdata/kapacitor/usr/bin/kapacitor
+lrwxrwxrwx 1 root root 44 srp 22 14:04 /usr/bin/kapacitord -> /opt/influxdata/kapacitor/usr/bin/kapacitord
+...
+lrwxrwxrwx 1 root root 41 srp 22 13:57 /usr/bin/telegraf -> /opt/influxdata/telegraf/usr/bin/telegraf
+...
+```
+
+Data file directories have been set up by hand.
+
+*Example - /var/lib directory*
+```
+$ ls -l /var/lib/ | sort -k3,3
+total 284
+...
+drwxr-xr-x 5 influxdb  influxdb  4096 srp 22 14:12 influxdb
+drwxr-xr-x 3 kapacitor kapacitor 4096 srp 22 14:16 kapacitor
+...
+
+```
+
+InfluxDB is configured to use HTTPS and authentication. InfluxDB, Telegraf, and Kapacitor have been configured to start and stop with systemd.
+
+*Example - symbolic links in the systemd directory*
+```
+$ ls -l `find /etc/systemd/system -maxdepth 1 -type l -print`
+...
+lrwxrwxrwx 1 root root 42 srp 22 13:39 /etc/systemd/system/influxdb.service -> /usr/lib/influxdb/scripts/influxdb.service
+lrwxrwxrwx 1 root root 44 srp 22 13:40 /etc/systemd/system/kapacitor.service -> /usr/lib/kapacitor/scripts/kapacitor.service
+lrwxrwxrwx 1 root root 42 srp 22 13:39 /etc/systemd/system/telegraf.service -> /usr/lib/telegraf/scripts/telegraf.service
+...
+
+```
+### Manual upgrade
+
+Ensure that InfluxDB and Telegraf (if installed) have been upgraded, that the Kapacitor service has been stopped, and that a backup copy of `kapacitor.conf` has been saved.
+
+Here the latest InfluxDB distribution has been unpacked alongside the previous distribution and the general symbolic link has been updated. The Telegraf distribution has been unpacked on top of the previous one.
+
+*Example - the InfluxData directory post InfluxDB and Telegraf upgrade*
+```
+$ ls -l /opt/influxdata/
+total 24
+drwxr-xr-x 2 root      root      4096 srp 22 15:21 bak
+lrwxrwxrwx 1 root      root        17 srp 22 15:15 influxdb -> influxdb-1.5.2-1/
+drwxr-xr-x 5 influxdb  influxdb  4096 kvě  8 22:16 influxdb-1.3.1-1
+drwxr-xr-x 5 influxdb  influxdb  4096 srp  5 01:33 influxdb-1.5.2-1
+lrwxrwxrwx 1 kapacitor kapacitor   34 srp 22 12:52 kapacitor -> /opt/influxdata/kapacitor-1.3.1-1/
+drwxr-xr-x 6 kapacitor kapacitor 4096 srp 22 10:56 kapacitor-1.3.1-1
+drwxr-xr-x 2 influxdb  influxdb  4096 srp 22 13:52 ssl
+drwxr-xr-x 5 telegraf  telegraf  4096 čec 27 01:26 telegraf
+```
+Kapacitor is upgraded using the same approach as the InfluxDB upgrade. The new distribution package is unpacked alongside the current one.
+
+*Example - unpacking the latest Kapacitor distribution*
+```
+$ cd /opt/influxdata
+$ sudo tar -xvzf /home/karl/Downloads/install/kapacitor-1.5.1_linux_amd64.tar.gz
+./kapacitor-1.5.1-1/
+./kapacitor-1.5.1-1/usr/
+./kapacitor-1.5.1-1/usr/bin/
+./kapacitor-1.5.1-1/usr/bin/kapacitord
+./kapacitor-1.5.1-1/usr/bin/kapacitor
+./kapacitor-1.5.1-1/usr/bin/tickfmt
+./kapacitor-1.5.1-1/usr/lib/
+./kapacitor-1.5.1-1/usr/lib/kapacitor/
+./kapacitor-1.5.1-1/usr/lib/kapacitor/scripts/
+./kapacitor-1.5.1-1/usr/lib/kapacitor/scripts/init.sh
+./kapacitor-1.5.1-1/usr/lib/kapacitor/scripts/kapacitor.service
+./kapacitor-1.5.1-1/usr/share/
+./kapacitor-1.5.1-1/usr/share/bash-completion/
+./kapacitor-1.5.1-1/usr/share/bash-completion/completions/
+./kapacitor-1.5.1-1/usr/share/bash-completion/completions/kapacitor
+./kapacitor-1.5.1-1/var/
+./kapacitor-1.5.1-1/var/log/
+./kapacitor-1.5.1-1/var/log/kapacitor/
+./kapacitor-1.5.1-1/var/lib/
+./kapacitor-1.5.1-1/var/lib/kapacitor/
+./kapacitor-1.5.1-1/etc/
+./kapacitor-1.5.1-1/etc/kapacitor/
+./kapacitor-1.5.1-1/etc/kapacitor/kapacitor.conf
+./kapacitor-1.5.1-1/etc/logrotate.d/
+./kapacitor-1.5.1-1/etc/logrotate.d/kapacitor
+```
+Following extraction, the old symbolic link is removed and a new one is created pointing to the new distribution. This approach is similar to simply unpacking or copying the distribution contents over the existing directories, which is also feasible. Parallel unpacking and link creation has the advantage of preserving the previous installation, albeit in a now inactive location, which makes it easy to revert to the previous installation should that ever be necessary.
+
+*Example - Post extraction commands*
+```bash
+$ sudo chown -R kapacitor:kapacitor kapacitor-1.5.1-1/
+$ sudo rm kapacitor
+$ sudo ln -s ./kapacitor-1.5.1-1/ ./kapacitor
+$ sudo chown kapacitor:kapacitor kapacitor
+$ ls -l
+total 28
+drwxr-xr-x 2 root      root      4096 srp 22 15:21 bak
+lrwxrwxrwx 1 root      root        17 srp 22 15:15 influxdb -> influxdb-1.5.2-1/
+drwxr-xr-x 5 influxdb  influxdb  4096 kvě  8 22:16 influxdb-1.3.1-1
+drwxr-xr-x 5 influxdb  influxdb  4096 srp  5 01:33 influxdb-1.5.2-1
+lrwxrwxrwx 1 kapacitor kapacitor   20 srp 22 15:35 kapacitor -> ./kapacitor-1.5.1-1/
+drwxr-xr-x 6 kapacitor kapacitor 4096 srp 22 10:56 kapacitor-1.3.1-1
+drwxr-xr-x 5 kapacitor kapacitor 4096 čen  2 20:22 kapacitor-1.5.1-1
+drwxr-xr-x 2 influxdb  influxdb  4096 srp 22 13:52 ssl
+drwxr-xr-x 5 telegraf  telegraf  4096 čec 27 01:26 telegraf
+```
+### Migrate configuration file values
+
+Using `vim`, manually migrate the values from the backup of the previous configuration file to the new one.
+
+```bash
+$ sudo -u kapacitor vim kapacitor/etc/kapacitor/kapacitor.conf
+```
+
+### Restart Kapacitor
+
+Restart is handled through `systemctl`.
+
+```bash
+sudo systemctl restart kapacitor.service
+```
+Note that `restart` is used here instead of `start`, in the event that Kapacitor was not shut down properly.
+
+## Verifying the restart
+
+First, check the service status in `systemctl`.
+
+*Example - service status check*
+```bash
+$ sudo systemctl status kapacitor.service
+● kapacitor.service - Time series data processing engine.
+   Loaded: loaded (/lib/systemd/system/kapacitor.service; enabled; vendor preset: enabled)
+   Active: active (running) since Po 2017-08-21 14:22:18 CEST; 16min ago
+     Docs: https://github.com/influxdb/kapacitor
+ Main PID: 29452 (kapacitord)
+    Tasks: 13
+   Memory: 11.6M
+      CPU: 726ms
+   CGroup: /system.slice/kapacitor.service
+           └─29452 /usr/bin/kapacitord -config /etc/kapacitor/kapacitor.conf
+```
+Next, check the log in `journalctl`.
+
+*Example - journalctl check*
+```
+srp 21 14:22:18 algonquin systemd[1]: Started Time series data processing engine..
+srp 21 14:22:18 algonquin kapacitord[29452]: '##:::'##::::'###::::'########:::::'###:::::'######::'####:'########::'#######::'########::
+srp 21 14:22:18 algonquin kapacitord[29452]: ##::'##::::'## ##::: ##.... ##:::'## ##:::'##... ##:. ##::... ##..::'##.... ##: ##.... ##:
+srp 21 14:22:18 algonquin kapacitord[29452]: ##:'##::::'##:. ##:: ##:::: ##::'##:. ##:: ##:::..::: ##::::: ##:::: ##:::: ##: ##:::: ##:
+srp 21 14:22:18 algonquin kapacitord[29452]: #####::::'##:::. ##: ########::'##:::. ##: ##:::::::: ##::::: ##:::: ##:::: ##: ########::
+srp 21 14:22:18 algonquin kapacitord[29452]: ##. ##::: #########: ##.....::: #########: ##:::::::: ##::::: ##:::: ##:::: ##: ##.. ##:::
+srp 21 14:22:18 algonquin kapacitord[29452]: ##:. ##:: ##.... ##: ##:::::::: ##.... ##: ##::: ##:: ##::::: ##:::: ##:::: ##: ##::. ##::
+srp 21 14:22:18 algonquin kapacitord[29452]: ##::. ##: ##:::: ##: ##:::::::: ##:::: ##:. ######::'####:::: ##::::. #######:: ##:::. ##:
+srp 21 14:22:18 algonquin kapacitord[29452]: ..::::..::..:::::..::..:::::::::..:::::..:::......:::....:::::..::::::.......:::..:::::..::
+srp 21 14:22:18 algonquin kapacitord[29452]: 2017/08/21 14:22:18 Using configuration at: /etc/kapacitor/kapacitor.conf
+```
+Also check the log in the directory `/var/log/kapacitor`.
+
+*Example - kapacitor.log check*
+```bash
+$ sudo tail -f /var/log/kapacitor/kapacitor.log
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:41:50 +0200] "POST /write?consistency=&db=_internal&precision=ns&rp=monitor HTTP/1.1" 204 0 "-" "InfluxDBClient" 1a122e03-866e-11e7-80f1-000000000000 375
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:41:50 +0200] "POST /write?consistency=&db=telegraf&precision=ns&rp=autogen HTTP/1.1" 204 0 "-" "InfluxDBClient" 1a401bb1-866e-11e7-80f2-000000000000 303
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:42:00 +0200] "POST /write?consistency=&db=_internal&precision=ns&rp=monitor HTTP/1.1" 204 0 "-" "InfluxDBClient" 200818be-866e-11e7-80f3-000000000000 398
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:42:00 +0200] "POST /write?consistency=&db=telegraf&precision=ns&rp=autogen HTTP/1.1" 204 0 "-" "InfluxDBClient" 20360382-866e-11e7-80f4-000000000000 304
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:42:10 +0200] "POST /write?consistency=&db=_internal&precision=ns&rp=monitor HTTP/1.1" 204 0 "-" "InfluxDBClient" 25fded1a-866e-11e7-80f5-000000000000 550
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:42:10 +0200] "POST /write?consistency=&db=telegraf&precision=ns&rp=autogen HTTP/1.1" 204 0 "-" "InfluxDBClient" 262be594-866e-11e7-80f6-000000000000 295
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:42:20 +0200] "POST /write?consistency=&db=_internal&precision=ns&rp=monitor HTTP/1.1" 204 0 "-" "InfluxDBClient" 2bf3d170-866e-11e7-80f7-000000000000 473
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:42:20 +0200] "POST /write?consistency=&db=telegraf&precision=ns&rp=autogen HTTP/1.1" 204 0 "-" "InfluxDBClient" 2c21ddde-866e-11e7-80f8-000000000000 615
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:42:30 +0200] "POST /write?consistency=&db=_internal&precision=ns&rp=monitor HTTP/1.1" 204 0 "-" "InfluxDBClient" 31e9b251-866e-11e7-80f9-000000000000 424
+[httpd] 127.0.0.1 - - [21/Aug/2017:14:42:30 +0200] "POST /write?consistency=&db=telegraf&precision=ns&rp=autogen HTTP/1.1" 204 0 "-" "InfluxDBClient" 3217a267-866e-11e7-80fa-000000000000 288
+
+```
+
+Check for Kapacitor client activity in InfluxDB.
+
+*Example - InfluxDB check*
+```bash
+sudo journalctl --unit influxdb.service | grep "Kapacitor"
+srp 21 14:45:18 algonquin influxd[27308]: [httpd] 127.0.0.1 - admin [21/Aug/2017:14:45:18 +0200] "GET /ping HTTP/1.1" 204 0 "-" "KapacitorInfluxDBClient" 965e7c0b-866e-11e7-81c7-000000000000 21
+srp 21 14:45:18 algonquin influxd[27308]: [httpd] 127.0.0.1 - admin [21/Aug/2017:14:45:18 +0200] "POST /query?db=&q=SHOW+DATABASES HTTP/1.1" 200 123 "-" "KapacitorInfluxDBClient" 965e89e5-866e-11e7-81c8-000000000000 570
+srp 21 14:45:18 algonquin influxd[27308]: [httpd] 127.0.0.1 - admin [21/Aug/2017:14:45:18 +0200] "POST /query?db=&q=SHOW+RETENTION+POLICIES+ON+_internal HTTP/1.1" 200 158 "-" "KapacitorInfluxDBClient" 965fcf0f-866e-11e7-81c9-000000000000 308
+srp 21 14:45:18 algonquin influxd[27308]: [httpd] 127.0.0.1 - admin [21/Aug/2017:14:45:18 +0200] "POST /query?db=&q=SHOW+RETENTION+POLICIES+ON+telegraf HTTP/1.1" 200 154 "-" "KapacitorInfluxDBClient" 96608b2b-866e-11e7-81ca-000000000000 1812
+srp 21 14:45:18 algonquin influxd[27308]: [httpd] 127.0.0.1 - admin [21/Aug/2017:14:45:18 +0200] "POST /query?db=&q=SHOW+SUBSCRIPTIONS HTTP/1.1" 200 228 "-" "KapacitorInfluxDBClient" 96618c32-866e-11e7-81cb-000000000000 380
+
+```
+
+Verify that old tasks are once again visible and enabled.
+
+*Example - tasks check*
+```bash
+$ kapacitor list tasks
+ID               Type   Status   Executing Databases and Retention Policies
+cpu_alert_batch  batch  disabled false     ["telegraf"."autogen"]
+cpu_alert_stream stream enabled  true      ["telegraf"."autogen"]
+```
+
+Recording existing tasks and replaying the results is also recommended as a way to check the status of the newly upgraded Kapacitor service. Which tasks to record will depend on the specifics of the installation. See the [Kapacitor API documentation](/kapacitor/v1.5/working/api#recordings) for more details.
+
+If these checks look correct, then the upgrade can be considered complete.
diff --git a/content/kapacitor/v1.5/event_handlers/_index.md b/content/kapacitor/v1.5/event_handlers/_index.md
new file mode 100644
index 000000000..4861c11a8
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/_index.md
@@ -0,0 +1,133 @@
+---
+title: Kapacitor event handlers
+description: Kapacitor event handlers provide ways to integrate Kapacitor alert messages with logging, specific URLs, and many third-party applications.
+aliases:
+  - /kapacitor/v1.5/working/event-handler-setup/
+menu:
+  kapacitor_1_5_ref:
+    name: Event handlers
+    weight: 50
+---
+
+Kapacitor can be integrated into a monitoring system by sending
+[alert messages](/kapacitor/v1.5/nodes/alert_node/#message) to supported event
+handlers. Currently, Kapacitor can send alert messages to specific log files and
+specific URLs, as well as to many third-party applications.
+
+These documents outline configuration options, setup instructions,
+[handler file](#handler-file) and [TICKscript](/kapacitor/v1.5/tick/introduction/)
+syntax for officially supported Kapacitor event handlers.
+
+[Aggregate](/kapacitor/v1.5/event_handlers/aggregate/)
+[Alerta](/kapacitor/v1.5/event_handlers/alerta/)
+[Discord](/kapacitor/v1.5/event_handlers/discord/)
+[Email](/kapacitor/v1.5/event_handlers/email/)
+[Exec](/kapacitor/v1.5/event_handlers/exec/)
+[Hipchat](/kapacitor/v1.5/event_handlers/hipchat/)
+[Kafka](/kapacitor/v1.5/event_handlers/kafka/)
+[Log](/kapacitor/v1.5/event_handlers/log/)
+[Microsoft Teams](/kapacitor/v1.5/event_handlers/microsoftteams/)
+[MQTT](/kapacitor/v1.5/event_handlers/mqtt/)
+[Opsgenie](/kapacitor/v1.5/event_handlers/opsgenie/)
+[Pagerduty](/kapacitor/v1.5/event_handlers/pagerduty/)
+[Post](/kapacitor/v1.5/event_handlers/post/)
+[Publish](/kapacitor/v1.5/event_handlers/publish/)
+[Pushover](/kapacitor/v1.5/event_handlers/pushover/)
+[Sensu](/kapacitor/v1.5/event_handlers/sensu/)
+[Slack](/kapacitor/v1.5/event_handlers/slack/)
+[Snmptrap](/kapacitor/v1.5/event_handlers/snmptrap/)
+[Talk](/kapacitor/v1.5/event_handlers/talk/)
+[TCP](/kapacitor/v1.5/event_handlers/tcp/)
+[Telegram](/kapacitor/v1.5/event_handlers/telegram/)
+[Victorops](/kapacitor/v1.5/event_handlers/victorops/)
+
+> **Note:** Setup instructions are not currently available for all supported
+> event handlers, but additional information will be added over time. If
+> you are familiar with the setup process for a specific event handler, please
+> feel free to [contribute](https://github.com/influxdata/docs.influxdata.com/blob/master/CONTRIBUTING.md).
+
+## Configure event handlers
+
+Required and default configuration options for most event handlers are
+configured in your Kapacitor configuration file, `kapacitor.conf`.
+_The default location for this is `/etc/kapacitor/kapacitor.conf`, but may be
+different depending on your Kapacitor setup._
+
+Many event handlers provide options that can be defined in a TICKscript or in a
+handler file, while some can only be configured in a handler file.
+These configurable options are outlined in the documentation for each handler.
+
+## Add and use event handlers
+
+Enable the event handler in your `kapacitor.conf` if applicable. Once
+enabled, do one of the following:
+
+- [Create a topic handler with a handler file](#create-a-topic-handler-with-a-handler-file), and then [add the handler](#add-the-handler).
+- [Use a handler in a TICKscript](#use-a-handler-in-a-tickscript).
+
+  > **Note:** Not all event handlers can be used in TICKscripts.
+
+### Create a topic handler with a handler file
+
+An event handler file is a simple YAML or JSON file that contains information
+about the handler.
+Although many handlers can be added in a TICKscript, managing multiple handlers in TICKscripts can be cumbersome.
+Handler files let you add and use handlers outside of TICKscripts.
+For some handler types, using handler files is the only option.
+
+The handler file contains the following:
+
+\* Required
+
+- **ID**\*: The unique ID of the handler.
+- **Topic**\*: The topic to which the handler subscribes.
+- **Match**: A lambda expression to filter matching alerts. By default, all alerts
+  match. Learn more about [match expressions](/kapacitor/v1.5/working/alerts/#match-expressions).
+- **Kind**\*: The kind of handler.
+- **Options**: Configurable options determined by the handler kind. If none are
+  provided, default values defined for the handler in the `kapacitor.conf` are used.
+
+```yaml
+id: handler-id
+topic: topic-name
+match: changed()
+kind: slack
+options:
+  channel: '#oh-nos'
+```
+
+#### Add the handler
+
+Use the Kapacitor CLI to define a new handler with a handler file:
+
+```bash
+# Pattern
+kapacitor define-topic-handler <handler-file-name>
+
+# Example
+kapacitor define-topic-handler slack_cpu_handler.yaml
+```
+
+### Use a handler in a TICKscript
+
+Many event handlers can be used directly in TICKscripts to send events.
+This is generally done with handlers that send messages to third parties. Below
+is an example TICKscript that publishes CPU alerts to Slack using the `.slack()`
+event handler:
+
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('You better check your CPU usage.')
+    .slack()
+```
+
+> Events are sent to handlers if the alert is in a state other than ‘OK’ or the
+> alert just changed to the ‘OK’ state from a non-‘OK’ state (the alert
+> recovered). Use the [AlertNode.StateChangesOnly](/kapacitor/v1.5/nodes/alert_node/#statechangesonly) property to send events to handlers only if the alert state changes.
diff --git a/content/kapacitor/v1.5/event_handlers/aggregate.md b/content/kapacitor/v1.5/event_handlers/aggregate.md
new file mode 100644
index 000000000..e3839597f
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/aggregate.md
@@ -0,0 +1,84 @@
+---
+title: Aggregate event handler
+description: The aggregate event handler allows you to aggregate alert messages over a specified interval. This page includes aggregate options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Aggregate
+    weight: 100
+    parent: Event handlers
+---
+
+The aggregate event handler aggregates multiple events into a single event.
+It subscribes to a topic and, at a defined interval, aggregates the messages
+published to that topic into a single event published to another topic.
+
+## Options
+The following aggregate event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file).
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| interval | duration | How often to aggregate events. The interval must be specified in nanoseconds. |
+| topic | string | A topic into which to publish the aggregate events. |
+| message | string | A template string where `{{.Interval}}` and `{{.Count}}` are available for constructing a meaningful message. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: aggregate
+options:
+  interval: 300000000000
+  topic: agg_5m
+  message: '{{.Count}} new events in the last {{.Interval}}'
+```
+
+## Using the aggregate event handler
+The aggregate event handler subscribes to a topic and aggregates messages
+published to that topic at specified intervals.
+The TICKscript below, `cpu_alert.tick`, publishes alerts to the `cpu` topic if
+CPU idle usage is less than 10% (or CPU usage is greater than 90%).
+
+#### cpu\_alert.tick
+```js
+stream
+  |from()
+    .measurement('cpu')
+    .groupBy(*)
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .topic('cpu')
+```
+
+Add and enable this TICKscript with the following:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a new handler file, `aggr_cpu_alerts_10m.yaml`, using the `aggregate`
+event handler that subscribes to the `cpu` topic, aggregates alerts from the
+last 10 minutes, and publishes aggregated messages to a new `aggr_cpu` topic.
+_Handler files can be YAML or JSON._
+
+#### aggr_cpu_alerts_10m.yaml
+```yaml
+id: aggr_cpu_alerts_10m
+topic: cpu
+kind: aggregate
+options:
+  interval: 600000000000
+  topic: aggr_cpu
+  message: '{{.Count}} CPU alerts in the last {{.Interval}}'
+```
+
+Add the handler file:
+
+```bash
+kapacitor define-topic-handler aggr_cpu_alerts_10m.yaml
+```
+
+Aggregated CPU alert messages will be published to the `aggr_cpu` topic every
+10 minutes. Further handling of the aggregated events can be configured on the
+`aggr_cpu` topic.
diff --git a/content/kapacitor/v1.5/event_handlers/alerta.md b/content/kapacitor/v1.5/event_handlers/alerta.md
new file mode 100644
index 000000000..5b0ca6d90
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/alerta.md
@@ -0,0 +1,197 @@
+---
+title: Alerta event handler
+description: The Alerta event handler allows you to send Kapacitor alerts to Alerta. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Alerta
+    weight: 200
+    parent: Event handlers
+---
+
+[Alerta](http://alerta.io/) is a monitoring tool used to consolidate and
+deduplicate alerts from multiple sources for quick ‘at-a-glance’ visualization.
+Kapacitor can be configured to send alert messages to Alerta.
+
+## Configuration
+Configuration as well as default [option](#options) values for the Alerta event
+handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[alerta]
+  enabled = true
+  url = "http://127.0.0.1"
+  token = "mysupersecretauthtoken"
+  environment = "production"
+  origin = "kapacitor"
+```
+
+#### `enabled`
+Set to `true` to enable the Alerta event handler.
+
+#### `url`
+The Alerta URL.
+
+#### `token`
+Default Alerta authentication token.
+
+#### `token-prefix`
+Default token prefix.
+_If you receive invalid token errors, you may need to change this to "Key"._
+
+#### `environment`
+Default Alerta environment.
+
+#### `origin`
+Default origin of alert.
+
+## Options
+The following Alerta event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.alerta()` in a TICKscript.
+
+\* Required
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| token | string | Alerta authentication token. If empty, uses the token from the configuration. |
+| token-prefix | string | Alerta authentication token prefix. If empty, uses "Bearer". |
+| resource\* | string | Alerta resource. Can be a template and has access to the same data as the AlertNode.Details property. Default: {{ .Name }} |
+| event\* | string | Alerta event. Can be a template and has access to the same data as the idInfo property. Default: {{ .ID }}. |
+| environment | string | Alerta environment. Can be a template and has access to the same data as the AlertNode.Details property. Default is set from the configuration. |
+| group | string | Alerta group. Can be a template and has access to the same data as the AlertNode.Details property. Default: {{ .Group }}. |
+| value | string | Alerta value. Can be a template and has access to the same data as the AlertNode.Details property. Default is an empty string. |
+| origin | string | Alerta origin. If empty, uses the origin from the configuration. |
+| service | list of strings | List of affected services. |
+| timeout | duration string | Alerta timeout. Default is 24 hours. |
+
+> **Note:** The `resource` and `event` properties are required.
+> Alerta cannot be configured globally because of these required properties.
+
+### Example: handler file
+```yaml
+topic: topic-name
+id: handler-id
+kind: alerta
+options:
+  token: 'mysupersecretauthtoken'
+  token-prefix: 'Bearer'
+  resource: '{{ .Name }}'
+  event: '{{ .ID }}'
+  environment: 'Production'
+  group: '{{ .Group }}'
+  value: 'some-value'
+  origin: 'kapacitor'
+  service: ['service1', 'service2']
+  timeout: 24h
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .stateChangesOnly()
+  .alerta()
+    .token('mysupersecretauthtoken')
+    .tokenPrefix('Bearer')
+    .resource('{{ .Name }}')
+    .event('{{ .ID }}')
+    .environment('Production')
+    .group('{{ .Group }}')
+    .value('some-value')
+    .origin('kapacitor')
+    .service('service1', 'service2')
+    .timeout(24h)
+```
+
+## Using the Alerta event handler
+With the Alerta event handler enabled and configured in your `kapacitor.conf`,
+use the `.alerta()` attribute in your TICKscripts to send alerts to Alerta or
+define an Alerta handler that subscribes to a topic and sends published alerts
+to Alerta.
+
+> To avoid posting a message every alert interval, use
+> [AlertNode.StateChangesOnly](/kapacitor/v1.5/nodes/alert_node/#statechangesonly)
+> so only events where the alert changed state are sent to Alerta.
+
+The examples below use the following Alerta configuration defined in the `kapacitor.conf`:
+
+_**Alerta settings in kapacitor.conf**_
+```toml
+[alerta]
+  enabled = true
+  url = "http://127.0.0.1"
+  token = "mysupersecretauthtoken"
+  environment = "production"
+  origin = "kapacitor"
+```
+
+### Send alerts to Alerta from a TICKscript
+
+The following TICKscript sends the message, "Hey, check your CPU", to Alerta
+whenever idle CPU usage drops below 10%, using the `.alerta()` event handler and
+default Alerta settings defined in the `kapacitor.conf`.
+
+_**alerta-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .alerta()
+      .resource('{{ .Name }}')
+      .event('{{ .ID }}')
+```
+
+### Send alerts to Alerta from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message, "Hey,
+check your CPU". An Alerta handler is added that subscribes to the `cpu` topic
+and publishes all alert messages to Alerta using default settings defined in the
+`kapacitor.conf`.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle CPU
+usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the Alerta
+event handler to send alerts to Alerta.
+
+_**alerta\_cpu\_handler.yaml**_
+```yaml
+id: alerta-cpu-alert
+topic: cpu
+kind: alerta
+options:
+  resource: '{{ .Name }}'
+  event: '{{ .ID }}'
+  origin: 'kapacitor'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler alerta_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/discord.md b/content/kapacitor/v1.5/event_handlers/discord.md
new file mode 100644
index 000000000..84e8d8a7f
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/discord.md
@@ -0,0 +1,308 @@
+---
+title: Discord event handler
+description: The Discord event handler lets you send Kapacitor alerts to Discord. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Discord
+    weight: 250
+    parent: Event handlers
+---
+
+[Discord](https://discordapp.com) is a popular chat service targeted primarily
+at gamers, but also used by teams outside of gaming looking for a free solution.
+To configure Kapacitor to send alert messages to Discord, set the applicable
+configuration options.
+
+## Configuration
+Configuration as well as default [option](#options) values for the Discord event
+handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[[discord]]
+  enabled = false
+  default = true
+  url = "https://discordapp.com/api/webhooks/xxxxxxxxxxxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+  workspace = "guild-channel"
+  timestamp = true
+  username = "Kapacitor"
+  avatar-url = "https://influxdata.github.io/branding/img/downloads/influxdata-logo--symbol--pool-alpha.png"
+  embed-title = "Kapacitor Alert"
+  global = false
+  state-changes-only = false
+  ssl-ca = "/path/to/ca.crt"
+  ssl-cert = "/path/to/cert.crt"
+  ssl-key = "/path/to/private-key.key"
+  insecure-skip-verify = false
+```
+
+> Multiple Discord clients may be configured by repeating `[[discord]]` sections.
+> The `workspace` acts as a unique identifier for each configured Discord client.
+
+#### `enabled`
+Set to `true` to enable the Discord event handler.
+
+#### `default`
+If multiple Discord client configurations are specified, identify one configuration as the default.
+
+#### `workspace`
+The Discord workspace ID.
+Set this string to identify this particular Discord configuration.
+For example, the name of the Discord channel and the guild it's a part
+of, such as `guild-channel`.
+
+#### `timestamp`
+Boolean signifying whether the timestamp should be shown in the embed.
+
+#### `url`
+The Discord webhook URL. This can be obtained by adding a webhook in the channel
+settings - see [Intro to Webhooks](https://support.discordapp.com/hc/en-us/articles/228383668) for a full guide.
+Discord will provide you with the webhook URL.
+
+#### `username`
+Set the Discord bot username to override the username set when generating the webhook.
+
+#### `avatar-url`
+Set a URL to a specified avatar to override the avatar set when generating the webhook.
+
+#### `embed-title`
+Set the title to display in the alert embed. If blank, no title is set.
+
+#### `global`
+Set to `true` to send all alerts to Discord without explicitly specifying Discord in the TICKscript.
+
+#### `state-changes-only`
+Sets all alerts in state-changes-only mode, meaning alerts will only be sent if
+the alert state changes.
+_Only applies if `global` is `true`._
+
+#### `ssl-ca`
+Set path to certificate authority file.
+
+#### `ssl-cert`
+Set path to host certificate file.
+
+#### `ssl-key`
+Set path to certificate private key file.
+
+#### `insecure-skip-verify`
+Set to `true` to use SSL but skip chain and host verification.
+_This is necessary if using a self-signed certificate._
+
+## Options
+Set the following Discord event handler options in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.discord()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| workspace | string | Specifies which Discord configuration to use when there are multiple. |
+| timestamp | bool | Specifies whether to show the timestamp in the embed footer. If not set, uses the value from the configuration. |
+| username | string | Username of the Discord bot. If empty, uses the username from the configuration. |
+| avatar-url | string | URL of image to use as the webhook's avatar. If empty, uses the URL from the configuration. |
+| embed-title | string | Title of alert embed posted to the webhook. If empty, uses the title set in the configuration. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: discord
+options:
+  workspace: 'guild-channel'
+  username: 'Kapacitor'
+  avatar-url: 'https://influxdata.github.io/branding/img/downloads/influxdata-logo--symbol--pool-alpha.png'
+  timestamp: true
+  embed-title: 'Kapacitor Alert'
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .discord()
+    .workspace('guild-channel')
+    .username('Kapacitor')
+    .avatarUrl('https://influxdata.github.io/branding/img/downloads/influxdata-logo--symbol--pool-alpha.png')
+    .timestamp(true)
+    .embedTitle('Kapacitor Alert')
+```
+
+## Set up Guild
+To allow Kapacitor to send alerts to Discord, obtain a webhook URL from Discord
+(see [Intro to Webhooks](https://support.discordapp.com/hc/en-us/articles/228383668)).
+Then add the generated webhook URL as the `url` in the `[[discord]]` configuration section of
+your `kapacitor.conf`.
+
+## Using the Discord event handler
+With one or more Discord event handlers enabled and configured in your
+`kapacitor.conf`, use the `.discord()` attribute in your TICKscripts to send
+alerts to Discord or define a Discord handler that subscribes to a topic and sends
+published alerts to Discord.
+
+> To avoid posting a message every alert interval, use
+> [AlertNode.StateChangesOnly](/kapacitor/v1.5/nodes/alert_node/#statechangesonly)
+> so only events where the alert changed state are sent to Discord.
+
+The examples below use the following sample Discord configurations defined in the `kapacitor.conf`:
+
+_**Discord settings in kapacitor.conf**_
+```toml
+[[discord]]
+  enabled = true
+  default = true
+  url = "https://discordapp.com/api/webhooks/xxxxxxxxxxxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+  workspace = "guild-alerts"
+  timestamp = true
+  username = "AlertBot"
+  avatar-url = "https://influxdata.github.io/branding/img/downloads/influxdata-logo--symbol--pool-alpha.png"
+  embed-title = "Alert"
+  global = false
+  state-changes-only = false
+
+[[discord]]
+  enabled = true
+  default = false
+  url = "https://discordapp.com/api/webhooks/xxxxxxxxxxxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+  workspace = "guild-errors"
+  timestamp = true
+  username = "StatsBot"
+  avatar-url = "https://influxdata.github.io/branding/img/downloads/influxdata-logo--symbol--pool-alpha.png"
+  embed-title = "Errors"
+  global = false
+  state-changes-only = false
+```
+
+### Send alerts to Discord from a TICKscript
+Use the `.discord()` event handler in your TICKscript to send an alert.
+For example, this configuration sends an alert with the message
+"Hey, check your CPU" to the Discord channel whenever idle CPU usage
+drops below 20%.
+
+_**discord-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .warn(lambda: "usage_idle" < 20)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .discord()
+      .embedTitle('Uh Oh!')
+```
+
+### Send alerts to Discord from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message,
+"Hey, check your CPU".
+A Discord handler is added that subscribes to the `cpu` topic and publishes all
+alert messages to Discord.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends a critical alert message to the `cpu` topic any time
+idle CPU usage drops below 5%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 5)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the Discord
+event handler to send alerts to Discord. This handler sends messages to the
+`guild-alerts` workspace defined in the `kapacitor.conf`.
+
+_**discord\_cpu\_handler.yaml**_
+```yaml
+id: discord-cpu-alert
+topic: cpu
+kind: discord
+options:
+  workspace: 'guild-alerts'
+  embed-title: 'Hey, Listen!'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler discord_cpu_handler.yaml
+```
+
+### Using multiple Discord configurations
+Kapacitor can use multiple Discord integrations, each identified by the value of
+the [`workspace`](#workspace) config. The TICKscript below illustrates how
+multiple Discord integrations can be used.
+
+In the `kapacitor.conf` [above](#using-the-discord-event-handler), there are two
+Discord configurations: one for alerts and the other for error reports. The
+`workspace` setting in each Discord configuration acts as a unique identifier.
+
+The following TICKscript sends alerts to the `guild-alerts` Discord workspace.
+
+_**discord-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 5)
+    .stateChangesOnly()
+    .message('Hey, I think the machine is on fire.')
+    .discord()
+      .workspace('guild-alerts')
+      .embedTitle('AAAAAAAAAAAAAAAAAAAAAA')
+```
+
+Error rates are also stored in the same InfluxDB instance and we want to
+send daily reports of `500` errors to the `guild-errors` Discord workspace.
+The following TICKscript collects `500` error occurrences and publishes them to
+the `500-errors` topic.
+
+_**500_errors.tick**_
+```js
+stream
+  |from()
+    .measurement('errors')
+    .groupBy('500')
+  |alert()
+    .info(lambda: "count" > 0)
+    .noRecoveries()
+    .topic('500-errors')
+```
+
+Below is an [aggregate](/kapacitor/v1.5/event_handlers/aggregate/) handler that
+subscribes to the `500-errors` topic, aggregates the number of 500 errors over a
+24-hour period, then publishes an aggregate message to the `500-errors-24h` topic.
+
+_**500\_errors\_24h.yaml**_
+```yaml
+id: 500-errors-24h
+topic: 500-errors
+kind: aggregate
+options:
+  interval: 24h
+  topic: 500-errors-24h
+  message: '{{ .Count }} 500 errors last 24 hours.'
+```
+
+Last, but not least, a Discord handler that subscribes to the `500-errors-24h`
+topic and publishes aggregated count messages to the `guild-errors` Discord workspace:
+
+_**discord\_500\_errors\_daily.yaml**_
+```yaml
+id: discord-500-errors-daily
+topic: 500-errors-24h
+kind: discord
+options:
+  workspace: guild-errors
+```
diff --git a/content/kapacitor/v1.5/event_handlers/email.md b/content/kapacitor/v1.5/event_handlers/email.md
new file mode 100644
index 000000000..fb6e11b75
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/email.md
@@ -0,0 +1,187 @@
+---
+title: Email event handler
+description: The "email" event handler allows you to send Kapacitor alerts via email. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Email
+    weight: 300
+    parent: Event handlers
+---
+
+The Email event handler sends alert messages via SMTP/email.
+
+## Configuration
+Configuration as well as default [option](#options) values for the Email event
+handler are set in the `[smtp]` section of your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[smtp]
+  enabled = true
+  host = "localhost"
+  port = 25
+  username = "username"
+  password = "passw0rd"
+  from = "me@example.com"
+  to = ["me@example.com", "you@example.com"]
+  no-verify = false
+  idle-timeout = "30s"
+  global = false
+  state-changes-only = false
+```
+
+#### `enabled`
+Set to `true` to enable the SMTP event handler.
+
+#### `host`
+The SMTP host.
+
+#### `port`
+The SMTP port.
+
+#### `username`
+Your SMTP username.
+
+#### `password`
+Your SMTP password.
+
+#### `from`
+The "From" address for outgoing mail.
+
+#### `to`
+List of default "To" addresses.
+
+#### `no-verify`
+Skip TLS certificate verification when connecting to the SMTP server.
+
+#### `idle-timeout`
+The time after which idle connections are closed.
+
+#### `global`
+If `true`, all alerts will be sent via email without explicitly specifying the
+SMTP handler in the TICKscript.
+
+#### `state-changes-only`
+Sets all alerts in state-changes-only mode, meaning alerts will only be sent if
+the alert state changes.
+Only applies if `global` is `true`.
+
+
+## Options
+The following Email event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.email()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| to | list of strings | List of email addresses. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: smtp
+options:
+  to:
+    - oncall1@example.com
+    - oncall2@example.com
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .email()
+    .to('oncall1@example.com')
+    .to('oncall2@example.com')
+
+  // OR
+  .email('oncall1@example.com')
+    .to('oncall2@example.com')
+```
+
+## Using the SMTP/Email event handler
+The Email event handler can be used in both TICKscripts and handler files to email alerts.
+The email subject is the [AlertNode.Message](/kapacitor/v1.5/nodes/alert_node/#message) property.
+The email body is the [AlertNode.Details](/kapacitor/v1.5/nodes/alert_node/#details) property.
+The emails are sent as HTML emails, so the body can contain HTML markup.
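+
+For example, the following snippet (a sketch based on the CPU examples used throughout these pages, assuming the `usage_idle` field from those examples) sets an HTML-formatted body with the `.details()` property:
+
+```js
+|alert()
+  .crit(lambda: "usage_idle" < 10)
+  // Rendered as the email subject
+  .message('Hey, check your CPU')
+  // Rendered as the HTML email body
+  .details('<h1>{{ .ID }}</h1><p>Idle CPU: <b>{{ index .Fields "usage_idle" }}</b></p>')
+  .email()
+```
+
+The examples below use the following SMTP configuration defined in the `kapacitor.conf`: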
+
+_**SMTP settings in kapacitor.conf**_
+```toml
+[smtp]
+  enabled = true
+  host = "smtp.myserver.com"
+  port = 25
+  username = "username"
+  password = "passw0rd"
+  from = "me@myserver.com"
+  to = ["oncall0@mydomain.com"]
+  no-verify = false
+  idle-timeout = "30s"
+  global = false
+  state-changes-only = false
+```
+
+### Email alerts from a TICKscript
+The following TICKscript uses the `.email()` event handler to send out emails
+whenever idle CPU usage drops below 10%.
+
+_**email-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .email()
+      .to('oncall1@mydomain.com')
+      .to('oncall2@mydomain.com')
```

+
+### Email alerts from a defined handler
+The following setup sends an alert to the `cpu` topic with the message, "Hey,
+check your CPU". An email handler is added that subscribes to the `cpu` topic
+and emails all alerts.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle
+CPU usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the `email` or `smtp`
+event handler to email alerts.
+
+_**email\_cpu\_handler.yaml**_
+```yaml
+id: email-cpu-alert
+topic: cpu
+kind: smtp
+options:
+  to:
+    - oncall1@mydomain.com
+    - oncall2@mydomain.com
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler email_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/exec.md b/content/kapacitor/v1.5/event_handlers/exec.md
new file mode 100644
index 000000000..d453a5c15
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/exec.md
@@ -0,0 +1,109 @@
+---
+title: Exec event handler
+description: The "exec" event handler allows you to execute external programs when Kapacitor alert messages are triggered. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Exec
+    weight: 400
+    parent: Event handlers
+---
+
+The exec event handler executes an external program.
+Event data is passed over STDIN to the process.
+
+## Options
+The following exec event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.exec()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| prog | string | Path to program to execute. |
+| args | list of string | List of arguments to the program. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: exec
+options:
+  prog: /path/to/executable
+  args: ['executable arguments']
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .exec('/path/to/executable', 'executable arguments')
+```
+
+## Using the exec event handler
+The exec event handler can be used in both TICKscripts and handler files to
+execute an external program based on alert logic.
+
+> **Note:** Exec programs are run as the `kapacitor` user, which typically only
+> has access to the default system `$PATH`.
+> If using an executable not in the `$PATH`, pass the executable's absolute path.
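+
+As a minimal sketch of what such a program might look like, the hypothetical script below reads the alert event JSON that Kapacitor passes on STDIN and appends it to a file (the script name and log path are illustrative only):
+
+```bash
+#!/bin/bash
+# Read the alert event JSON that Kapacitor writes to STDIN.
+alert_json=$(cat)
+
+# Append the event to a file, prefixed with a timestamp.
+echo "$(date --iso-8601=seconds) ${alert_json}" >> /tmp/alarm.log
+```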
+
+### Execute an external program from a TICKscript
+
+The following TICKscript uses the `.exec()` event handler to execute the
+`sound-the-alarm.py` Python script whenever idle CPU usage drops below 10%.
+
+_**exec-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .exec('/usr/bin/python', 'sound-the-alarm.py')
```

+
+### Execute an external program from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message, "Hey,
+check your CPU". An exec handler is added that subscribes to the `cpu` topic and
+executes the `sound-the-alarm.py` Python script whenever an alert message is published.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle CPU
+usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the exec event
+handler to execute the `sound-the-alarm.py` Python script.
+
+_**exec\_cpu\_handler.yaml**_
+```yaml
+id: exec-cpu-alert
+topic: cpu
+kind: exec
+options:
+  prog: '/usr/bin/python'
+  args: ['sound-the-alarm.py']
```

+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler exec_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/hipchat.md b/content/kapacitor/v1.5/event_handlers/hipchat.md
new file mode 100644
index 000000000..116516002
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/hipchat.md
@@ -0,0 +1,200 @@
+---
+title: HipChat event handler
+description: The HipChat event handler allows you to send Kapacitor alerts to HipChat. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: HipChat
+    weight: 500
+    parent: Event handlers
+---
+
+[HipChat](https://www.hipchat.com/) is Atlassian's web service for group chat,
+video chat, and screen sharing.
+Kapacitor can be configured to send alert messages to a HipChat room.
+
+## Configuration
+Configuration as well as default [option](#options) values for the HipChat event
+handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[hipchat]
+  enabled = true
+  url = "https://subdomain.hipchat.com/v2/room"
+  room = "xxxx"
+  token = "xxxx"
+  global = false
+  state-changes-only = false
+```
+
+#### `enabled`
+Set to `true` to enable the HipChat event handler.
+
+#### `url`
+The HipChat API URL. Replace `subdomain` with your HipChat subdomain.
+
+#### `room`
+Default room for messages.
+This serves as the default room ID if the TICKscript does not specify a room ID.
+_Visit the [HipChat API documentation](https://www.hipchat.com/docs/apiv2) for
+information on obtaining your room ID._
+
+#### `token`
+Default authentication token.
+This serves as the default token if the TICKscript does not specify an API
+access token.
+_Visit the [HipChat API documentation](https://www.hipchat.com/docs/apiv2) for
+information on obtaining your authentication token._
+
+#### `global`
+If `true`, all alerts are sent to HipChat without explicitly specifying HipChat
+in the TICKscript.
+
+#### `state-changes-only`
+If `true`, alerts will only be sent to HipChat if the alert state changes.
+This only applies if `global` is also set to `true`.
+
+## Options
+The following HipChat event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.hipChat()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| room | string | HipChat room in which to post messages. If empty, uses the room from the configuration. |
+| token | string | HipChat authentication token. If empty, uses the token from the configuration. |
+
+### Example: handler file
+```yaml
+topic: topic-name
+id: handler-id
+kind: hipchat
+options:
+  room: 'alerts'
+  token: 'mysupersecretauthtoken'
```

+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .hipChat()
+    .room('alerts')
+    .token('mysupersecretauthtoken')
```

+
+
+## HipChat Setup
+
+### Requirements
+
+To configure Kapacitor with HipChat, the following is needed:
+
+* A HipChat subdomain name
+* A HipChat room ID
+* A HipChat API access token for sending notifications
+
+### Get your HipChat API access token
+
+1. Log into your HipChat account dashboard.
+2. Select "API access" in the left menu.
+3. Under "Create new token", enter a label for the token.
+   The label is arbitrary and is meant only to help identify the token.
+4. Under "Create new token", select "Send Notification" as the Scope.
+5. Click "Create".
+
+Your token appears in the table just above the `Create new token` section:
+
+![HipChat token](/img/kapacitor/hipchat-token.png)
+
+
+## Using the HipChat event handler
+With the HipChat event handler enabled in your `kapacitor.conf`, use the
+`.hipChat()` attribute in your TICKscripts to send alerts to HipChat or define a
+HipChat handler that subscribes to a topic and sends published alerts to HipChat.
+
+> To avoid posting a message every alert interval, use
+> [AlertNode.StateChangesOnly](/kapacitor/v1.5/nodes/alert_node/#statechangesonly)
+> so only events where the alert changed state are sent to HipChat.
+
+The examples below use the following HipChat configuration defined in the `kapacitor.conf`:
+
+_**HipChat settings in kapacitor.conf**_
+```toml
+[hipchat]
+  enabled = true
+  url = "https://testtest.hipchat.com/v2/room"
+  room = "malerts"
+  token = "tokentokentokentokentoken"
+  global = false
+  state-changes-only = true
```

+
+### Send alerts to a HipChat room from a TICKscript
+
+The following TICKscript uses the `.hipChat()` event handler to send the message,
+"Hey, check your CPU", whenever idle CPU usage drops below 10%.
+It publishes the messages to the `alerts` room associated with the HipChat
+subdomain defined in the `kapacitor.conf`.
+
+_**hipchat-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .hipChat()
+      .room('alerts')
```

+
+### Send alerts to the HipChat room from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message, "Hey,
+check your CPU".
+A HipChat handler is added that subscribes to the `cpu` topic and publishes all
+alert messages to the `alerts` room associated with the `testtest` HipChat
+subdomain defined in the `kapacitor.conf`.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time CPU
+idle usage drops below 10% _(or CPU usage is above 90%)_.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .topic('cpu')
```

+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
```

+
+Create a handler file that subscribes to the `cpu` topic and uses the HipChat
+event handler to send alerts to the `alerts` room in HipChat.
+
+_**hipchat\_cpu\_handler.yaml**_
+```yaml
+id: hipchat-cpu-alert
+topic: cpu
+kind: hipchat
+options:
+  room: 'alerts'
```

+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler hipchat_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/kafka.md b/content/kapacitor/v1.5/event_handlers/kafka.md
new file mode 100644
index 000000000..c4766e086
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/kafka.md
@@ -0,0 +1,195 @@
+---
+title: Kafka event handler
+description: The Kafka event handler allows you to send Kapacitor alerts to an Apache Kafka cluster. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Kafka
+    weight: 600
+    parent: Event handlers
+---
+
+[Apache Kafka](https://kafka.apache.org/) is a distributed streaming platform
+designed for building real-time data pipelines and streaming apps.
+Kapacitor can be configured to send alert messages to a Kafka cluster.
+
+## Configuration
+Configuration as well as default [option](#options) values for the Kafka event
+handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[[kafka]]
+  enabled = true
+  id = "localhost"
+  brokers = []
+  timeout = "10s"
+  batch-size = 100
+  batch-timeout = "1s"
+  use-ssl = false
+  ssl-ca = ""
+  ssl-cert = ""
+  ssl-key = ""
+  insecure-skip-verify = false
```

+
+> Multiple Kafka clients may be configured by repeating `[[kafka]]` sections.
+> The `id` acts as a unique identifier for each configured Kafka client.
+
+#### `enabled`
+Set to `true` to enable the Kafka event handler.
+
+#### `id`
+A unique identifier for the Kafka cluster.
+
+#### `brokers`
+List of Kafka broker addresses using the `host:port` format.
+
+#### `timeout`
+Timeout on network operations with the Kafka brokers.
+If 0, a default of 10s is used.
+
+#### `batch-size`
+The number of messages batched before being sent to Kafka.
+If 0, a default of 100 is used.
+
+#### `batch-timeout`
+The maximum amount of time to wait before flushing an incomplete batch.
+If 0, a default of 1s is used.
+
+#### `use-ssl`
+Enable SSL communication.
+Must be `true` for other SSL options to take effect.
+
+#### `ssl-ca`
+Path to certificate authority file.
+
+#### `ssl-cert`
+Path to host certificate file.
+
+#### `ssl-key`
+Path to certificate private key file.
+
+#### `insecure-skip-verify`
+Use SSL but skip chain and host verification.
+_This is necessary if using a self-signed certificate._
+
+## Options
+The following Kafka event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.kafka()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| cluster | string | Name of the Kafka cluster. |
+| topic | string | Kafka topic. _In TICKscripts, this is set using `.kafkaTopic()`._ |
+| template | string | Message template. |
+
+### Example: handler file
+```yaml
+id: kafka-event-handler
+topic: kapacitor-topic-name
+kind: kafka
+options:
+  cluster: 'kafka-cluster'
+  topic: 'kafka-topic-name'
+  template: 'kafka-template-name'
```

+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .kafka()
+    .cluster('kafka-cluster')
+    .kafkaTopic('kafka-topic-name')
+    .template('kafka-template-name')
```

+
+## Using the Kafka event handler
+With the Kafka event handler enabled in your `kapacitor.conf`, use the `.kafka()`
+attribute in your TICKscripts to send alerts to a Kafka cluster or define a
+Kafka handler that subscribes to a topic and sends published alerts to Kafka.
+
+The examples below use the following Kafka configuration defined in the `kapacitor.conf`:
+
+_**Kafka settings in kapacitor.conf**_
+```toml
+[[kafka]]
+  enabled = true
+  id = "infra-monitoring"
+  brokers = ["123.45.67.89:9092", "123.45.67.90:9092"]
+  timeout = "10s"
+  batch-size = 100
+  batch-timeout = "1s"
+  use-ssl = true
+  ssl-ca = "/etc/ssl/certs/ca.crt"
+  ssl-cert = "/etc/ssl/certs/cert.crt"
+  ssl-key = "/etc/ssl/certs/cert-key.key"
+  insecure-skip-verify = true
```

+
+### Send alerts to a Kafka cluster from a TICKscript
+
+The following TICKscript uses the `.kafka()` event handler to send the message,
+"Hey, check your CPU", whenever idle CPU usage drops below 10%.
+It publishes the messages to the `cpu-alerts` topic in the `infra-monitoring`
+Kafka cluster defined in the `kapacitor.conf`.
+
+_**kafka-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .kafka()
+      .kafkaTopic('cpu-alerts')
```

+
+### Send alerts to a Kafka cluster from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message, "Hey,
+check your CPU". A Kafka handler is added that subscribes to the `cpu` topic and
+publishes all alert messages to the `cpu-alerts` topic associated with the
+`infra-monitoring` Kafka cluster defined in the `kapacitor.conf`.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time CPU
+idle usage drops below 10% _(or CPU usage is above 90%)_.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
```

+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
```

+
+Create a handler file that subscribes to the `cpu` topic and uses the Kafka
+event handler to send alerts to the `cpu-alerts` topic in Kafka.
+
+_**kafka\_cpu\_handler.yaml**_
+```yaml
+id: kafka-cpu-alert
+topic: cpu
+kind: kafka
+options:
+  topic: 'cpu-alerts'
```

+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler kafka_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/log.md b/content/kapacitor/v1.5/event_handlers/log.md
new file mode 100644
index 000000000..b54b4b12a
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/log.md
@@ -0,0 +1,107 @@
+---
+title: Log event handler
+description: The "log" event handler allows you to send Kapacitor alert messages to a log file. This page includes options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Log
+    weight: 700
+    parent: Event handlers
+---
+
+The log event handler writes to a specified log file with one alert event per line.
+If the specified log file does not exist, it will be created.
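+
+Because each event is written on its own line (JSON-encoded in recent Kapacitor releases), the log file is easy to inspect or post-process. For example, a quick sketch (assuming `jq` is installed and the log path used in the examples below) that follows the log and pretty-prints each event as it arrives:
+
+```bash
+# Follow the alert log and pretty-print each JSON event.
+tail -f /tmp/alerts.log | jq '.'
+```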
+ +## Options +The following log event handler options can be set in a +[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using +`.log()` in a TICKscript. + +| Name | Type | Description | +| ---- | ---- | ----------- | +| path | string | Absolute path to the log file. | +| mode | int | File mode and permissions to use when creating the file. Default is `0600`. _**The leading 0 is required to interpret the value as an octal integer.**_ | + +### Example: handler file +```yaml +id: handler-id +topic: topic-name +kind: log +options: + path: '/tmp/alerts.log' + mode: 0644 +``` + +### Example: TICKscript +```js +|alert() + // ... + .log('/tmp/alerts.log') + .mode(0644) +``` + +## Using the log event handler +The log event handler can be used in both TICKscripts and handler files to log +messages to a log file. + +### Log messages from a TICKscript + +The following TICKscript uses the `.log()` event handler to log a message to the +`/tmp/alerts.log` log file whenever idle CPU usage drops below 10%. + +_**log-cpu-alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('{{ .Time }}: CPU usage over 90%') + .log('/tmp/alerts.log') +``` + +### Log messages from a defined handler + +The following setup sends an alert to the `cpu` topic with the message, +"'{{ .Time }}: CPU usage over 90%'". +A log handler is added that subscribes to the `cpu` topic and logs messages to +`/tmp/alerts.log` whenever a new message is published. + +Create a TICKscript that publishes alert messages to a topic. +The TICKscript below sends an alert message to the `cpu` topic any time idle CPU +usage drops below 10%. + +_**cpu\_alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('{{ .Time }}: CPU usage over 90%') + .topic('cpu') +``` + +Add and enable the TICKscript: + +```bash +kapacitor define cpu_alert -tick cpu_alert.tick +kapacitor enable cpu_alert +``` + +Create a handler file that subscribes to the `cpu` topic and uses the log event +handler to log messages to the `/tmp/alerts.log` log file. + +_**log\_cpu\_handler.yaml**_ +```yaml +id: log-cpu-alert +topic: cpu +kind: log +options: + path: '/tmp/alerts.log' +``` + +Add the handler: + +```bash +kapacitor define-topic-handler log_cpu_handler.yaml +``` diff --git a/content/kapacitor/v1.5/event_handlers/microsoftteams.md b/content/kapacitor/v1.5/event_handlers/microsoftteams.md new file mode 100644 index 000000000..8d8067f44 --- /dev/null +++ b/content/kapacitor/v1.5/event_handlers/microsoftteams.md @@ -0,0 +1,177 @@ +--- +title: Microsoft Teams event handler +description: The Microsoft Teams event handler lets you send Kapacitor alerts to a Microsoft Teams channel. This page includes configuration options and usage examples. +menu: + kapacitor_1_5_ref: + name: Microsoft Teams + weight: 750 + parent: Event handlers +--- + +[Microsoft Teams](https://www.microsoft.com/en-us/microsoft-365/microsoft-teams/group-chat-software) is a widely used "digital workspace" that facilitates communication among team members. To configure Kapacitor to send alerts to one or more Microsoft Teams channels, do the following: + +- [Set up a Teams](#set-up-teams) + - [Configuration](#configuration) + - [Handler file options](#handler-file-options) + - [Example Teams handler file](#example-teams-handler-file) +- [Example alerts](#example-alerts) +- [Send an alert to Teams](#send-an-alert-to-teams) + +## Set up Teams + +1. 
Log in to Teams, and then [create a new incoming webhook](https://docs.microsoft.com/en-us/microsoftteams/platform/concepts/connectors#setting-up-a-custom-incoming-webhook) for a Teams channel.
+2. In your `kapacitor.conf` file, add a `[teams]` section with [configuration options](#configuration) for the Microsoft Teams event
+handler, including the incoming webhook URL as the `channel-url`. For example:
+
+    ```toml
+    [teams]
+      enabled = true
+      default = true
+      channel-url = "https://outlook.office.com/webhook/..."
+      global = true
+      state-changes-only = true
+    ```
+
+3. To add multiple Microsoft Teams clients, repeat steps 1-2 to obtain a new webhook and add another `[teams]` section in `kapacitor.conf`.
+The `channel-url` acts as a unique identifier for each configured Teams client.
+
+### Configuration
+
+#### `enabled`
+
+Set to `true` to enable the Microsoft Teams event handler.
+
+#### `default`
+
+If there are multiple `teams` configurations, identify one as the default.
+
+#### `channel-url`
+
+Specify the Microsoft Teams webhook URL to send messages and alerts.
+
+#### `global`
+
+Set to `true` to send all alerts to Teams without explicitly specifying Microsoft Teams in the TICKscript.
+
+#### `state-changes-only`
+
+Set to `true` to send alerts only when the alert state changes.
+_Only applies if `global` is `true`._
+
+### Handler file options
+
+The following options can be set in a Microsoft Teams event [handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.teams()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| team | string | Specifies which Teams configuration to use when there are multiple configurations. |
+| channel | string | Teams channel to post messages to. If empty, uses the channel from the configuration. |
+
+### Example handler file
+
+```yaml
+id: handler-id
+topic: topic-name
+kind: teams
+options:
+  team: 'teams.microsoft.com/team/'
+  channel: '#alerts'
+```
+
+For information about using handler files, see [Add and use event handlers](/kapacitor/v1.5/event_handlers/#create-a-topic-handler-with-a-handler-file).
+
+## Example alerts
+
+#### Send alert to Teams channel in configuration file
+
+```js
+    stream
+      |alert()
+        .teams()
+```
+
+#### Send alert to Teams channel with webhook (overrides configuration file)
+
+```js
+    stream
+      |alert()
+        .teams()
+        .channelURL('https://outlook.office.com/webhook/...')
+```
+
+#### Send alerts to Teams from a TICKscript
+
+Use the `.teams()` attribute in your TICKscripts to:
+
+- Send alerts to Teams
+- Define a Teams handler that subscribes to a topic and sends published alerts to Teams
+
+> To avoid posting a message every alert interval, use
+> [AlertNode.StateChangesOnly](/kapacitor/v1.5/nodes/alert_node/#statechangesonly)
+> so only events where the alert changed state are sent to Teams.
+
+The following TICKscript uses the `.teams()` event handler to send the message,
+"Hey, check your CPU", to the `#alerts` Teams channel when idle CPU usage drops below 20%.
+
+_**teams-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .warn(lambda: "usage_idle" < 20)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .teams()
+```
+
+#### Send alerts to Teams from a defined handler
+
+The following example sends an alert to the `cpu` topic with the message,
+"Hey, check your CPU".
+A Teams handler is added that subscribes to the `cpu` topic and publishes all
+alert messages to Teams.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends a critical alert message to the `cpu` topic any time
+idle CPU usage drops below 5%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 5)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the Teams
+event handler to send alerts to Teams. This handler posts messages to the
+#critical-alerts channel in Teams instead of the channel defined in the
+`[teams]` configuration.
+
+_**teams\_cpu\_handler.yaml**_
+```yaml
+id: teams-cpu-alert
+topic: cpu
+kind: teams
+options:
+  channel: '#critical-alerts'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler teams_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/mqtt.md b/content/kapacitor/v1.5/event_handlers/mqtt.md
new file mode 100644
index 000000000..3718108cc
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/mqtt.md
@@ -0,0 +1,201 @@
+---
+title: MQTT event handler
+description: The MQTT event handler allows you to send Kapacitor alert messages to an MQTT broker. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: MQTT
+    weight: 800
+    parent: Event handlers
+---
+
+[MQTT](http://mqtt.org/) is a lightweight messaging protocol for small sensors and mobile devices.
+Kapacitor can be configured to send alert messages to an MQTT broker.
+
+## Configuration
+Configuration as well as default [option](#options) values for the MQTT
+event handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[[mqtt]]
+  enabled = true
+  name = "localhost"
+  default = true
+  url = "tcp://localhost:1883"
+  ssl-ca = "/etc/kapacitor/ca.pem"
+  ssl-cert = "/etc/kapacitor/cert.pem"
+  ssl-key = "/etc/kapacitor/key.pem"
+  client-id = "xxxx"
+  username = "xxxx"
+  password = "xxxx"
+```
+
+> Multiple MQTT brokers may be configured by repeating `[[mqtt]]` sections.
+> The `name` acts as a unique identifier for each configured MQTT client.
+
+#### `enabled`
+Set to `true` to enable the MQTT event handler.
+
+#### `name`
+Unique name for this broker configuration.
+
+#### `default`
+When using multiple MQTT configurations, sets the current configuration as
+the default.
+
+#### `url`
+URL of the MQTT broker.
+Possible protocols include:
+
+**tcp** - Raw TCP network connection
+**ssl** - TLS protected TCP network connection
+**ws** - Websocket network connection
+
+#### `ssl-ca`
+Absolute path to certificate authority (CA) file.
+_A CA can be provided without a key/certificate pair._
+
+#### `ssl-cert`
+Absolute path to pem encoded certificate file.
+
+#### `ssl-key`
+Absolute path to pem encoded key file.
+
+#### `client-id`
+Unique ID for this MQTT client.
+If empty, the value of `name` is used.
+
+#### `username`
+MQTT username.
+
+#### `password`
+MQTT password.
+
+
+## Options
+The following MQTT event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.mqtt()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| broker-name | string | The name of the configured MQTT broker to use when publishing the alert. If empty, defaults to the configured default broker. |
+| topic | string | The MQTT topic to which alerts will be dispatched. |
+| qos | int64 | The [QoS](http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718099) that will be used to deliver the alerts. Valid values include:&#13;&#13;0 : At most once delivery&#13;1 : At least once delivery&#13;2 : Exactly once delivery |
+| retained | bool | Indicates whether this alert should be delivered to clients that were not connected to the broker at the time of the alert. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: mqtt
+options:
+  broker-name: 'name'
+  topic: 'topic-name'
+  qos: 1
+  retained: true
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .mqtt('topic-name')
+    .brokerName('name')
+    .qos(1)
+    .retained()
+```
+
+## Using the MQTT event handler
+The MQTT event handler can be used in both TICKscripts and handler files to send
+alerts to an MQTT broker.
+
+The examples below use the following MQTT broker configurations defined in the
+`kapacitor.conf`:
+
+_**MQTT settings in kapacitor.conf**_
+```toml
+[[mqtt]]
+  enabled = true
+  name = "localhost"
+  default = true
+  url = "tcp://localhost:1883"
+
+[[mqtt]]
+  enabled = true
+  name = "alerts-broker"
+  default = false
+  url = "ssl://123.45.67.89:1883"
+  ssl-ca = "/etc/kapacitor/ca.pem"
+  ssl-cert = "/etc/kapacitor/cert.pem"
+  ssl-key = "/etc/kapacitor/key.pem"
+  client-id = "alerts-broker"
+  username = "myuser"
+  password = "mysupersecretpassw0rd"
+```
+
+### Send alerts to an MQTT broker from a TICKscript
+
+The following TICKscript uses the `.mqtt()` event handler to send alerts to the
+`alerts` MQTT topic of the default MQTT broker defined in the `kapacitor.conf`
+whenever idle CPU usage drops below 10%.
+
+_**mqtt-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('{{ .Time }}: CPU usage over 90%')
+    .mqtt('alerts')
+      .qos(2)
+```
+
+### Send alerts to an MQTT broker from a defined handler
+
+The following setup sends an alert to the `cpu` topic.
+An MQTT handler is added that subscribes to the `cpu` topic and sends messages
+to the `alerts` MQTT topic of the `alerts-broker` whenever a new message is published.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle CPU
+usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('{{ .Time }}: CPU usage over 90%')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the MQTT event
+handler to send alerts to the `alerts-broker`.
+
+_**mqtt\_cpu\_handler.yaml**_
+```yaml
+id: mqtt-cpu-alert
+topic: cpu
+kind: mqtt
+options:
+  broker-name: 'alerts-broker'
+  topic: 'alerts'
+  qos: 2
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler mqtt_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/opsgenie/v1.md b/content/kapacitor/v1.5/event_handlers/opsgenie/v1.md
new file mode 100644
index 000000000..1f1f40b0b
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/opsgenie/v1.md
@@ -0,0 +1,181 @@
+---
+title: OpsGenie v1 event handler
+description: The OpsGenie v1 event handler allows you to send Kapacitor alerts to OpsGenie. This page includes configuration options and usage examples.
+---
+
+[OpsGenie](https://www.opsgenie.com/) is an incident response orchestration platform for DevOps & ITOps teams.
+Kapacitor can be configured to send alert messages to OpsGenie.
+
+{{% warn %}}
+
+    This page is specific to OpsGenie's v1 API, which has been deprecated.
+    OpsGenie recommends migrating to their v2 API. View the
+    OpsGenie API migration guide for more information about upgrading.
+    If using the v2 API, view the [OpsGenie v2 event handler](/kapacitor/v1.5/event_handlers/opsgenie/v2/) documentation.
+
+{{% /warn %}}
+
+## Configuration
+Configuration as well as default [option](#options) values for the OpsGenie v1
+event handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[opsgenie]
+  enabled = true
+  api-key = "mysupersecretapikey"
+  teams = ["team1", "team2"]
+  recipients = ["recipient1", "recipient2"]
+  url = "https://api.opsgenie.com/v1/json/alert"
+  recovery_url = "https://api.opsgenie.com/v1/json/alert/note"
+  global = false
+```
+
+#### `enabled`
+Set to `true` to enable the OpsGenie v1 event handler.
+
+#### `api-key`
+Your OpsGenie API Key.
+
+#### `teams`
+Default OpsGenie teams. _Can be overridden per alert._
+
+#### `recipients`
+Default OpsGenie recipients. _Can be overridden per alert._
+
+#### `url`
+The OpsGenie API URL. _**This should not need to be changed.**_
+
+#### `recovery_url`
+The OpsGenie Recovery URL. Change this based on which behavior you want a
+recovery to trigger (add notes, close alert, etc.)
+
+#### `global`
+If `true`, all alerts are sent to OpsGenie without specifying `opsgenie` in the
+TICKscript.
+The team and recipients can still be overridden.
+
+## Options
+The following OpsGenie v1 event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.opsGenie()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| teams-list | list of strings | List of teams. |
+| recipients-list | list of strings | List of recipients. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: opsgenie
+options:
+  teams-list:
+    - 'team1'
+    - 'team2'
+  recipients-list:
+    - 'recipient1'
+    - 'recipient2'
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .opsGenie()
+    .teams('team1', 'team2')
+    .recipients('recipient1', 'recipient2')
+```
+
+## OpsGenie Setup
+To allow Kapacitor to send alerts to OpsGenie,
+[create an OpsGenie API Integration](https://docs.opsgenie.com/docs/api-integration#section-using-api-integration).
+Use the generated API key as the `api-key` in the `[opsgenie]` section of your
+`kapacitor.conf`.
+
+## Using the OpsGenie event handler
+With the OpsGenie v1 event handler enabled and configured in your
+`kapacitor.conf`, use the `.opsGenie()` attribute in your TICKscripts to send
+alerts to OpsGenie or define an OpsGenie v1 handler that subscribes to a topic
+and sends published alerts to OpsGenie.
+
+The examples below use the following OpsGenie configuration defined in the `kapacitor.conf`:
+
+_**OpsGenie v1 settings in kapacitor.conf**_
+```toml
+[opsgenie]
+  enabled = true
+  api-key = "mysupersecretapikey"
+  teams = ["engineering"]
+  recipients = ["supervisor1", "supervisor2"]
+  url = "https://api.opsgenie.com/v1/json/alert"
+  recovery_url = "https://api.opsgenie.com/v1/json/alert/note"
+  global = false
+```
+
+### Send alerts to OpsGenie from a TICKscript
+
+The following TICKscript uses the `.opsGenie()` event handler to send the message,
+"Hey, check your CPU", to OpsGenie whenever idle CPU usage drops below 10%.
+
+_**opsgenie-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .opsGenie()
+      .teams('engineering', 'support')
+```
+
+### Send alerts to OpsGenie from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message, "Hey,
+check your CPU". An OpsGenie v1 handler is added that subscribes to the `cpu`
+topic and publishes all alert messages to OpsGenie.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle CPU
+usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the OpsGenie v1
+event handler to send alerts to OpsGenie.
+
+_**opsgenie\_cpu\_handler.yaml**_
+```yaml
+id: opsgenie-cpu-alert
+topic: cpu
+kind: opsgenie
+options:
+  teams-list:
+    - 'engineering'
+    - 'support'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler opsgenie_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/opsgenie/v2.md b/content/kapacitor/v1.5/event_handlers/opsgenie/v2.md
new file mode 100644
index 000000000..49d247dd4
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/opsgenie/v2.md
@@ -0,0 +1,184 @@
+---
+title: OpsGenie v2 event handler
+description: The OpsGenie v2 event handler allows you to send Kapacitor alerts to OpsGenie. This page includes configuration options and usage examples.
+aliases:
+  - /kapacitor/v1.5/event_handlers/opsgenie
+
+menu:
+  kapacitor_1_5_ref:
+    name: OpsGenie
+    weight: 900
+    parent: Event handlers
+---
+
+[OpsGenie](https://www.opsgenie.com/) is an incident response orchestration
+platform for DevOps & ITOps teams.
+Kapacitor can be configured to send alert messages to OpsGenie.
+
+> This page is specific to OpsGenie's v2 API. If still using their v1 API, view
+> the [OpsGenie v1 event handler](/kapacitor/v1.5/event_handlers/opsgenie/v1/) documentation.
+
+## Configuration
+Configuration as well as default [option](#options) values for the OpsGenie v2
+event handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[opsgenie2]
+  enabled = true
+  api-key = "mysupersecretapikey"
+  teams = ["team1", "team2"]
+  recipients = ["recipient1", "recipient2"]
+  url = "https://api.opsgenie.com/v2/alerts"
+  recovery_action = "notes"
+  global = false
+```
+
+#### `enabled`
+Set to `true` to enable the OpsGenie v2 event handler.
+
+#### `api-key`
+Your OpsGenie API Key.
+
+#### `teams`
+Default OpsGenie teams. _Can be overridden per alert._
+
+#### `recipients`
+Default OpsGenie recipients. _Can be overridden per alert._
+
+#### `url`
+The OpsGenie API URL. _**This should not need to be changed.**_
+
+#### `recovery_action`
+The Recovery Action specifies which action to take when alerts recover.
+Valid values include:
+  * `notes` - Add a note to the alert.
+  * `close` - Close the alert.
+
+#### `global`
+If `true`, all alerts are sent to OpsGenie without specifying `opsgenie2` in the TICKscript.
+The team and recipients can still be overridden.
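+
+For example, with the configuration above and `global` set to `true`, even a
+minimal TICKscript sketch like the following would page OpsGenie, since every
+alert is forwarded automatically:
+
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    // No .opsGenie2() needed here: with global = true, all alerts
+    // are sent to OpsGenie using the configured teams and recipients.
+```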
+
+## Options
+The following OpsGenie v2 event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.opsGenie2()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| teams-list | list of strings | List of teams. |
+| recipients-list | list of strings | List of recipients. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: opsgenie2
+options:
+  teams-list:
+    - 'team1'
+    - 'team2'
+  recipients-list:
+    - 'recipient1'
+    - 'recipient2'
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .opsGenie2()
+    .teams('team1', 'team2')
+    .recipients('recipient1', 'recipient2')
+```
+
+## OpsGenie Setup
+To allow Kapacitor to send alerts to OpsGenie,
+[create an OpsGenie API Integration](https://docs.opsgenie.com/docs/api-integration#section-using-api-integration).
+Use the generated API key as the `api-key` in the `[opsgenie2]` section of your
+`kapacitor.conf`.
+
+## Using the OpsGenie event handler
+With the OpsGenie v2 event handler enabled and configured in your
+`kapacitor.conf`, use the `.opsGenie2()` attribute in your TICKscripts to send
+alerts to OpsGenie or define an OpsGenie v2 handler that subscribes to a topic
+and sends published alerts to OpsGenie.
+
+The examples below use the following OpsGenie configuration defined in the `kapacitor.conf`:
+
+_**OpsGenie v2 settings in kapacitor.conf**_
+```toml
+[opsgenie2]
+  enabled = true
+  api-key = "mysupersecretapikey"
+  teams = ["engineering"]
+  recipients = ["supervisor1", "supervisor2"]
+  url = "https://api.opsgenie.com/v2/alerts"
+  recovery_action = "close"
+  global = false
+```
+
+### Send alerts to OpsGenie from a TICKscript
+
+The following TICKscript uses the `.opsGenie2()` event handler to send the
+message, "Hey, check your CPU", to OpsGenie whenever idle CPU usage drops below 10%.
+
+_**opsgenie2-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .opsGenie2()
+      .teams('engineering', 'support')
+```
+
+### Send alerts to OpsGenie from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message, "Hey,
+check your CPU". An OpsGenie v2 handler is added that subscribes to the `cpu`
+topic and publishes all alert messages to OpsGenie.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle
+CPU usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the OpsGenie v2
+event handler to send alerts to OpsGenie.
+ +_**opsgenie2\_cpu\_handler.yaml**_ +```yaml +id: opsgenie-cpu-alert +topic: cpu +kind: opsgenie2 +options: + teams-list: + - 'engineering' + - 'support' +``` + +Add the handler: + +```bash +kapacitor define-topic-handler opsgenie2_cpu_handler.yaml +``` diff --git a/content/kapacitor/v1.5/event_handlers/pagerduty/v1.md b/content/kapacitor/v1.5/event_handlers/pagerduty/v1.md new file mode 100644 index 000000000..996d5614e --- /dev/null +++ b/content/kapacitor/v1.5/event_handlers/pagerduty/v1.md @@ -0,0 +1,157 @@ +--- +title: PagerDuty v1 event handler +description: The PagerDuty v1 event handler allows you to send Kapacitor alerts to PagerDuty. This page includes configuration options and usage examples. +--- + +[PagerDuty](https://www.pagerduty.com/) is an incident management platform that +helps teams detect and fix infrastructure problems quickly. +Kapacitor can be configured to send alert messages to PagerDuty. + +{{% warn %}} + + This page is specific to PagerDuty's v1 API which has been deprecated. + PagerDuty recommends migrating to their v2 API. View the + PagerDuty API migration guide + for more information about upgrading. If using the v2 API, view the + PagerDuty v2 event handler documentation. + +{{% /warn %}} + +## Configuration +Configuration as well as default [option](#options) values for the PagerDuty v1 +event handler are set in your `kapacitor.conf`. +Below is an example configuration: + +```toml +[pagerduty] + enabled = true + service-key = "" + url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" + global = false +``` + +#### `enabled` +Set to `true` to enable the PagerDuty v1 event handler. + +#### `service-key` +Your [PagerDuty Service Key](https://support.pagerduty.com/docs/services-and-integrations). + +#### `url` +The PagerDuty API v1 URL. _**This should not need to be changed.**_ + +#### `global` +If `true`, all alerts will be sent to PagerDuty without explicitly specifying +PagerDuty in TICKscripts. + + +## Options +The following PagerDuty v1 event handler options can be set in a +[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using +`.pagerDuty()` in a TICKscript. + +| Name | Type | Description | +| ---- | ---- | ----------- | +| service-key | string | The PagerDuty service key to use for the alert. | + +### Example: handler file +```yaml +id: handler-id +topic: topic-name +kind: pagerduty +options: + service-key: 'myservicekey' +``` + +### Example: TICKscript +```js +|alert() + // ... + .pagerDuty() + .serviceKey('myservicekey') +``` + +## PagerDuty Setup +To allow Kapacitor to send alerts to PagerDuty +[enable a new "Generic API" integration](https://support.pagerduty.com/docs/services-and-integrations#section-create-a-generic-events-api-integration). +Use the generated "Integration Key" as the `service-key` under the `[pagerduty]` +section of your `kapacitor.conf`. + +## Using the PagerDuty v1 Event Handler +With the PagerDuty v1 event handler enabled in your `kapacitor.conf`, use the +`.pagerDuty()` attribute in your TICKscripts to send alerts to a PagerDuty or +define a PagerDuty v1 handler that subscribes to a topic and sends published +alerts to PagerDuty. 
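+
+Under the hood, the handler POSTs a JSON body to the v1 events API for each
+alert event. A rough sketch of the request body is shown below; the field names
+come from PagerDuty's generic events API, and the exact values Kapacitor fills
+in (such as using the alert ID as the incident key) are assumptions for
+illustration:
+
+```json
+{
+  "service_key": "myservicekey",
+  "event_type": "trigger",
+  "incident_key": "cpu:nil",
+  "description": "Hey, check your CPU",
+  "details": { "...": "JSON-encoded alert data (assumed)" }
+}
+```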
+ +The examples below use the following PagerDuty v1 configuration defined in the `kapacitor.conf`: + +_**PagerDuty v1 settings in kapacitor.conf**_ +```toml +[pagerduty] + enabled = true + service-key = "myservicekey" + url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" + global = false +``` + +### Send alerts to PagerDuty from a TICKscript + +The following TICKscript uses the `.pagerDuty()` event handler to send the +message, "Hey, check your CPU", whenever idle CPU usage drops below 10%. + +_**pagerduty-cpu-alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('Hey, check your CPU') + .pagerDuty() +``` + +### Send alerts to PagerDuty from a defined handler + +The following setup sends an alert to the `cpu` topic with the message, +"Hey, check your CPU". +A PagerDuty v1 handler is added that subscribes to the `cpu` topic and publishes +all alert messages to PagerDuty. + +Create a TICKscript that publishes alert messages to a topic. +The TICKscript below sends an alert message to the `cpu` topic any time CPU +idle usage drops below 10% _(or CPU usage is above 90%)_. + +_**cpu\_alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('Hey, check your CPU') + .topic('cpu') +``` + +Add and enable the TICKscript: + +```bash +kapacitor define cpu_alert -tick cpu_alert.tick +kapacitor enable cpu_alert +``` + +Create a handler file that subscribes to the `cpu` topic and uses the PagerDuty +v1 event handler to send alerts to PagerDuty. + +_**pagerduty\_cpu\_handler.yaml**_ +```yaml +topic: cpu +id: pagerduty-cpu-alert +kind: pagerduty +options: + service-key: 'myservicekey' +``` + +Add the handler: + +```bash +kapacitor define-topic-handler pagerduty_cpu_handler.yaml +``` diff --git a/content/kapacitor/v1.5/event_handlers/pagerduty/v2.md b/content/kapacitor/v1.5/event_handlers/pagerduty/v2.md new file mode 100644 index 000000000..f778d45fc --- /dev/null +++ b/content/kapacitor/v1.5/event_handlers/pagerduty/v2.md @@ -0,0 +1,164 @@ +--- +title: PagerDuty v2 event handler +description: The PagerDuty v2 event handler allows you to send Kapacitor alerts to PagerDuty. This page includes configuration options and usage examples. +aliases: + - /kapacitor/v1.5/event_handlers/pagerduty/ + +menu: + kapacitor_1_5_ref: + name: PagerDuty + weight: 1000 + parent: Event handlers +--- + +[PagerDuty](https://www.pagerduty.com/) is an incident management platform that +helps teams detect and fix infrastructure problems quickly. +Kapacitor can be configured to send alert messages to PagerDuty. + +> This page is specific to PagerDuty's v2 API. If still using their v1 API, view +> the [PagerDuty v1 event handler](/kapacitor/v1.5/event_handlers/pagerduty/v1/) documentation. + +## Configuration +Configuration as well as default [option](#options) values for the PagerDuty v2 +event handler are set in your `kapacitor.conf`. +Below is an example configuration: + +```toml +[pagerduty2] + enabled = true + routing-key = "" + url = "https://events.pagerduty.com/v2/enqueue" + global = false +``` + +#### `enabled` +Set to `true` to enable the PagerDuty v2 event handler. + +#### `routing-key` +Your [PagerDuty Routing Key](https://support.pagerduty.com/docs/services-and-integrations). + +#### `url` +The PagerDuty API v2 URL. 
_**This should not need to be changed.**_ + +#### `global` +If `true`, all alerts will be sent to PagerDuty without explicitly specifying +PagerDuty in TICKscripts. + + +## Options +The following PagerDuty v2 event handler options can be set in a +[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using +`.pagerDuty2()` in a TICKscript. + +| Name | Type | Description | +| ---- | ---- | ----------- | +| routing-key | string | The PagerDuty routing key to use for the alert. | +| link | strings | A custom link put in the `links` field of the body sent to the PagerDuty API. | + +### Example: handler file +```yaml +id: handler-id +topic: topic-name +kind: pagerduty2 +options: + routing-key: 'myroutingkey' + links: + - href: 'https://chronograf.example.com/sources/1/dashboards/2' + text: 'Overview Dashboard' + - href: 'https://chronograf.example.com/' +``` + +### Example: TICKscript +```js +|alert() + // ... + .pagerDuty2() + .routingKey('myroutingkey') + .link('https://chronograf.example.com/sources/1/dashboards/2', 'Overview Dashboard') + .link('https://chronograf.example.com/') +``` + +## PagerDuty Setup +To allow Kapacitor to send alerts to PagerDuty +[enable a new "Generic API" integration](https://support.pagerduty.com/docs/services-and-integrations#section-create-a-generic-events-api-integration). +Use the generated "Integration Key" as the `routing-key` under the `[pagerduty2]` +section of your `kapacitor.conf`. + +## Using the PagerDuty v2 Event Handler +With the PagerDuty v2 event handler enabled in your `kapacitor.conf`, use the +`.pagerDuty2()` attribute in your TICKscripts to send alerts to a PagerDuty or +define a PagerDuty v2 handler that subscribes to a topic and sends published +alerts to PagerDuty. + +The examples below use the following PagerDuty v2 configuration defined in the `kapacitor.conf`: + +_**PagerDuty v2 settings in kapacitor.conf**_ +```toml +[pagerduty2] + enabled = true + routing-key = "myroutingkey" + url = "https://events.pagerduty.com/v2/enqueue" + global = false +``` + +### Send alerts to PagerDuty from a TICKscript + +The following TICKscript uses the `.pagerDuty2()` event handler to send the +message, "Hey, check your CPU", whenever idle CPU usage drops below 10%. + +_**pagerduty2-cpu-alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('Hey, check your CPU') + .pagerDuty2() +``` + +### Send alerts to PagerDuty from a defined handler + +The following setup sends an alert to the `cpu` topic with the message, "Hey, +check your CPU". A PagerDuty v2 handler is added that subscribes to the `cpu` +topic and publishes all alert messages to PagerDuty. + +Create a TICKscript that publishes alert messages to a topic. +The TICKscript below sends an alert message to the `cpu` topic any time CPU +idle usage drops below 10% _(or CPU usage is above 90%)_. + +_**cpu\_alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('Hey, check your CPU') + .topic('cpu') +``` + +Add and enable the TICKscript: + +```bash +kapacitor define cpu_alert -tick cpu_alert.tick +kapacitor enable cpu_alert +``` + +Create a handler file that subscribes to the `cpu` topic and uses the PagerDuty v2 +event handler to send alerts to PagerDuty. 
+
+_**pagerduty2\_cpu\_handler.yaml**_
+```yaml
+topic: cpu
+id: pagerduty2-cpu-alert
+kind: pagerduty2
+options:
+  routing-key: 'myroutingkey'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler pagerduty2_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/post.md b/content/kapacitor/v1.5/event_handlers/post.md
new file mode 100644
index 000000000..7b4ab2d9d
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/post.md
@@ -0,0 +1,361 @@
+---
+title: Post event handler
+description: The "post" event handler allows you to POST Kapacitor alert data to an HTTP endpoint. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Post
+    weight: 1100
+    parent: Event handlers
+---
+
+The post event handler posts JSON encoded data to an HTTP endpoint.
+
+## Configuration
+Configuration as well as default [option](#options) values for the post event
+handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+### Post Settings in kapacitor.conf
+```toml
+[[httppost]]
+  endpoint = "example"
+  url = "http://example.com/path"
+  headers = { Example = "your-key" }
+  basic-auth = { username = "my-user", password = "my-pass" }
+  alert-template = "{{.Message}}:{{range .Data.Series}}{{.Tags}},{{range .Values}}{{.}}{{end}}{{end}}"
+  alert-template-file = "/path/to/template/file"
+  row-template = "{{.Name}} host={{index .Tags \"host\"}}{{range .Values}} {{index . \"time\"}} {{index . \"value\"}}{{end}}"
+  row-template-file = "/path/to/template/file"
+```
+
+#### `endpoint`
+Name of a configured HTTP POST endpoint that acts as an identifier for `[[httppost]]`
+configurations when multiple are present.
+_Endpoints are identifiers only. They are not appended to HTTP POST URLs._
+
+#### `url`
+The URL to which the alert data will be posted.
+
+#### `headers`
+Set of extra header values to set on the POST request.
+
+#### `basic-auth`
+Set of authentication credentials to set on the POST request.
+
+#### `alert-template`
+Alert template for constructing a custom HTTP body.
+Alert templates are only used with post [alert](/kapacitor/v1.5/nodes/alert_node/)
+handlers as they consume alert data.
+_Skip to [alert templating](#alert-templates)._
+
+#### `alert-template-file`
+Absolute path to an alert template file.
+_Skip to [alert templating](#alert-templates)._
+
+#### `row-template`
+Row template for constructing a custom HTTP body.
+Row templates are only used with [httpPost](/kapacitor/v1.5/nodes/http_post_node/)
+pipeline nodes as they consume a row at a time.
+_Skip to [row templating](#row-templates)._
+
+#### `row-template-file`
+Absolute path to a row template file.
+_Skip to [row templating](#row-templates)._
+
+### Defining configuration options with environment variables
+The `endpoint`, `url`, and `headers` configuration options can be defined with
+environment variables:
+
+```bash
+KAPACITOR_HTTPPOST_0_ENDPOINT = "example"
+KAPACITOR_HTTPPOST_0_URL = "http://example.com/path"
+KAPACITOR_HTTPPOST_0_HEADERS_Example1 = "header1"
+KAPACITOR_HTTPPOST_0_HEADERS_Example2 = "header2"
+```
+
+### Configuring and using multiple HTTP POST endpoints
+The `kapacitor.conf` supports multiple `[[httppost]]` sections.
+The [`endpoint`](#endpoint) configuration option of each acts as a unique identifier for that specific configuration.
+To use a specific `[[httppost]]` configuration with the Post alert handler, +specify the endpoint in your [post alert handler file](#example-handler-file-using-a-pre-configured-endpoint), +or [your TICKscript](#example-tickscript-using-a-pre-configured-endpoint). + +_**kapacitor.conf**_ +```toml +[[httppost]] + endpoint = "endpoint1" + url = "http://example-1.com/path" + # ... + +[[httppost]] + endpoint = "endpoint2" + url = "http://example-2.com/path" + # ... +``` + +Multiple HTTP POST endpoint configurations can also be added using environment variables. +Variables values are grouped together using the number in each variable key. + +```bash +KAPACITOR_HTTPPOST_0_ENDPOINT = "example0" +KAPACITOR_HTTPPOST_0_URL = "http://example-0.com/path" +KAPACITOR_HTTPPOST_0_HEADERS_Example1 = "header1" +KAPACITOR_HTTPPOST_0_HEADERS_Example2 = "header2" + +KAPACITOR_HTTPPOST_1_ENDPOINT = "example1" +KAPACITOR_HTTPPOST_1_URL = "http://example-1.com/path" +KAPACITOR_HTTPPOST_1_HEADERS_Example1 = "header1" +KAPACITOR_HTTPPOST_1_HEADERS_Example2 = "header2" +``` + +## Options +The following post event handler options can be set in a +[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using +`.post()` in a TICKscript. + +| Name | Type | Description | +| ---- | ---- | ----------- | +| url | string | The URL to which the alert data will be posted. | +| endpoint | string | Name of a HTTP POST endpoint (configured in the `kapacitor.conf`) to use. _Cannot be specified in place of the URL._ | +| headers | map of string to string | Set of extra header values to set on the POST request. | +| capture‑response | bool | If the HTTP status code is not an `2xx` code, read and log the HTTP response. | +| timeout | duration | Timeout for the HTTP POST. | +| skipSSLVerification | bool | Disables SSL verification for the POST request. | + +### Example: Handler file - Using a pre-configured endpoint +```yaml +id: handler-id +topic: topic-name +kind: post +options: + # Using the 'example' endpoint configured in the kapacitor.conf + endpoint: example +``` + +### Example: Handler file - Defining post options "inline" +```yaml +id: handler-id +topic: topic-name +kind: post +options: + # Defining post options "inline" + url: http://example.com/path + headers: + 'Example1': 'example1' + 'Example2': 'example2' + capture-response: true + timeout: 10s + skipSSLVerification: true +``` + +### Example: TICKscript - Using a pre-configured endpoint +```js +|alert() + // ... + // Using the 'example' endpoint configured in the kapacitor.conf + .post() + .endpoint('example') +``` + +### Example: TICKscript - Defining post options "inline" +```js +|alert() + // ... + // Defining post options "inline" + .post('https://example.com/path') + .header('Example1', 'example1') + .header('Example2', 'example2') + .captureResponse() + .timeout(10s) + .skipSSLVerification() +``` + +## Using the Post event handler +The post event handler can be used in both TICKscripts and handler files to post +alert and HTTP POST data to an HTTP endpoint. 
+The examples below deal with alerts and use the same `[[httppost]]` configuration
+defined in the `kapacitor.conf`:
+
+_**HTTP POST settings in kapacitor.conf**_
+```toml
+[[httppost]]
+  endpoint = "api-alert"
+  url = "http://mydomain.com/api/alerts"
+  headers = { From = "alerts@mydomain.com" }
+  alert-template = "{{.Message}}:{{range .Data.Series}}{{.Tags}},{{range .Values}}{{.}}{{end}}{{end}}"
+```
+
+### Post alerts from a TICKscript
+The following TICKscripts use the `.post()` event handler to post the message,
+"Hey, check your CPU", whenever idle CPU usage drops below 10%.
+
+_**post-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .post()
+      .endpoint('api-alert')
+```
+
+If you don't want to use the `[[httppost]]` settings defined in the `kapacitor.conf`,
+you can specify your post options inline.
+
+_**post-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .post('https://example.com/path')
+      .header('Example1', 'example1')
+      .header('Example2', 'example2')
+      .captureResponse()
+      .timeout(10s)
+      .skipSSLVerification()
+```
+
+
+### Post alerts from a defined handler
+The following setup sends an alert to the `cpu` topic with the message, "Hey,
+check your CPU".
+A post handler is added that subscribes to the `cpu` topic and posts all alert
+messages to the url and endpoint defined in the `kapacitor.conf`.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle CPU
+usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the post event
+handler to post alerts to an HTTP endpoint.
+
+_**post\_cpu\_handler.yaml**_
+```yaml
+id: post-cpu-alert
+topic: cpu
+kind: post
+options:
+  url: 'http://example.com/path'
+  headers:
+    'From': 'alert@mydomain.com'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler post_cpu_handler.yaml
+```
+
+
+## Post templating
+The post event handler allows you to customize the content and structure of
+POSTs with alert and row templates.
+
+### Alert templates
+Alert templates are used to construct a custom HTTP body.
+They are only used with post [alert](/kapacitor/v1.5/nodes/alert_node/) handlers
+as they consume alert data.
+Templates are defined either inline in the `kapacitor.conf` using the
+[`alert-template`](#alert-template) configuration or in a separate file and referenced
+using the [`alert-template-file`](#alert-template-file) config.
+
+Alert templates use [Golang Template](https://golang.org/pkg/text/template/) and
+have access to the following fields:
+
+| Field | Description |
+| ----- | ----------- |
+| .ID | The unique ID for the alert. |
+| .Message | The message of the alert. |
+| .Details | The details of the alert. |
+| .Time | The time the alert event occurred. |
+| .Duration | The duration of the alert event. |
+| .Level | The level of the alert, i.e. INFO, WARN, or CRITICAL. |
+| .Data | The data that triggered the alert. |
+| .PreviousLevel | The previous level of the alert, i.e. INFO, WARN, or CRITICAL. |
+| .Recoverable | Indicates whether or not the alert is auto-recoverable. |
+
+#### Inline alert template
+_**kapacitor.conf**_
+```toml
+[[httppost]]
+  endpoint = "example"
+  url = "http://example.com/path"
+  alert-template = "{{.Message}}:{{range .Data.Series}}{{.Tags}},{{range .Values}}{{.}}{{end}}{{end}}"
+```
+
+#### Alert template file
+_**kapacitor.conf**_
+```toml
+[[httppost]]
+  endpoint = "example"
+  url = "http://example.com/path"
+  alert-template-file = "/etc/templates/alert.html"
+```
+
+_**/etc/templates/alert.html**_
+```html
+{{.Message}}:{{range .Data.Series}}{{.Tags}},{{range .Values}}{{.}}{{end}}{{end}}
+```
+
+### Row templates
+Row templates are used to construct a custom HTTP body.
+They are only used with [httpPost](/kapacitor/v1.5/nodes/http_post_node/)
+handlers as they consume a row at a time.
+Templates are defined either inline in the `kapacitor.conf` using the
+[`row-template`](#row-template) configuration or in a separate file and referenced
+using the [`row-template-file`](#row-template-file) config.
+
+Row templates use [Golang Template](https://golang.org/pkg/text/template/) and
+have access to the following fields:
+
+| Field | Description |
+| ----- | ----------- |
+| .Name | The measurement name of the data stream. |
+| .Tags | A map of tags on the data. |
+| .Values | A list of values; each a map containing a "time" key for the time of the point and keys for all other fields on the point. |
+
+#### Inline row template
+_**kapacitor.conf**_
+```toml
+[[httppost]]
+  endpoint = "example"
+  url = "http://example.com/path"
+  row-template = '{{.Name}} host={{index .Tags "host"}}{{range .Values}} {{index . "time"}} {{index . "value"}}{{end}}'
+```
+
+#### Row template file
+_**kapacitor.conf**_
+```toml
+[[httppost]]
+  endpoint = "example"
+  url = "http://example.com/path"
+  row-template-file = "/etc/templates/row.html"
+```
+
+_**/etc/templates/row.html**_
+```html
+{{.Name}} host={{index .Tags "host"}}{{range .Values}} {{index . "time"}} {{index . "value"}}{{end}}
+```
diff --git a/content/kapacitor/v1.5/event_handlers/publish.md b/content/kapacitor/v1.5/event_handlers/publish.md
new file mode 100644
index 000000000..f4febe87b
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/publish.md
@@ -0,0 +1,78 @@
+---
+title: Publish event handler
+description: The "publish" event handler allows you to publish Kapacitor alert messages to multiple Kapacitor topics. This page includes options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Publish
+    weight: 1200
+    parent: Event handlers
+---
+
+The publish event handler publishes events to another topic.
+
+## Options
+The following publish event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file).
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| topics | list of string | List of topic names to publish events. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: publish
+options:
+  topics:
+    - system
+    - ops_team
+```
+
+## Using the publish event handler
+The following setup sends an alert to the `cpu` topic with the message,
+"Hey, check your CPU".
+A publish handler is added that subscribes to the `cpu` topic and publishes new
+alerts to other topics.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle CPU
+usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the publish
+event handler to publish alerts to other topics.
+
+_**publish\_cpu\_alerts\_handler.yaml**_
+```yaml
+id: publish-cpu-alert
+topic: cpu
+kind: publish
+options:
+  topics:
+    - system
+    - ops_team
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler publish_cpu_alerts_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/pushover.md b/content/kapacitor/v1.5/event_handlers/pushover.md
new file mode 100644
index 000000000..f24f4b36f
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/pushover.md
@@ -0,0 +1,165 @@
+---
+title: Pushover event handler
+description: The Pushover event handler allows you to send Kapacitor alerts to Pushover. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Pushover
+    weight: 1300
+    parent: Event handlers
+---
+
+[Pushover](https://pushover.net/) is a service that sends instant push
+notifications to phones and tablets.
+Kapacitor can be configured to send alert messages to Pushover.
+
+## Configuration
+Configuration as well as default [option](#options) values for the Pushover
+event handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[pushover]
+  enabled = true
+  token = "mysupersecrettoken"
+  user-key = "myuserkey"
+  url = "https://api.pushover.net/1/messages.json"
+```
+
+#### `enabled`
+Set to `true` to enable the Pushover event handler.
+
+#### `token`
+Your Pushover API token.
+
+#### `user-key`
+Your Pushover USER_TOKEN.
+
+#### `url`
+The URL for the Pushover API. _**This should not need to be changed.**_
+
+## Options
+The following Pushover event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.pushover()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| device | string | Specific list of a user's devices rather than all of a user's devices. Multiple device names may be separated by a comma. |
+| title | string | The message title. By default, the app's name is used. |
+| url | string | A supplementary URL to show with the message. |
+| url-title | string | A title for a supplementary URL, otherwise just the URL is shown. |
+| sound | string | The name of one of the sounds supported by the device clients to override the user's default sound choice. |
+
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: pushover
+options:
+  device: device1, device2, device3
+  title: Alert from Kapacitor
+  url: http://example.com
+  url-title: This is an example title
+  sound: siren
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .pushover()
+    .device('device1, device2, device3')
+    .title('Alert from Kapacitor')
+    .URL('http://example.com')
+    .URLTitle('This is an example title')
+    .sound('siren')
+```
+
+### Pushover Priority Levels
+Pushover expects priority levels with each alert.
+Kapacitor alert levels are mapped to the following priority levels:
+
+| Alert Level | Priority Level |
+| ----------- | -------------- |
+| **OK** | -2 priority level. |
+| **Info** | -1 priority level. |
+| **Warning** | 0 priority level. |
+| **Critical** | 1 priority level. |
+
+## Pushover Setup
+[Register your application with Pushover](https://pushover.net/apps/build) to
+get a Pushover token.
+Include the token in the `[pushover]` configuration section of your `kapacitor.conf`.
+
+## Using the Pushover event handler
+With the Pushover event handler enabled and configured in your `kapacitor.conf`,
+use the `.pushover()` attribute in your TICKscripts to send alerts to Pushover
+or define a Pushover handler that subscribes to a topic and sends published
+alerts to Pushover.
+
+### Send alerts to Pushover from a TICKscript
+
+The following TICKscript sends the message, "Hey, check your CPU", to Pushover
+whenever idle CPU usage drops below 10% using the `.pushover()` event handler.
+
+_**pushover-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .pushover()
+      .title('Alert from Kapacitor')
+      .sound('siren')
+```
+
+### Send alerts to Pushover from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message, "Hey,
+check your CPU".
+A Pushover handler is added that subscribes to the `cpu` topic and publishes all
+alert messages to Pushover.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle CPU
+usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the Pushover
+event handler to send alerts to Pushover.
+
+_**pushover\_cpu\_handler.yaml**_
+```yaml
+id: pushover-cpu-alert
+topic: cpu
+kind: pushover
+options:
+  title: Alert from Kapacitor
+  sound: siren
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler pushover_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/sensu.md b/content/kapacitor/v1.5/event_handlers/sensu.md
new file mode 100644
index 000000000..14b4d72e5
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/sensu.md
@@ -0,0 +1,158 @@
+---
+title: Sensu event handler
+description: The Sensu event handler allows you to send Kapacitor alerts to Sensu. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Sensu
+    weight: 1400
+    parent: Event handlers
+---
+
+[Sensu](https://sensu.io/) is a service that provides infrastructure, service,
+and application monitoring as well as other metrics.
+Kapacitor can be configured to send alert messages to Sensu.
+
+## Configuration
+Configuration as well as default [option](#options) values for the Sensu event
+handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[sensu]
+  enabled = true
+  addr = "sensu-client:3030"
+  source = "Kapacitor"
+  handlers = ["handler1-name", "handler2-name"]
+```
+
+#### `enabled`
+Set to `true` to enable the Sensu event handler.
+
+#### `addr`
+The Sensu Client `host:port` address.
+
+#### `source`
+Default "Just-in-Time" (JIT) source.
+
+#### `handlers`
+List of [Sensu handlers](https://docs.sensu.io/sensu-core/1.3/guides/intro-to-handlers/) to use.
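+
+When an alert event fires, Kapacitor writes a check result to the configured
+Sensu client address. As a rough sketch, the JSON body is shaped like the
+following; the exact field set and the mapping of alert levels to status codes
+are assumptions based on Sensu's check result format:
+
+```json
+{
+  "name": "kapacitor-cpu-alert",
+  "source": "Kapacitor",
+  "output": "Hey, check your CPU",
+  "status": 2,
+  "handlers": ["handler1-name", "handler2-name"]
+}
+```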
+ + +## Options +The following Sensu event handler options can be set in a +[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using +`.sensu()` in a TICKscript. + +| Name | Type | Description | +| ---- | ---- | ----------- | +| source | string | Sensu source for which to post messages. | +| handlers | list of strings | Sensu handler list. If empty, uses the handler list from the configuration. | +| metadata | map of key value pairs | Adds key values pairs to the Sensu API request. | + +### Example: handler file +```yaml +id: handler-id +topic: topic-name +kind: sensu +options: + source: Kapacitor + handlers: + - handler1-name + - handler2-name + metadata: + key1: value1 + key2: 5 + key3: 5.0 +``` + +### Example: TICKscript +```js +|alert() + // ... + .sensu() + .source('Kapacitor') + .handlers('handler1-name', 'handler2-name') + .metadata('key1', 'value1') + .metadata('key2', 5) + .metadata('key3', 5.0) +``` + +## Using the Sensu event handler +With the Sensu event handler enabled and configured in your `kapacitor.conf`, +use the `.sensu()` attribute in your TICKscripts to send alerts to Sensu or +define a Sensu handler that subscribes to a topic and sends published alerts +to Sensu. + +_**Sensu settings in kapacitor.conf**_ +```toml +[sensu] + enabled = true + addr = "123.45.67.89:3030" + source = "Kapacitor" + handlers = ["tcp", "transport"] +``` + +### Send alerts to Sensu from a TICKscript +The following TICKscript uses the `.sensu()` event handler to send the message, +"Hey, check your CPU", to Sensu whenever idle CPU usage drops below 10%. + +_**sensu-cpu-alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('Hey, check your CPU') + .sensu() +``` + +### Send alerts to Sensu from a defined handler + +The following setup sends an alert to the `cpu` topic with the message, +"Hey, check your CPU". +A Sensu handler is added that subscribes to the `cpu` topic and publishes all +alert messages to Sensu. + +Create a TICKscript that publishes alert messages to a topic. +The TICKscript below sends an alert message to the `cpu` topic any time idle CPU +usage drops below 10%. + +_**cpu\_alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('Hey, check your CPU') + .topic('cpu') +``` + +Add and enable the TICKscript: + +```bash +kapacitor define cpu_alert -tick cpu_alert.tick +kapacitor enable cpu_alert +``` + +Create a handler file that subscribes to the `cpu` topic and uses the Sensu +event handler to send alerts to Sensu. + +_**sensu\_cpu\_handler.yaml**_ +```yaml +id: sensu-cpu-alert +topic: cpu +kind: sensu +options: + source: Kapacitor + handlers: + - tcp + - transport +``` + +Add the handler: + +```bash +kapacitor define-topic-handler sensu_cpu_handler.yaml +``` diff --git a/content/kapacitor/v1.5/event_handlers/slack.md b/content/kapacitor/v1.5/event_handlers/slack.md new file mode 100644 index 000000000..8d5248f7b --- /dev/null +++ b/content/kapacitor/v1.5/event_handlers/slack.md @@ -0,0 +1,297 @@ +--- +title: Slack event handler +description: The Slack event handler allows you to send Kapacitor alerts to Slack. This page includes configuration options and usage examples. +menu: + kapacitor_1_5_ref: + name: Slack + weight: 1500 + parent: Event handlers +--- + +[Slack](https://slack.com) is a widely used "digital workspace" that facilitates +communication among team members. +Kapacitor can be configured to send alert messages to Slack. 
+
+## Configuration
+Configuration as well as default [option](#options) values for the Slack event
+handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[[slack]]
+  enabled = true
+  default = true
+  workspace = "example.slack.com"
+  url = "https://hooks.slack.com/xxxx/xxxx/xxxx"
+  channel = "#alerts"
+  username = "kapacitor"
+  global = false
+  state-changes-only = false
+  ssl-ca = "/path/to/ca.crt"
+  ssl-cert = "/path/to/cert.crt"
+  ssl-key = "/path/to/private-key.key"
+  insecure-skip-verify = false
+```
+
+> Multiple Slack clients may be configured by repeating `[[slack]]` sections.
+> The `workspace` acts as a unique identifier for each configured Slack client.
+
+#### `enabled`
+Set to `true` to enable the Slack event handler.
+
+#### `default`
+Identify one of the Slack configurations as the default if there are multiple
+Slack configurations.
+
+#### `workspace`
+The Slack workspace ID.
+This can be any string that identifies this particular Slack configuration.
+A logical choice is the name of the Slack workspace, e.g. `example.slack.com`.
+
+#### `url`
+The Slack webhook URL. This can be obtained by adding an Incoming Webhook integration.
+Log in to your Slack workspace in your browser and
+[add a new webhook](https://slack.com/services/new/incoming-webhook) for Kapacitor.
+Slack will provide you the webhook URL.
+
+#### `channel`
+Default channel for messages.
+
+#### `username`
+The username of the Slack bot.
+
+#### `global`
+If `true`, all alerts will be sent to Slack without explicitly specifying Slack
+in the TICKscript.
+
+#### `state-changes-only`
+Sets all alerts in state-changes-only mode, meaning alerts will only be sent if
+the alert state changes.
+_Only applies if `global` is `true`._
+
+#### `ssl-ca`
+Path to certificate authority file.
+
+#### `ssl-cert`
+Path to host certificate file.
+
+#### `ssl-key`
+Path to certificate private key file.
+
+#### `insecure-skip-verify`
+Use SSL but skip chain and host verification.
+_This is necessary if using a self-signed certificate._
+
+## Options
+The following Slack event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.slack()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| workspace | string | Specifies which Slack configuration to use when there are multiple. |
+| channel | string | Slack channel in which to post messages. If empty, uses the channel from the configuration. |
+| username | string | Username of the Slack bot. If empty, uses the username from the configuration. |
+| icon-emoji | string | IconEmoji is an emoji name surrounded in ':' characters. The emoji image will replace the normal user icon for the Slack bot. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: slack
+options:
+  workspace: 'workspace.slack.com'
+  channel: '#alerts'
+  username: 'kapacitor'
+  icon-emoji: ':smile:'
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .slack()
+    .workspace('workspace.slack.com')
+    .channel('#alerts')
+    .username('kapacitor')
+    .iconEmoji(':smile:')
+```
+
+## Slack Setup
+To allow Kapacitor to send alerts to Slack, log in to your Slack workspace and
+[create a new incoming webhook](https://slack.com/services/new/incoming-webhook)
+for Kapacitor. Add the generated webhook URL as the `url` in the `[[slack]]`
+configuration section of your `kapacitor.conf`.
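+
+To confirm the webhook works before wiring it into Kapacitor, you can post a
+test message to it directly. For example, using `curl` (the webhook URL below
+is a placeholder):
+
+```bash
+curl -X POST -H 'Content-type: application/json' \
+  --data '{"text": "Test message from Kapacitor setup"}' \
+  https://hooks.slack.com/xxxx/xxxx/xxxx
+```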
+
+## Using the Slack event handler
+With one or more Slack event handlers enabled and configured in your
+`kapacitor.conf`, use the `.slack()` attribute in your TICKscripts to send
+alerts to Slack or define a Slack handler that subscribes to a topic and sends
+published alerts to Slack.
+
+> To avoid posting a message every alert interval, use
+> [AlertNode.StateChangesOnly](/kapacitor/v1.5/nodes/alert_node/#statechangesonly)
+> so only events where the alert changed state are sent to Slack.
+
+The examples below use the following Slack configurations defined in the `kapacitor.conf`:
+
+_**Slack settings in kapacitor.conf**_
+```toml
+[[slack]]
+  enabled = true
+  default = true
+  workspace = "alerts"
+  url = "https://hooks.slack.com/xxxx/xxxx/example1"
+  channel = "#alerts"
+  username = "AlertBot"
+  global = false
+  state-changes-only = false
+
+[[slack]]
+  enabled = true
+  default = false
+  workspace = "error-reports"
+  url = "https://hooks.slack.com/xxxx/xxxx/example2"
+  channel = "#error-reports"
+  username = "StatsBot"
+  global = false
+  state-changes-only = false
+```
+
+### Send alerts to Slack from a TICKscript
+The following TICKscript uses the `.slack()` event handler to send the message,
+"Hey, check your CPU", to the `#alerts` Slack channel whenever idle CPU usage
+drops below 20%.
+
+_**slack-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .warn(lambda: "usage_idle" < 20)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .slack()
+      .iconEmoji(':exclamation:')
+```
+
+### Send alerts to Slack from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message,
+"Hey, check your CPU".
+A Slack handler is added that subscribes to the `cpu` topic and publishes all
+alert messages to Slack.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends a critical alert message to the `cpu` topic any time
+idle CPU usage drops below 5%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 5)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the Slack
+event handler to send alerts to Slack. This handler uses the default Slack
+configuration, `alerts`, which sends messages to the `#alerts` channel
+in Slack.
+
+_**slack\_cpu\_handler.yaml**_
+```yaml
+id: slack-cpu-alert
+topic: cpu
+kind: slack
+options:
+  workspace: 'alerts'
+  icon-emoji: ':fire:'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler slack_cpu_handler.yaml
+```
+
+### Using multiple Slack configurations
+Kapacitor can use multiple Slack integrations, each identified by the value of
+the [`workspace`](#workspace) config. The TICKscript below illustrates how
+multiple Slack integrations can be used.
+
+In the `kapacitor.conf` [above](#using-the-slack-event-handler), there are two
+Slack configurations: one for alerts and the other for daily stats. The
+`workspace` value for each Slack configuration acts as a unique identifier.
+
+The following TICKscript sends alerts to the `alerts` Slack workspace.
+
+_**slack-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 5)
+    .stateChangesOnly()
+    .message('Hey, I think the machine is on fire.')
+    .slack()
+      .workspace('alerts')
+      .iconEmoji(':fire:')
+```
+
+Error rates are also being stored in the same InfluxDB instance and we want to
+send daily reports of `500` errors to the `error-reports` Slack workspace.
+The following TICKscript collects `500` error occurrences and publishes them to
+the `500-errors` topic.
+
+_**500_errors.tick**_
+```js
+stream
+  |from()
+    .measurement('errors')
+    .groupBy('500')
+  |alert()
+    .info(lambda: "count" > 0)
+    .noRecoveries()
+    .topic('500-errors')
+```
+
+Below is an [aggregate](/kapacitor/v1.5/event_handlers/aggregate/) handler that
+subscribes to the `500-errors` topic, aggregates the number of 500 errors over a
+24-hour period, then publishes an aggregate message to the `500-errors-24h` topic.
+
+_**500\_errors\_24h.yaml**_
+```yaml
+id: 500-errors-24h
+topic: 500-errors
+kind: aggregate
+options:
+  interval: 24h
+  topic: 500-errors-24h
+  message: '{{ .Count }} 500 errors last 24 hours.'
+```
+
+Last, but not least, a Slack handler that subscribes to the `500-errors-24h`
+topic and publishes aggregated count messages to the `error-reports` Slack workspace:
+
+_**slack\_500\_errors\_daily.yaml**_
+```yaml
+id: slack-500-errors-daily
+topic: 500-errors-24h
+kind: slack
+options:
+  workspace: error-reports
+```
diff --git a/content/kapacitor/v1.5/event_handlers/snmptrap.md b/content/kapacitor/v1.5/event_handlers/snmptrap.md
new file mode 100644
index 000000000..d9faf8362
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/snmptrap.md
+---
+title: SNMP trap event handler
+description: The "snmptrap" event handler allows you to send Kapacitor alerts as SNMP traps. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: SNMP Trap
+    weight: 1600
+    parent: Event handlers
+---
+
+The SNMP trap event handler sends alert messages as SNMP traps.
+
+## Configuration
+Configuration as well as default [option](#options) values for the SNMP trap
+event handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[snmptrap]
+  enabled = true
+  addr = "localhost:162"
+  community = "kapacitor"
+  retries = 1
+```
+
+#### `enabled`
+Set to `true` to enable the SNMP trap event handler.
+
+#### `addr`
+The `host:port` address of the SNMP trap server.
+
+#### `community`
+The community to use for traps.
+
+#### `retries`
+Number of retries when sending traps.
+
+
+## Options
+The following SNMP trap event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.snmpTrap()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| trap-oid | string | OID of the trap. |
+| data-list | object | Each data object has `oid`, `type`, and `value` fields. Each field is a string. |
+
+### SNMP Trap Data Types
+The SNMP trap event handler supports the following data types:
+
+| Abbreviation | Datatype |
+| ------------ | -------- |
+| c | Counter |
+| i | Integer |
+| n | Null |
+| s | String |
+| t | Time ticks |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: snmptrap
+options:
+  trap-oid: 1.3.6.1.4.1.1
+  data-list:
+    - oid: 1.3.6.1.4.1.1.5
+      type: s
+      value: '{{ .Level }}'
+    - oid: 1.3.6.1.4.1.1.6
+      type: i
+      value: 50
+    - oid: 1.3.6.1.4.1.1.7
+      type: c
+      value: '{{ index .Fields "num_requests" }}'
+    - oid: 1.3.6.1.4.1.1.8
+      type: s
+      value: '{{ .Message }}'
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .snmpTrap('1.3.6.1.4.1.1')
+    .data('1.3.6.1.4.1.1.5', 's', '{{ .Level }}')
+    .data('1.3.6.1.4.1.1.6', 'i', '50')
+    .data('1.3.6.1.4.1.1.7', 'c', '{{ index .Fields "num_requests" }}')
+    .data('1.3.6.1.4.1.1.8', 's', '{{ .Message }}')
+```
+
+## Using the SNMP trap event handler
+The SNMP trap event handler can be used in both TICKscripts and handler files
+to send alerts as SNMP traps.
+
+### Sending SNMP traps from a TICKscript
+
+The following TICKscript uses the `.snmpTrap()` event handler to send alerts as
+SNMP traps whenever idle CPU usage drops below 10%.
+
+_**snmptrap-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .snmpTrap('1.3.6.1.2.1.1')
+    .data('1.3.6.1.2.1.1.7', 'i', '{{ index .Fields "value" }}')
+```
+
+### Send SNMP traps from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message,
+"Hey, check your CPU".
+An SNMP trap handler is added that subscribes to the `cpu` topic and sends new
+alerts as SNMP traps.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle CPU
+usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the SNMP trap
+event handler to send alerts as SNMP traps.
+
+_**snmptrap\_cpu\_handler.yaml**_
+```yaml
+id: snmptrap-cpu-alert
+topic: cpu
+kind: snmptrap
+options:
+  trap-oid: '1.3.6.1.2.1.1'
+  data-list:
+    - oid: '1.3.6.1.2.1.1.7'
+      type: i
+      value: '{{ index .Fields "value" }}'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler snmptrap_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/talk.md b/content/kapacitor/v1.5/event_handlers/talk.md
new file mode 100644
index 000000000..31efebf3e
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/talk.md
+---
+title: Talk event handler
+description: The Talk event handler allows you to send Kapacitor alerts to Talk. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: Talk
+    weight: 1700
+    parent: Event handlers
+---
+
+[Talk](https://jianliao.com/site) is a service that aggregates information into
+a centralized hub.
+Kapacitor can be configured to send alert messages to Talk.
+
+## Configuration
+Configuration as well as default [option](#options) values for the Talk event
+handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[talk]
+  enabled = true
+  url = "https://jianliao.com/v2/services/webhook/uuid"
+  author_name = "Kapacitor"
+```
+
+#### `enabled`
+Set to `true` to enable the Talk event handler.
+
+#### `url`
+The Talk webhook URL.
+
+#### `author_name`
+The default author name.
+
+
+## Options
+The following Talk event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.talk()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| title | string | Message title. |
+| text | string | Message text. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: talk
+options:
+  title: 'Message Title'
+  text: 'This is the text included in the message.'
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .talk()
+    .title('Message Title')
+    .text('This is the text included in the message.')
+```
+
+## Talk Setup
+Create a new incoming webhook to allow Kapacitor to send alerts to Talk.
+
+1. [Sign in to your Talk account](https://account.jianliao.com/signin).
+2. Under the "Team" tab, click “Integrations”.
+3. Select “Customize service” and click the Incoming Webhook “Add” button.
+4. Choose the topic to connect with and click the “Confirm Add” button.
+5. Once the service is created, you’ll see the “Generate Webhook url”.
+6. Place the generated Webhook URL as the `url` in the `[talk]` section of your
+   `kapacitor.conf`.
+
+## Using the Talk event handler
+With the Talk event handler enabled and configured in your `kapacitor.conf`,
+use the `.talk()` attribute in your TICKscripts to send alerts to Talk or define
+a Talk handler that subscribes to a topic and sends published alerts to Talk.
+
+### Send alerts to Talk from a TICKscript
+
+The following TICKscript sends the message, "Hey, check your CPU", to Talk
+whenever idle CPU usage drops below 10% using the `.talk()` event handler.
+
+_**talk-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .talk()
+    .title('Alert from Kapacitor')
+```
+
+### Send alerts to Talk from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message,
+"Hey, check your CPU".
+A Talk handler is added that subscribes to the `cpu` topic and publishes all
+alert messages to Talk.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle CPU
+usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the Talk event
+handler to send alerts to Talk.
+ +_**talk\_cpu\_handler.yaml**_ +```yaml +id: talk-cpu-alert +topic: cpu +kind: talk +options: + title: Alert from Kapacitor +``` + +Add the handler: + +```bash +kapacitor define-topic-handler talk_cpu_handler.yaml +``` diff --git a/content/kapacitor/v1.5/event_handlers/tcp.md b/content/kapacitor/v1.5/event_handlers/tcp.md new file mode 100644 index 000000000..691ba73e5 --- /dev/null +++ b/content/kapacitor/v1.5/event_handlers/tcp.md @@ -0,0 +1,98 @@ +--- +title: TCP event handler +description: The "tcp" event handler allows you to send Kapacitor alert data to a TCP endpoint. This page includes options and usage examples. +menu: + kapacitor_1_5_ref: + name: TCP + weight: 1800 + parent: Event handlers +--- + +The TCP event handler sends JSON encoded alert data to a TCP endpoint. + +## Options +The following TCP event handler options can be set in a [handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using `.tcp()` in a TICKscript. + +| Name | Type | Description | +| ---- | ---- | ----------- | +| address | string | Address of TCP endpoint. | + +### Example: handler file +```yaml +id: handler-id +topic: topic-name +kind: tcp +options: + address: 127.0.0.1:7777 +``` + +### Example: TICKscript +```js +|alert() + // ... + .tcp('127.0.0.1:7777') +``` + +## Using the TCP event handler +The TCP event handler can be used in both TICKscripts and handler files to send +alert data to TCP endpoint. + +### Send alert data to a TCP endpoint from a TICKscript +The following TICKscript uses the `.tcp()` event handler to send alert data +whenever idle CPU usage drops below 10%. + +_**tcp-cpu-alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('Hey, check your CPU') + .tcp('127.0.0.1:7777') +``` + +### Send alert data to a TCP endpoint from a defined handler +The following setup sends an alert to the `cpu` topic with the message, +"Hey, check your CPU". A TCP handler is added that subscribes to the `cpu` topic +and sends all alert messages to a TCP endpoint. + +Create a TICKscript that publishes alert messages to a topic. +The TICKscript below sends an alert message to the `cpu` topic any time idle CPU +usage drops below 10%. + +_**cpu\_alert.tick**_ +```js +stream + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 10) + .message('Hey, check your CPU') + .topic('cpu') +``` + +Add and enable the TICKscript: + +```bash +kapacitor define cpu_alert -tick cpu_alert.tick +kapacitor enable cpu_alert +``` + +Create a handler file that subscribes to the `cpu` topic and uses the TCP event +handler to send alert data to a TCP endpoint. + +_**tcp\_cpu\_handler.yaml**_ +```yaml +id: tcp-cpu-alert +topic: cpu +kind: tcp +options: + address: 127.0.0.1:7777 +``` + +Add the handler: + +```bash +kapacitor define-topic-handler tcp_cpu_handler.yaml +``` diff --git a/content/kapacitor/v1.5/event_handlers/telegram.md b/content/kapacitor/v1.5/event_handlers/telegram.md new file mode 100644 index 000000000..921f7e654 --- /dev/null +++ b/content/kapacitor/v1.5/event_handlers/telegram.md @@ -0,0 +1,321 @@ +--- +title: Telegram event handler +description: The Telegram event handler allows you to send Kapacitor alerts to Telegram. This page includes configuration options and usage examples. +menu: + kapacitor_1_5_ref: + name: Telegram + weight: 1900 + parent: Event handlers +--- + +[Telegram](https://telegram.org/) is a messaging app built with a focus on +security and speed. 
+Kapacitor can be configured to send alert messages to a Telegram bot.
+
+## Configuration
+Configuration as well as default [option](#options) values for the Telegram
+alert handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[telegram]
+  enabled = false
+  url = "https://api.telegram.org/bot"
+  token = ""
+  chat-id = ""
+  parse-mode = "Markdown"
+  disable-web-page-preview = false
+  disable-notification = false
+  global = false
+  state-changes-only = false
+```
+
+#### `enabled`
+Set to `true` to enable the Telegram event handler.
+
+#### `url`
+The Telegram Bot URL.
+_**This should not need to be changed.**_
+
+#### `token`
+Telegram bot token.
+_[Contact @BotFather](https://telegram.me/botfather) to obtain a bot token._
+
+#### `chat-id`
+Default recipient for messages.
+_[Contact @myidbot](https://telegram.me/myidbot) on Telegram to get an ID._
+
+#### `parse-mode`
+Specifies the syntax used to format messages. Options are `Markdown` or `HTML`,
+which allow Telegram apps to show bold, italic, fixed-width text, or inline URLs
+in alert messages.
+
+#### `disable-web-page-preview`
+Disable link previews for links in this message.
+
+#### `disable-notification`
+Sends the message silently. iOS users will not receive a notification.
+Android users will receive a notification with no sound.
+
+#### `global`
+If `true`, all alerts will be sent to Telegram without explicitly specifying
+Telegram in the TICKscript.
+
+#### `state-changes-only`
+If `true`, alerts will only be sent to Telegram if the alert state changes.
+This only applies if `global` is also set to `true`.
+
+
+## Options
+The following Telegram event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.telegram()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| chat-id | string | Telegram user/group ID to post messages to. If empty, uses the chat-id from the configuration. |
+| parse-mode | string | Parse mode; defaults to Markdown. If empty, uses the parse-mode from the configuration. |
+| disable-web-page-preview | bool | Disables web page previews. If empty, uses the disable-web-page-preview value from the configuration. |
+| disable-notification | bool | Disables notifications. If empty, uses the disable-notification value from the configuration. |
+
+### Example: handler file
+```yaml
+topic: topic-name
+id: handler-id
+kind: telegram
+options:
+  chat-id: '123456789'
+  parse-mode: 'Markdown'
+  disable-web-page-preview: false
+  disable-notification: false
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .telegram()
+    .chatId('123456789')
+    .disableNotification()
+    .disableWebPagePreview()
+    .parseMode('Markdown')
+```
+
+
+## Telegram Setup
+
+### Requirements
+
+To configure Kapacitor with Telegram, the following is needed:
+
+* a Telegram bot
+* a Telegram API access token
+* a Telegram chat ID
+
+
+### Create a Telegram bot
+1. Search for the `@BotFather` username in your Telegram application.
+2. Click `Start` to begin a conversation with `@BotFather`.
+3. Send `/newbot` to `@BotFather`. `@BotFather` will respond:
+
+   ---
+
+   _Alright, a new bot. How are we going to call it? Please choose a name for your bot._
+
+   ---
+
+   `@BotFather` will prompt you through the rest of the bot-creation process;
+   feel free to follow his directions or continue with our version of the steps
+   below. Both setups result in success!
+
+4. Send your bot's name to `@BotFather`. Your bot's name can be anything.
+
+   > Note that this is not your bot's Telegram `@username`. You will create the
+   > username in step 5.
+
+   `@BotFather` will respond:
+
+   ---
+
+   _Good. Now let's choose a username for your bot. It must end in `bot`.
+   Like this, for example: TetrisBot or tetris\_bot._
+
+   ---
+
+5. Send your bot's username to `@BotFather`. `@BotFather` will respond:
+
+   ---
+
+   _Done! Congratulations on your new bot.
+   You will find it at t.me/`<bot-username>`.
+   You can now add a description, about section and profile picture for your
+   bot, see /help for a list of commands. By the way, when you've finished creating
+   your cool bot, ping our Bot Support if you want a better username for it.
+   Just make sure the bot is fully operational before you do this._
+
+   _Use this token to access the HTTP API:
+   `<API-access-token>`_
+
+   _For a description of the Bot API, see this page:
+   [https://core.telegram.org/bots/api](https://core.telegram.org/bots/api)_
+
+   ---
+
+6. Begin a conversation with your bot.
+   Click on the `t.me/<bot-username>` link in `@BotFather`'s response and click
+   `Start` at the bottom of your Telegram application.
+   Your newly-created bot will appear in the chat list on the left side of the application.
+
+
+### Get a Telegram API access token
+Telegram's `@BotFather` bot sent you an API access token when you created your bot.
+See the `@BotFather` response in step 5 of the previous section for where to find your token.
+If you can't find the API access token, create a new token by following the steps
+below.
+
+1. Send `/token` to `@BotFather`.
+2. Select the relevant bot at the bottom of your Telegram application.
+   `@BotFather` responds with a new API access token:
+
+   ---
+
+   _You can use this token to access the HTTP API:
+   `<API-access-token>`_
+
+   _For a description of the Bot API, see this page:
+   [https://core.telegram.org/bots/api](https://core.telegram.org/bots/api)_
+
+   ---
+
+### Get your Telegram chat ID
+1. Paste the following link in your browser. Replace `<API-access-token>` with
+   the API access token that you identified or created in the previous section:
+
+   ```
+   https://api.telegram.org/bot<API-access-token>/getUpdates?offset=0
+   ```
+
+2. Send a message to your bot in the Telegram application.
+   The message text can be anything.
+   Your chat history must include at least one message to get your chat ID.
+3. Refresh your browser.
+4. Identify the numerical chat ID by finding the `id` inside the `chat` JSON object.
+   In the example below, the chat ID is `123456789`.
+
+   ```json
+   {
+     "ok":true,
+     "result":[
+       {
+         "update_id":XXXXXXXXX,
+         "message":{
+           "message_id":2,
+           "from":{
+             "id":123456789,
+             "first_name":"Mushroom",
+             "last_name":"Kap"
+           },
+           "chat":{
+             "id":123456789,
+             "first_name":"Mushroom",
+             "last_name":"Kap",
+             "type":"private"
+           },
+           "date":1487183963,
+           "text":"hi"
+         }
+       }
+     ]
+   }
+   ```
+
+
+## Using the Telegram event handler
+With the Telegram event handler enabled and configured in your `kapacitor.conf`,
+use the `.telegram()` attribute in your TICKscripts to send alerts to your
+Telegram bot or define a Telegram handler that subscribes to a topic and sends
+published alerts to your Telegram bot.
+
+> To avoid posting a message every alert interval, use
+> [AlertNode.StateChangesOnly](/kapacitor/v1.5/nodes/alert_node/#statechangesonly)
+> so only events where the alert changed state are sent to Telegram.
+
+The examples below use the following Telegram configuration defined in the `kapacitor.conf`:
+
+_**Telegram settings in kapacitor.conf**_
+```toml
+[telegram]
+  enabled = true
+  url = "https://api.telegram.org/bot"
+  token = "mysupersecretauthtoken"
+  chat-id = ""
+  parse-mode = "Markdown"
+  disable-web-page-preview = false
+  disable-notification = false
+  global = false
+  state-changes-only = false
+```
+
+### Send alerts to a Telegram bot from a TICKscript
+
+The following TICKscript uses the `.telegram()` event handler to send the message,
+"Hey, check your CPU" to a Telegram bot whenever idle CPU usage drops below 10%.
+It uses the default Telegram settings defined in the `kapacitor.conf`.
+
+_**telegram-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .telegram()
+```
+
+### Send alerts to a Telegram bot from a defined handler
+
+The following setup sends the message, "Hey, check your CPU" to a Telegram bot
+with the `123456789` chat ID.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle
+CPU usage drops below 10% _(or CPU usage is above 90%)_.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .stateChangesOnly()
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the Telegram
+event handler to send alerts to the `123456789` chat ID in Telegram.
+
+_**telegram\_cpu\_handler.yaml**_
+```yaml
+id: telegram-cpu-alert
+topic: cpu
+kind: telegram
+options:
+  chat-id: '123456789'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler telegram_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/event_handlers/victorops.md b/content/kapacitor/v1.5/event_handlers/victorops.md
new file mode 100644
index 000000000..5548d4acf
--- /dev/null
+++ b/content/kapacitor/v1.5/event_handlers/victorops.md
+---
+title: VictorOps event handler
+description: The VictorOps event handler allows you to send Kapacitor alerts to VictorOps. This page includes configuration options and usage examples.
+menu:
+  kapacitor_1_5_ref:
+    name: VictorOps
+    weight: 2000
+    parent: Event handlers
+---
+
+[VictorOps](https://victorops.com/) is an incident management platform that
+provides observability, collaboration, and real-time alerting.
+Kapacitor can be configured to send alert messages to VictorOps.
+
+## Configuration
+Configuration as well as default [option](#options) values for the VictorOps
+event handler are set in your `kapacitor.conf`.
+Below is an example configuration:
+
+```toml
+[victorops]
+  enabled = true
+  api-key = "xxxx"
+  routing-key = "xxxx"
+  url = "https://alert.victorops.com/integrations/generic/20131114/alert"
+  json-data = false
+  global = false
+```
+
+#### `enabled`
+Set to `true` to enable the VictorOps event handler.
+
+#### `api-key`
+Your VictorOps API key.
+
+#### `routing-key`
+Default VictorOps routing key; can be overridden per alert.
+
+#### `url`
+The VictorOps API URL. _**This should not need to be changed.**_
+
+#### `json-data`
+Use JSON for the "data" field.
+
+> New VictorOps installations will want to set this to `true` as it makes
+the data that triggered the alert available within VictorOps.
+The default is `false` for backwards compatibility.
+
+#### `global`
+If `true`, all alerts will be sent to VictorOps without explicitly specifying
+VictorOps in the TICKscript.
+_The routing key can still be overridden._
+
+
+## Options
+The following VictorOps event handler options can be set in a
+[handler file](/kapacitor/v1.5/event_handlers/#handler-file) or when using
+`.victorOps()` in a TICKscript.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| routing-key | string | The routing key of the alert event. |
+
+### Example: handler file
+```yaml
+id: handler-id
+topic: topic-name
+kind: victorops
+options:
+  routing-key: ops_team
+```
+
+### Example: TICKscript
+```js
+|alert()
+  // ...
+  .victorOps()
+    .routingKey('team_rocket')
+```
+
+## VictorOps Setup
+To allow Kapacitor to send alerts to VictorOps, do the following:
+
+1. Enable the "Alert Ingestion API" in the "Integrations" section of your
+   VictorOps dashboard.
+2. Use the provided API key as the `api-key` in the `[victorops]` section of your
+   `kapacitor.conf`.
+
+## Using the VictorOps event handler
+With the VictorOps event handler enabled and configured in your `kapacitor.conf`,
+use the `.victorOps()` attribute in your TICKscripts to send alerts to VictorOps
+or define a VictorOps handler that subscribes to a topic and sends published
+alerts to VictorOps.
+
+The examples below use the following VictorOps configuration defined in the `kapacitor.conf`:
+
+_**VictorOps settings in kapacitor.conf**_
+```toml
+[victorops]
+  enabled = true
+  api-key = "mysupersecretapikey"
+  routing-key = "team_rocket"
+  url = "https://alert.victorops.com/integrations/generic/20131114/alert"
+  json-data = true
+  global = false
+```
+
+### Send alerts to VictorOps from a TICKscript
+
+The following TICKscript uses the `.victorOps()` event handler to send the
+message, "Hey, check your CPU", to VictorOps whenever idle CPU usage drops
+below 10%.
+
+_**victorops-cpu-alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .victorOps()
+    .routingKey('team_rocket')
+```
+
+### Send alerts to VictorOps from a defined handler
+
+The following setup sends an alert to the `cpu` topic with the message,
+"Hey, check your CPU".
+A VictorOps handler is added that subscribes to the `cpu` topic and publishes
+all alert messages to VictorOps using default settings defined in the `kapacitor.conf`.
+
+Create a TICKscript that publishes alert messages to a topic.
+The TICKscript below sends an alert message to the `cpu` topic any time idle
+CPU usage drops below 10%.
+
+_**cpu\_alert.tick**_
+```js
+stream
+  |from()
+    .measurement('cpu')
+  |alert()
+    .crit(lambda: "usage_idle" < 10)
+    .message('Hey, check your CPU')
+    .topic('cpu')
+```
+
+Add and enable the TICKscript:
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+kapacitor enable cpu_alert
+```
+
+Create a handler file that subscribes to the `cpu` topic and uses the VictorOps
+event handler to send alerts to VictorOps.
+
+_**victorops\_cpu\_handler.yaml**_
+```yaml
+topic: cpu
+id: victorops-cpu-alert
+kind: victorops
+options:
+  routing-key: 'team_rocket'
+```
+
+Add the handler:
+
+```bash
+kapacitor define-topic-handler victorops_cpu_handler.yaml
+```
diff --git a/content/kapacitor/v1.5/guides/_index.md b/content/kapacitor/v1.5/guides/_index.md
new file mode 100644
index 000000000..e5e9f9d76
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/_index.md
+---
+title: Guides
+aliases:
+  - kapacitor/v1.5/examples/
+menu:
+  kapacitor_1_5:
+    name: Guides
+    identifier: guides
+    weight: 35
+---
+
+The following is a list of examples in no particular order that demonstrate some of the features of Kapacitor.
+These guides assume you're familiar with the basics of defining, recording, replaying, and enabling tasks within Kapacitor.
+See the [getting started](/kapacitor/v1.5/introduction/getting-started/) guide if you need a refresher.
+
+### [Calculating rates across joined series + backfill](/kapacitor/v1.5/guides/join_backfill/)
+
+Learn how to join two series and calculate a combined result, plus how to perform that operation on historical data.
+
+### [Live leaderboard of game scores](/kapacitor/v1.5/guides/live_leaderboard/)
+
+See how you can use Kapacitor to create a live updating leaderboard for a game.
+
+### [Load directory](/kapacitor/v1.5/guides/load_directory/)
+
+Put TICKscripts, TICKscript templates, and handler definitions in a directory,
+from where they will be loaded when the Kapacitor daemon boots.
+
+### [Custom anomaly detection](/kapacitor/v1.5/guides/anomaly_detection/)
+
+Integrate your custom anomaly detection algorithm with Kapacitor.
+
+### [Continuous Queries](/kapacitor/v1.5/guides/continuous_queries/)
+
+See how to use Kapacitor as a continuous query engine.
+
+### [Socket-based UDF](/kapacitor/v1.5/guides/socket_udf/)
+
+Learn how to write a simple socket-based user-defined function (UDF).
+
+### [Template tasks](/kapacitor/v1.5/guides/template_tasks/)
+
+Use task templates to reduce the number of TICKscripts you need to write.
+
+### [Reference TICKscripts](/kapacitor/v1.5/guides/reference_scripts/)
+
+Some examples of TICKscripts built against common Telegraf plugin data.
diff --git a/content/kapacitor/v1.5/guides/anomaly_detection.md b/content/kapacitor/v1.5/guides/anomaly_detection.md
new file mode 100644
index 000000000..0bdcbe54c
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/anomaly_detection.md
+---
+title: Custom anomaly detection using Kapacitor
+aliases:
+  - kapacitor/v1.5/examples/anomaly_detection/
+menu:
+  kapacitor_1_5:
+    name: Custom anomaly detection
+    identifier: anomaly_detection
+    weight: 20
+    parent: guides
+---
+
+
+Everyone has their own anomaly detection algorithm, so we have built
+Kapacitor to integrate easily with whichever algorithm fits your
+domain. Kapacitor calls these custom algorithms UDFs, for user-defined
+functions. This guide will walk through the necessary steps for
+writing and using your own UDFs within Kapacitor.
+
+If you haven't already, we recommend following the [getting started
+guide](/kapacitor/v1.5/introduction/getting-started/) for Kapacitor
+prior to continuing.
+
+## 3D printing
+
+If you own or have recently purchased a 3D printer, you may know that
+3D printing requires the environment to be at certain temperatures in
+order to ensure quality prints.
+Prints can also take a long time
+(some can take more than 24 hours), so you can't just watch the
+temperature graphs the whole time to make sure the print is going
+well. Also, if a print goes bad early, you want to stop it so that
+you can restart it and not waste materials on continuing a bad print.
+
+Due to the physical limitations of 3D printing, the printer software
+is typically designed to keep the temperatures within certain
+tolerances. For the sake of argument, let's say that you don't trust
+the software to do its job (or want to create your own), and want to
+be alerted when the temperature reaches an abnormal level.
+
+There are three temperatures when it comes to 3D printing:
+
+1. The temperature of the hot end (where the plastic is melted before being printed).
+2. The temperature of the bed (where the part is being printed).
+3. The temperature of the ambient air (the air around the printer).
+
+All three of these temperatures affect the quality of the print (some
+being more important than others), but we want to track
+all of them.
+
+To keep our anomaly detection algorithm simple, let's compute a
+`p-value` for each window of data we receive, and then emit a single
+data point with that `p-value`. To compute the `p-value`, we will use
+[Welch's t-test](https://en.wikipedia.org/wiki/Welch%27s_t_test). For
+a null hypothesis, we will state that a new window is from the same
+population as the historical windows. If the `p-value` drops low
+enough, we can reject the null hypothesis and conclude that the window
+must be from something different than the historical data population, or
+_an anomaly_. This is an oversimplified approach, but we are learning
+how to write UDFs, not statistics.
+
+## Writing a user-defined function (UDF)
+
+Now that we have an idea of what we want to do, let's understand how
+Kapacitor wants to communicate with our process. From the [UDF
+README](https://github.com/influxdata/kapacitor/tree/master/udf/agent)
+we learn that Kapacitor will spawn a process called an `agent`. The
+`agent` is responsible for describing what options it has, and then
+initializing itself with a set of options. As data is received by the
+UDF, the `agent` performs its computation and then returns the
+resulting data to Kapacitor. All of this communication occurs over
+STDIN and STDOUT using protocol buffers. As of this writing, Kapacitor
+has agents implemented in Go and Python that take care of the
+communication details and expose an interface for doing the actual
+work. For this guide, we will be using the Python agent.
+
+### The Handler interface
+
+Here is the Python handler interface for the agent:
+
+```python
+# The Agent calls the appropriate methods on the Handler as requests are read off STDIN.
+#
+# Throwing an exception will cause the Agent to stop and an ErrorResponse to be sent.
+# Some *Response objects (like SnapshotResponse) allow for returning their own error within the object itself.
+# These types of errors will not stop the Agent and Kapacitor will deal with them appropriately.
+#
+# The Handler is called from a single thread, meaning methods will not be called concurrently.
+#
+# To write Points/Batches back to the Agent/Kapacitor use the Agent.write_response method, which is thread safe.
+class Handler(object):
+    def info(self):
+        pass
+    def init(self, init_req):
+        pass
+    def snapshot(self):
+        pass
+    def restore(self, restore_req):
+        pass
+    def begin_batch(self, begin_req):
+        pass
+    def point(self, point):
+        pass
+    def end_batch(self, end_req):
+        pass
+```
+
+### The Info method
+
+Let's start with the `info` method. When Kapacitor starts up, it will
+call `info` and expect in return some information about how this UDF
+behaves. Specifically, Kapacitor expects to know which kind of edge the
+UDF wants and which it provides.
+
+> **Remember**: within Kapacitor, data is transported in streams or
+batches, so the UDF must declare what it expects.
+
+In addition, UDFs can accept certain options so that they are
+individually configurable. The `info` response can contain a list of
+options, their names, and expected arguments.
+
+For our example UDF, we need to know three things:
+
+1. The field to operate on.
+2. The size of the historical window to keep.
+3. The significance level or `alpha` being used.
+
+Below we have the implementation of the `info` method for our handler that defines the edge types and options available:
+
+```python
+...
+    def info(self):
+        """
+        Respond with which type of edges we want/provide and any options we have.
+        """
+        response = udf_pb2.Response()
+
+        # We will consume batch edges aka windows of data.
+        response.info.wants = udf_pb2.BATCH
+        # We will produce single points of data aka stream.
+        response.info.provides = udf_pb2.STREAM
+
+        # Here we can define options for the UDF.
+        # Define which field we should process.
+        response.info.options['field'].valueTypes.append(udf_pb2.STRING)
+
+        # Since we will be computing a moving average let's make the size configurable.
+        # Define an option 'size' that takes one integer argument.
+        response.info.options['size'].valueTypes.append(udf_pb2.INT)
+
+        # We need to know the alpha level so that we can ignore bad windows.
+        # Define an option 'alpha' that takes one double valued argument.
+        response.info.options['alpha'].valueTypes.append(udf_pb2.DOUBLE)
+
+        return response
+...
+```
+
+When Kapacitor starts, it will spawn our UDF process and request
+the `info` data and then shut down the process. Kapacitor will
+remember this information for each UDF. This way, Kapacitor can
+understand the available options for a given UDF before it's executed
+inside of a task.
+
+### The Init method
+
+Next let's implement the `init` method, which is called once the task
+starts executing. The `init` method receives a list of chosen
+options, which are then used to configure the handler appropriately.
+In response, we indicate whether the `init` request was successful,
+and, if not, any error messages if the options were invalid.
+
+```python
+...
+    def init(self, init_req):
+        """
+        Given a list of options initialize this instance of the handler
+        """
+        success = True
+        msg = ''
+        size = 0
+        for opt in init_req.options:
+            if opt.name == 'field':
+                self._field = opt.values[0].stringValue
+            elif opt.name == 'size':
+                size = opt.values[0].intValue
+            elif opt.name == 'alpha':
+                self._alpha = opt.values[0].doubleValue
+
+        if size <= 1:
+            success = False
+            msg += ' must supply window size > 1'
+        if self._field == '':
+            success = False
+            msg += ' must supply a field name'
+        if self._alpha == 0:
+            success = False
+            msg += ' must supply an alpha value'
+
+        # Initialize our historical window
+        # We will define MovingStats in the next step
+        self._history = MovingStats(size)
+
+        response = udf_pb2.Response()
+        response.init.success = success
+        response.init.error = msg[1:]
+
+        return response
+...
+```
+
+When a task starts, Kapacitor spawns a new process for the UDF and
+calls `init`, passing any specified options from the TICKscript. Once
+initialized, the process will remain running and Kapacitor will begin
+sending data as it arrives.
+
+### The Batch and Point methods
+
+Our task wants a `batch` edge, meaning it expects to get data in
+batches or windows. To send a batch of data to the UDF process,
+Kapacitor first calls the `begin_batch` method, which indicates that all
+subsequent points belong to a batch. Once the batch is complete, the
+`end_batch` method is called with some metadata about the batch.
+
+At a high level, this is what our UDF code will do for each of the
+`begin_batch`, `point`, and `end_batch` calls:
+
+* `begin_batch`: mark the start of a new batch and initialize a structure for it
+* `point`: store the point
+* `end_batch`: perform the `t-test` and then update the historical data
+
+### The Complete UDF script
+
+What follows is the complete UDF implementation with our `info`,
+`init`, and batching methods (as well as everything else we need).
+
+```python
+
+from kapacitor.udf.agent import Agent, Handler
+from scipy import stats
+import math
+from kapacitor.udf import udf_pb2
+import sys
+
+class TTestHandler(Handler):
+    """
+    Keep a rolling window of historically normal data
+    When a new window arrives use a two-sided t-test to determine
+    if the new window is statistically significantly different.
+    """
+    def __init__(self, agent):
+        self._agent = agent
+
+        self._field = ''
+        self._history = None
+
+        self._batch = None
+
+        self._alpha = 0.0
+
+    def info(self):
+        """
+        Respond with which type of edges we want/provide and any options we have.
+        """
+        response = udf_pb2.Response()
+        # We will consume batch edges aka windows of data.
+        response.info.wants = udf_pb2.BATCH
+        # We will produce single points of data aka stream.
+        response.info.provides = udf_pb2.STREAM
+
+        # Here we can define options for the UDF.
+        # Define which field we should process
+        response.info.options['field'].valueTypes.append(udf_pb2.STRING)
+
+        # Since we will be computing a moving average let's make the size configurable.
+        # Define an option 'size' that takes one integer argument.
+        response.info.options['size'].valueTypes.append(udf_pb2.INT)
+
+        # We need to know the alpha level so that we can ignore bad windows
+        # Define an option 'alpha' that takes one double argument.
+        response.info.options['alpha'].valueTypes.append(udf_pb2.DOUBLE)
+
+        return response
+
+    def init(self, init_req):
+        """
+        Given a list of options initialize this instance of the handler
+        """
+        success = True
+        msg = ''
+        size = 0
+        for opt in init_req.options:
+            if opt.name == 'field':
+                self._field = opt.values[0].stringValue
+            elif opt.name == 'size':
+                size = opt.values[0].intValue
+            elif opt.name == 'alpha':
+                self._alpha = opt.values[0].doubleValue
+
+        if size <= 1:
+            success = False
+            msg += ' must supply window size > 1'
+        if self._field == '':
+            success = False
+            msg += ' must supply a field name'
+        if self._alpha == 0:
+            success = False
+            msg += ' must supply an alpha value'
+
+        # Initialize our historical window
+        self._history = MovingStats(size)
+
+        response = udf_pb2.Response()
+        response.init.success = success
+        response.init.error = msg[1:]
+
+        return response
+
+    def begin_batch(self, begin_req):
+        # create new window for batch
+        self._batch = MovingStats(-1)
+
+    def point(self, point):
+        self._batch.update(point.fieldsDouble[self._field])
+
+    def end_batch(self, batch_meta):
+        # Default values used until there is history to compare against
+        t = 0.0
+        pvalue = 1.0
+        if self._history.n != 0:
+            # Perform Welch's t test
+            t, pvalue = stats.ttest_ind_from_stats(
+                    self._history.mean, self._history.stddev(), self._history.n,
+                    self._batch.mean, self._batch.stddev(), self._batch.n,
+                    equal_var=False)
+
+
+        # Send pvalue point back to Kapacitor
+        response = udf_pb2.Response()
+        response.point.time = batch_meta.tmax
+        response.point.name = batch_meta.name
+        response.point.group = batch_meta.group
+        response.point.tags.update(batch_meta.tags)
+        response.point.fieldsDouble["t"] = t
+        response.point.fieldsDouble["pvalue"] = pvalue
+        self._agent.write_response(response)
+
+        # Update historical stats with batch, but only if it was normal.
+        if pvalue > self._alpha:
+            for value in self._batch._window:
+                self._history.update(value)
+
+
+class MovingStats(object):
+    """
+    Calculate the moving mean and variance of a window.
+    Uses Welford's Algorithm.
+    """
+    def __init__(self, size):
+        """
+        Create new MovingStats object.
+        Size can be -1, infinite size or > 1 meaning static size
+        """
+        self.size = size
+        if not (self.size == -1 or self.size > 1):
+            raise Exception("size must be -1 or > 1")
+
+
+        self._window = []
+        self.n = 0.0
+        self.mean = 0.0
+        self._s = 0.0
+
+    def stddev(self):
+        """
+        Return the standard deviation
+        """
+        if self.n == 1:
+            return 0.0
+        return math.sqrt(self._s / (self.n - 1))
+
+    def update(self, value):
+
+        # update stats for new value
+        self.n += 1.0
+        diff = (value - self.mean)
+        self.mean += diff / self.n
+        self._s += diff * (value - self.mean)
+
+        if self.n == self.size + 1:
+            # update stats for removing old value
+            old = self._window.pop(0)
+            oldM = (self.n * self.mean - old)/(self.n - 1)
+            self._s -= (old - self.mean) * (old - oldM)
+            self.mean = oldM
+            self.n -= 1
+
+        self._window.append(value)
+
+if __name__ == '__main__':
+    # Create an agent
+    agent = Agent()
+
+    # Create a handler and pass it an agent so it can write points
+    h = TTestHandler(agent)
+
+    # Set the handler on the agent
+    agent.handler = h
+
+    # Anything printed to STDERR from a UDF process gets captured into the Kapacitor logs.
+    print >> sys.stderr, "Starting agent for TTestHandler"
+    agent.start()
+    agent.wait()
+    print >> sys.stderr, "Agent finished"
+
+```
+
+That was a lot, but now we are ready to configure Kapacitor to run our
+code.
Create a scratch dir for working through the rest of this +guide: + +```bash +mkdir /tmp/kapacitor_udf +cd /tmp/kapacitor_udf +``` + +Save the above UDF python script into `/tmp/kapacitor_udf/ttest.py`. + +### Configuring Kapacitor for our UDF + +Add this snippet to your Kapacitor configuration file (typically located at `/etc/kapacitor/kapacitor.conf`): + +``` +[udf] +[udf.functions] + [udf.functions.tTest] + # Run python + prog = "/usr/bin/python2" + # Pass args to python + # -u for unbuffered STDIN and STDOUT + # and the path to the script + args = ["-u", "/tmp/kapacitor_udf/ttest.py"] + # If the python process is unresponsive for 10s kill it + timeout = "10s" + # Define env vars for the process, in this case the PYTHONPATH + [udf.functions.tTest.env] + PYTHONPATH = "/tmp/kapacitor_udf/kapacitor/udf/agent/py" +``` + +In the configuration we called the function `tTest`. That is also how +we will reference it in the TICKscript. + +Notice that our Python script imported the `Agent` object, and we set +the `PYTHONPATH` in the configuration. Clone the Kapacitor source +into the tmp dir so we can point the `PYTHONPATH` at the necessary +python code. This is typically overkill since it's just two Python +files, but it makes it easy to follow: + +``` +git clone https://github.com/influxdata/kapacitor.git /tmp/kapacitor_udf/kapacitor +``` + +### Running Kapacitor with the UDF + +Restart the Kapacitor daemon to make sure everything is configured +correctly: + +```bash +service kapacitor restart +``` + +Check the logs (`/var/log/kapacitor/`) to make sure you see a +*Listening for signals* line and that no errors occurred. If you +don't see the line, it's because the UDF process is hung and not +responding. It should be killed after a timeout, so give it a moment +to stop properly. Once stopped, you can fix any errors and try again. + +### The TICKscript + +If everything was started correctly, then it's time to write our +TICKscript to use the `tTest` UDF method: + +```js +dbrp "printer"."autogen" + +// This TICKscript monitors the three temperatures for a 3d printing job, +// and triggers alerts if the temperatures start to experience abnormal behavior. + +// Define our desired significance level. +var alpha = 0.001 + +// Select the temperatures measurements +var data = stream + |from() + .measurement('temperatures') + |window() + .period(5m) + .every(5m) + +data + //Run our tTest UDF on the hotend temperature + @tTest() + // specify the hotend field + .field('hotend') + // Keep a 1h rolling window + .size(3600) + // pass in the alpha value + .alpha(alpha) + |alert() + .id('hotend') + .crit(lambda: "pvalue" < alpha) + .log('/tmp/kapacitor_udf/hotend_failure.log') + +// Do the same for the bed and air temperature. +data + @tTest() + .field('bed') + .size(3600) + .alpha(alpha) + |alert() + .id('bed') + .crit(lambda: "pvalue" < alpha) + .log('/tmp/kapacitor_udf/bed_failure.log') + +data + @tTest() + .field('air') + .size(3600) + .alpha(alpha) + |alert() + .id('air') + .crit(lambda: "pvalue" < alpha) + .log('/tmp/kapacitor_udf/air_failure.log') + +``` + +Notice that we have called `tTest` three times. This means that +Kapacitor will spawn three different Python processes and pass the +respective init option to each one. 
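+
+While the task is executing (for example, during the replay later in this
+guide), you can sanity-check this from a shell (a sketch, assuming a Unix-like
+system and the script path used above):
+
+```bash
+# Expect one ttest.py process per @tTest() call in the TICKscript
+ps aux | grep '[t]test.py'
+```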
+ +Save this script as `/tmp/kapacitor_udf/print_temps.tick` and define +the Kapacitor task: + +```bash +kapacitor define print_temps -tick print_temps.tick +``` + +### Generating test data + +To simulate our printer for testing, we will write a simple Python +script to generate temperatures. This script generates random +temperatures that are normally distributed around a target +temperature. At specified times, the variation and offset of the +temperatures changes, creating an anomaly. + +> Don't worry too much about the details here. It would be much better +to use real data for testing our TICKscript and UDF, but this is +faster (and much cheaper than a 3D printer). + +```python +#!/usr/bin/python2 + +from numpy import random +from datetime import timedelta, datetime +import sys +import time +import requests + + +# Target temperatures in C +hotend_t = 220 +bed_t = 90 +air_t = 70 + +# Connection info +write_url = 'http://localhost:9092/write?db=printer&rp=autogen&precision=s' +measurement = 'temperatures' + +def temp(target, sigma): + """ + Pick a random temperature from a normal distribution + centered on target temperature. + """ + return random.normal(target, sigma) + +def main(): + hotend_sigma = 0 + bed_sigma = 0 + air_sigma = 0 + hotend_offset = 0 + bed_offset = 0 + air_offset = 0 + + # Define some anomalies by changing sigma at certain times + # list of sigma values to start at a specified iteration + hotend_anomalies =[ + (0, 0.5, 0), # normal sigma + (3600, 3.0, -1.5), # at one hour the hotend goes bad + (3900, 0.5, 0), # 5 minutes later recovers + ] + bed_anomalies =[ + (0, 1.0, 0), # normal sigma + (28800, 5.0, 2.0), # at 8 hours the bed goes bad + (29700, 1.0, 0), # 15 minutes later recovers + ] + air_anomalies = [ + (0, 3.0, 0), # normal sigma + (10800, 5.0, 0), # at 3 hours air starts to fluctuate more + (43200, 15.0, -5.0), # at 12 hours air goes really bad + (45000, 5.0, 0), # 30 minutes later recovers + (72000, 3.0, 0), # at 20 hours goes back to normal + ] + + # Start from 2016-01-01 00:00:00 UTC + # This makes it easy to reason about the data later + now = datetime(2016, 1, 1) + second = timedelta(seconds=1) + epoch = datetime(1970,1,1) + + # 24 hours of temperatures once per second + points = [] + for i in range(60*60*24+2): + # update sigma values + if len(hotend_anomalies) > 0 and i == hotend_anomalies[0][0]: + hotend_sigma = hotend_anomalies[0][1] + hotend_offset = hotend_anomalies[0][2] + hotend_anomalies = hotend_anomalies[1:] + + if len(bed_anomalies) > 0 and i == bed_anomalies[0][0]: + bed_sigma = bed_anomalies[0][1] + bed_offset = bed_anomalies[0][2] + bed_anomalies = bed_anomalies[1:] + + if len(air_anomalies) > 0 and i == air_anomalies[0][0]: + air_sigma = air_anomalies[0][1] + air_offset = air_anomalies[0][2] + air_anomalies = air_anomalies[1:] + + # generate temps + hotend = temp(hotend_t+hotend_offset, hotend_sigma) + bed = temp(bed_t+bed_offset, bed_sigma) + air = temp(air_t+air_offset, air_sigma) + points.append("%s hotend=%f,bed=%f,air=%f %d" % ( + measurement, + hotend, + bed, + air, + (now - epoch).total_seconds(), + )) + now += second + + # Write data to Kapacitor + r = requests.post(write_url, data='\n'.join(points)) + if r.status_code != 204: + print >> sys.stderr, r.text + return 1 + return 0 + +if __name__ == '__main__': + exit(main()) + +``` + +Save the above script as `/tmp/kapacitor_udf/printer_data.py`. + +> This Python script has two Python dependencies: `requests` and `numpy`. 
+They can easily be installed via `pip` or your package manager.
+
+At this point we have a task ready to go, and a script to generate
+some fake data with anomalies. Now we can create a recording of our
+fake data so that we can easily iterate on the task:
+
+```sh
+# Start the recording in the background
+kapacitor record stream -task print_temps -duration 24h -no-wait
+# Grab the ID from the output and store it in a var
+rid=7bd3ced5-5e95-4a67-a0e1-f00860b1af47
+# Run our python script to generate data
+chmod +x ./printer_data.py
+./printer_data.py
+```
+
+We can verify it worked by listing information about the recording.
+Our recording came out to `1.6MB`, so yours should come out somewhere
+close to that:
+
+```
+$ kapacitor list recordings $rid
+ID                                      Type    Status    Size    Date
+7bd3ced5-5e95-4a67-a0e1-f00860b1af47    stream  finished  1.6 MB  04 May 16 11:44 MDT
+```
+
+### Detecting anomalies
+
+Finally, let's run the replay against our task and see how it works:
+
+```
+kapacitor replay -task print_temps -recording $rid -rec-time
+```
+
+Check the various log files to see if the algorithm caught the
+anomalies:
+
+```
+cat /tmp/kapacitor_udf/{hotend,bed,air}_failure.log
+```
+
+Based on the `printer_data.py` script above, there should be anomalies at:
+
+* 1hr: hotend
+* 8hr: bed
+* 12hr: air
+
+There may be some false positives as well, but, since we want this to
+work with real data (not our nice clean fake data), it doesn't help
+much to tweak it at this point.
+
+Well, there we have it. We can now get alerts when the temperatures
+for our prints deviate from the norm. Hopefully you now have a
+better understanding of how Kapacitor UDFs work, and have a good
+working example as a launching point into further work with UDFs.
+
+The framework is in place, now go plug in a real anomaly detection
+algorithm that works for your domain!
+
+## Extending the example
+
+There are a few things that we have left as exercises to the reader:
+
+1. Snapshot/Restore: Kapacitor will regularly snapshot the state of
+   your UDF process so that it can be restored if the process is
+   restarted. The examples
+   [here](https://github.com/influxdata/kapacitor/tree/master/udf/agent/examples/)
+   have implementations for the `snapshot` and `restore` methods.
+   Implement them for the `TTestHandler` handler as an exercise.
+
+2. Change the algorithm from a t-test to something more fitting for
+   your domain. Both `numpy` and `scipy` have a wealth of algorithms.
+
+3. The options returned by the `info` request can contain multiple
+   arguments. Modify the `field` option to accept three field names
+   and change the `TTestHandler` to maintain historical data and
+   batches for each field instead of just the one. That way only one
+   ttest.py process needs to be running.
diff --git a/content/kapacitor/v1.5/guides/continuous_queries.md b/content/kapacitor/v1.5/guides/continuous_queries.md
new file mode 100644
index 000000000..a825223da
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/continuous_queries.md
+---
+title: Kapacitor as a Continuous Query engine
+aliases:
+  - kapacitor/v1.5/examples/continuous_queries/
+menu:
+  kapacitor_1_5:
+    name: Kapacitor as a Continuous Query engine
+    identifier: continuous_queries
+    weight: 30
+    parent: guides
+---
+
+Kapacitor can be used to do the same work as Continuous Queries (CQ) in InfluxDB.
+Today we are going to explore reasons to use one over the other and the basics of using Kapacitor for CQ-type workloads.
+
+## An Example
+
+First, let's take a simple CQ and rewrite it as a Kapacitor TICKscript.
+
+Here is a CQ that computes the mean of the `cpu.usage_idle` every 5 minutes and stores it in the new measurement `mean_cpu_idle`.
+
+```
+CREATE CONTINUOUS QUERY cpu_idle_mean ON telegraf BEGIN SELECT mean("usage_idle") as usage_idle INTO mean_cpu_idle FROM cpu GROUP BY time(5m),* END
+```
+
+To do the same with Kapacitor, here is a streaming TICKscript.
+
+```js
+dbrp "telegraf"."autogen"
+
+stream
+    |from()
+        .database('telegraf')
+        .measurement('cpu')
+        .groupBy(*)
+    |window()
+        .period(5m)
+        .every(5m)
+        .align()
+    |mean('usage_idle')
+        .as('usage_idle')
+    |influxDBOut()
+        .database('telegraf')
+        .retentionPolicy('autogen')
+        .measurement('mean_cpu_idle')
+        .precision('s')
+```
+
+The same thing can also be done as a batch task in Kapacitor.
+
+```js
+dbrp "telegraf"."autogen"
+
+batch
+    |query('SELECT mean(usage_idle) as usage_idle FROM "telegraf"."autogen".cpu')
+        .period(5m)
+        .every(5m)
+        .groupBy(*)
+    |influxDBOut()
+        .database('telegraf')
+        .retentionPolicy('autogen')
+        .measurement('mean_cpu_idle')
+        .precision('s')
+```
+
+All three of these methods will produce the same results.
+
+## Questions
+
+At this point there are a few questions we should answer:
+
+1. When should we use Kapacitor instead of CQs?
+2. When should we use stream tasks vs batch tasks in Kapacitor?
+
+### When should we use Kapacitor instead of CQs?
+
+There are a few reasons to use Kapacitor instead of CQs.
+
+* You are performing a significant number of CQs and want to isolate the workload.
+  By using Kapacitor to perform the aggregations, InfluxDB's performance profile can remain more stable and isolated from Kapacitor's.
+* You need to do more than just perform a query, for example, maybe you want to store only outliers from an aggregation instead of all of them.
+  Kapacitor can do significantly more with the data than CQs, so you have more flexibility in transforming your data.
+
+There are a few use cases where using CQs almost always makes sense.
+
+* Performing downsampling for retention policies.
+  This is what CQs are designed for and do well.
+  No need to add another moving piece (i.e. Kapacitor) to your infrastructure if you do not need it.
+  Keep it simple.
+* You only have a handful of CQs. Again, keep it simple; do not add more moving parts to your setup unless you need them.
+
+### When should we use stream tasks vs batch tasks in Kapacitor?
+
+Basically, the answer boils down to two things: the available RAM and the time period being used.
+
+A stream task will have to keep all data in RAM for the specified period.
+If this period is too long for the available RAM, then you will first need to store the data in InfluxDB and then query it using a batch task.
+
+A stream task does have one slight advantage: since it's watching the stream of data, it understands time by the timestamps on the data.
+As such, there are no race conditions for whether a given point will make it into a window or not.
+If you are using a batch task, it is still possible for a point to arrive late and be missed in a window.
+
+
+## Another Example
+
+Create a continuous query to downsample across retention policies.
+
+```
+CREATE CONTINUOUS QUERY cpu_idle_median ON telegraf BEGIN SELECT median("usage_idle") as usage_idle INTO "telegraf"."sampled_5m"."median_cpu_idle" FROM "telegraf"."autogen"."cpu" GROUP BY time(5m),* END
+```
+
+The stream TICKscript:
+
+```js
+dbrp "telegraf"."autogen"
+
+stream
+    |from()
+        .database('telegraf')
+        .retentionPolicy('autogen')
+        .measurement('cpu')
+        .groupBy(*)
+    |window()
+        .period(5m)
+        .every(5m)
+        .align()
+    |median('usage_idle')
+        .as('usage_idle')
+    |influxDBOut()
+        .database('telegraf')
+        .retentionPolicy('sampled_5m')
+        .measurement('median_cpu_idle')
+        .precision('s')
+```
+
+And the batch TICKscript:
+
+```js
+dbrp "telegraf"."autogen"
+
+batch
+    |query('SELECT median(usage_idle) as usage_idle FROM "telegraf"."autogen"."cpu"')
+        .period(5m)
+        .every(5m)
+        .groupBy(*)
+    |influxDBOut()
+        .database('telegraf')
+        .retentionPolicy('sampled_5m')
+        .measurement('median_cpu_idle')
+        .precision('s')
+```
+
+
+## Summary
+
+Kapacitor is a powerful tool.
+If you need more flexibility than CQs offer, use it.
+For more information and help writing TICKscripts from InfluxQL queries take a look at these [docs](https://docs.influxdata.com/kapacitor/latest/nodes/influx_q_l_node/) on the InfluxQL node in Kapacitor.
+Every function available in the InfluxDB query language is available in Kapacitor, so you can convert any query into a Kapacitor TICKscript.
+
+## Important to Know
+
+### Continuous queries and Kapacitor tasks may produce different results
+For some types of queries, CQs (InfluxDB) and TICKscripts (Kapacitor) may return different results due to how each selects time boundaries.
+Kapacitor chooses the maximum timestamp (tMax) while InfluxDB chooses the minimum timestamp (tMin).
+The choice between using tMax or tMin is somewhat arbitrary for InfluxDB; however, the same cannot be said for Kapacitor.
+
+Kapacitor has the ability to do complex joining operations on overlapping time windows.
+For example, if you were to join the mean over the last month with the mean over the last day,
+you would need their resulting values to occur at the same time, using the most recent time, tMax.
+If Kapacitor used tMin instead, the resulting values would not occur at the same time.
+One would be at the beginning of the last month, while the other would be at the beginning of the last day.
+
+Consider the following query run as both an InfluxQL query and as a TICKscript:
+
+#### InfluxQL
+
+```sql
+SELECT mean(*) FROM ... time >= '2017-03-13T17:50:00Z' AND time < '2017-03-13T17:51:00Z'
+```
+
+#### TICKscript
+
+```js
+batch
+    |query('SELECT queryDurationNs FROM "_internal".monitor.queryExecutor')
+        .period(1m)
+        .every(1m)
+        .align()
+    |mean('queryDurationNs')
+```
+
+#### Query Results
+| Query Method | Time | Mean |
+|:------------ |:---- |:---- |
+| Continuous Query | 2017-03-13T17:50:00Z | 8.083532716666666e+08 |
+| TICKscript | 2017-03-13T17:51:00Z | 8.083532716666666e+08 |
+
+> Note the difference between the returned timestamps.
+
+This is a known issue discussed in [Issue #1258](https://github.com/influxdata/kapacitor/issues/1258) on GitHub.
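+
+If you need a Kapacitor task's output to line up with the timestamps a CQ would produce, one possible workaround (a sketch based on the one-minute example above, not an officially documented fix) is to use the [shift](/kapacitor/v1.5/nodes/shift_node/) node to move each resulting point back by the window period:
+
+```js
+batch
+    |query('SELECT queryDurationNs FROM "_internal".monitor.queryExecutor')
+        .period(1m)
+        .every(1m)
+        .align()
+    |mean('queryDurationNs')
+    // Shift each point back by the window period (1m here)
+    // so its timestamp matches InfluxDB's tMin convention.
+    |shift(-1m)
+```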
diff --git a/content/kapacitor/v1.5/guides/event-handler-setup.md b/content/kapacitor/v1.5/guides/event-handler-setup.md
new file mode 100644
index 000000000..da621e4a9
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/event-handler-setup.md
@@ -0,0 +1,440 @@
+---
+title: Event handler setup
+
+menu:
+  kapacitor_1_5:
+    weight: 70
+    parent: guides
+---
+
+Integrate Kapacitor into your monitoring system by sending [alert messages](/kapacitor/latest/nodes/alert_node/#message)
+to supported event handlers.
+Currently, Kapacitor can send alert messages to specific log files and specific URLs,
+as well as to applications such as [Slack](https://slack.com/) and [HipChat](https://www.hipchat.com/).
+
+This document offers step-by-step instructions for setting up event handlers with Kapacitor,
+including relevant configuration options and [TICKscript](/kapacitor/latest/tick/) syntax.
+Currently, this document doesn't cover every supported event handler, but we will
+continue to add content to this page over time.
+For a complete list of the supported event handlers and for additional information,
+please see the [event handler reference documentation](/kapacitor/latest/nodes/alert_node/).
+
+[HipChat Setup](#hipchat-setup)
+[Telegram Setup](#telegram-setup)
+
+## HipChat setup
+
+[HipChat](https://www.hipchat.com/) is Atlassian's web service for group chat,
+video chat, and screen sharing.
+Configure Kapacitor to send alert messages to a HipChat room.
+
+### Requirements
+
+To configure Kapacitor with HipChat, you need:
+
+- Your HipChat subdomain name
+- Your HipChat room name
+- A HipChat API access token for sending notifications
+
+#### HipChat API access token
+The following steps describe how to create the API access token.
+
+1. From the HipChat home page, access **Account settings** by clicking on the
+person icon in the top right corner.
+
+2. Select **API access** from the items in the left menu sidebar.
+
+3. Under **Create new token**, enter a label for your token (it can be anything).
+
+4. Under **Create new token**, select **Send Notification** as the Scope.
+
+5. Click **Create**.
+
+   Your token appears in the table just above the **Create new token** section:
+
+   ![HipChat token](/img/kapacitor/hipchat-token.png)
+
+### Configuration
+
+In the `[hipchat]` section of Kapacitor's configuration file, set:
+
+- `enabled` to `true`
+- `subdomain` in the `url` setting to your HipChat subdomain
+
+The optional configuration settings are:
+
+`room`
+Set to your HipChat room.
+This serves as the default chat ID if the TICKscript doesn't specify a chat ID.
+
+`token`
+Set to your HipChat [API access token](#hipchat-api-access-token).
+This serves as the default token if the TICKscript doesn't specify an API access token.
+
+`global`
+Set to `true` to send all alerts to HipChat without needing to specify HipChat in TICKscripts.
+
+`state-changes-only`
+Set to `true` to only send an alert to HipChat if the alert state changes.
+This setting only applies if the `global` setting is also set to `true`.
+
+#### Sample configuration
+```toml
+[hipchat]
+  enabled = true
+  url = "https://my-subdomain.hipchat.com/v2/room"
+  room = "my-room"
+  token = "mytokentokentokentoken"
+  global = false
+  state-changes-only = false
+```
+
+#### TICKscript syntax
+```js
+|alert()
+    .hipChat()
+    .room('')
+    .token('')
+```
+
+The `.room()` and `.token()` specifications are optional.
+If they aren't set in the TICKscript, they default to the `room` and
+`token` settings in the `[hipchat]` section of the `kapacitor.conf`.
+
+> If `global` is set to `true` in the configuration file, there's no
+> need to specify `.hipChat()` in the TICKscript.
+> Kapacitor sends all alerts to HipChat by default.
+
+`.room('')`
+Sets the HipChat room.
+
+`.token('')`
+Sets the HipChat [API access token](#hipchat-api-access-token).
+
+
+### Examples
+
+#### Send alerts to the HipChat room set in the configuration file
+
+_**Configuration file**_
+```toml
+[hipchat]
+  enabled = true
+  url = "https://testtest.hipchat.com/v2/room"
+  room = "my-alerts"
+  token = "tokentokentokentokentoken"
+  global = false
+  state-changes-only = true
+```
+
+_**TICKscript**_
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |alert()
+        .crit(lambda: "usage_idle" < 97)
+        .message('Hey, check your CPU')
+        .hipChat()
+```
+
+The setup sends `Hey, check your CPU` to the **my-alerts** room associated with
+the `testtest` subdomain.
+
+#### Send alerts to the HipChat room set in the TICKscript
+
+_**Configuration file**_
+```toml
+[hipchat]
+  enabled = true
+  url = "https://testtest.hipchat.com/v2/room"
+  room = "my-alerts"
+  token = "tokentokentokentokentoken"
+  global = false
+  state-changes-only = true
+```
+
+_**TICKscript**_
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |alert()
+        .crit(lambda: "usage_idle" < 97)
+        .message('Hey, check your CPU')
+        .hipChat()
+        .room('random')
+```
+
+The setup sends `Hey, check your CPU` to the **random** room associated with
+the `testtest` subdomain.
+Notice that `.room()` in the TICKscript overrides the `room` setting in the
+configuration file.
+
+## Telegram setup
+
+[Telegram](https://telegram.org/) is a messaging app.
+Configure Kapacitor to send alert messages to a Telegram bot.
+
+### Requirements
+
+To configure Kapacitor with Telegram, you need:
+
+- A Telegram bot
+- A Telegram API access token
+- Your Telegram chat ID
+
+#### Telegram bot
+
+The following steps describe how to create a new Telegram bot.
+
+1. Search for the **@BotFather** username in your Telegram application.
+
+2. Click **Start** to begin a conversation with **@BotFather**.
+
+3. Send `/newbot` to **@BotFather**.
+
+   **@BotFather** responds:
+
+   _Alright, a new bot. How are we going to call it? Please choose a name for your bot._
+
+   **@BotFather** will prompt you through the rest of the bot-creation process; feel
+   free to follow its directions or continue with our version of the steps below.
+   Both setups result in success!
+
+4. Send your bot's name to **@BotFather**.
+
+   Your bot's name can be anything.
+   Note that this is not your bot's Telegram `@username`; you'll create the username
+   in step 5.
+
+   **@BotFather** responds:
+
+   _Good. Now let's choose a username for your bot. It must end in `bot`. Like this, for example: TetrisBot or tetris\_bot._
+
+5. Send your bot's username to **@BotFather**.
+
+   Your bot's username must end in `bot`.
+   For example: `mushroomKap_bot`.
+
+   **@BotFather** responds:
+
+   _Done! Congratulations on your new bot. You will find it at t.me/<bot-username>. You can now add a description, about section and profile picture for your bot, see /help for a list of commands. By the way, when you've finished creating your cool bot, ping our Bot Support if you want a better username for it.
+   Just make sure the bot is fully operational before you do this._
+
+   Use this token to access the HTTP API:
+
+   <API-access-token>
+
+   For a description of the Bot API, see this page: https://core.telegram.org/bots/api
+
+6. Begin a conversation with your bot.
+
+   Click on the `t.me/<bot-username>` link in **@BotFather**'s response
+   and click **Start** at the bottom of your Telegram application.
+
+   Your newly created bot will appear in the chat list on the left side of the application.
+
+#### Telegram API access token
+
+The following section describes how to identify or create the API access token.
+
+Telegram's **@BotFather** bot sent you an API access token when you created your bot.
+See the **@BotFather** response in step 5 of the previous section for where to find your token.
+If you can't find the API access token, create a new token with the steps below.
+
+1. Send `/token` to **@BotFather**.
+
+2. Select the relevant bot at the bottom of your Telegram application.
+
+   **@BotFather** responds with a new API access token:
+
+   You can use this token to access the HTTP API:
+
+   <API-access-token>
+
+   For a description of the Bot API, see this page: https://core.telegram.org/bots/api
+
+#### Telegram chat ID
+
+The following steps describe how to identify your chat ID.
+
+1. Paste the following link in your browser.
+   Replace `<API-access-token>` with the API access token that you identified
+   or created in the previous section:
+
+   `https://api.telegram.org/bot<API-access-token>/getUpdates?offset=0`
+
+2. Send a message to your bot in the Telegram application.
+   The message text can be anything; your chat history must include at least
+   one message to get your chat ID.
+
+3. Refresh your browser.
+
+4. Identify the numerical chat ID in the JSON provided in the browser.
+   In the formatted example below, the chat ID is `123456789`.
+
+   ```json
+   {
+     "ok": true,
+     "result": [
+       {
+         "update_id": 101010101,
+         "message": {
+           "message_id": 2,
+           "from": {
+             "id": 123456789,
+             "first_name": "Mushroom",
+             "last_name": "Kap"
+           },
+           "chat": {
+             "id": 123456789,
+             "first_name": "Mushroom",
+             "last_name": "Kap",
+             "type": "private"
+           },
+           "date": 1487183963,
+           "text": "hi"
+         }
+       }
+     ]
+   }
+   ```
+
+### Configuration
+
+In the `[telegram]` section of Kapacitor's configuration file, set:
+
+- `enabled` to `true`
+- `token` to your [API access token](#telegram-api-access-token)
+
+The default `url` setting (`https://api.telegram.org/bot`) requires no additional configuration.
+
+The optional configuration settings are:
+
+`chat-id`
+Set to your Telegram [chat ID](#telegram-chat-id). This serves as the default chat ID if the TICKscript doesn't specify a chat ID.
+
+`parse-mode`
+Set to `Markdown` or `HTML` for Markdown-formatted or HTML-formatted alert messages.
+The default `parse-mode` is `Markdown`.
+
+`disable-web-page-preview`
+Set to `true` to disable [link previews](https://telegram.org/blog/link-preview) in alert messages.
+
+`disable-notification`
+Set to `true` to disable notifications on iOS devices and disable sounds on Android devices.
+When set to `true`, Android users continue to receive notifications.
+
+`global`
+Set to `true` to send all alerts to Telegram without needing to specify Telegram in TICKscripts.
+
+`state-changes-only`
+Set to `true` to only send an alert to Telegram if the alert state changes.
+This setting only applies if the `global` setting is also set to `true`.
+
+#### Sample configuration
+```toml
+[telegram]
+  enabled = true
+  url = "https://api.telegram.org/bot"
+  token = "abcdefghi:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+  chat-id = "123456789"
+  parse-mode = "Markdown"
+  disable-web-page-preview = true
+  disable-notification = false
+  global = true
+  state-changes-only = true
+```
+
+#### TICKscript syntax
+```js
+|alert()
+    .telegram()
+    .chatId('')
+    .disableNotification()
+    .disableWebPagePreview()
+    .parseMode(['Markdown' | 'HTML'])
+```
+
+The `.chatId()`, `.disableNotification()`, `.disableWebPagePreview()`, and `.parseMode()` specifications are optional.
+If they aren't set in the TICKscript, they default to the `chat-id`, `disable-notification`,
+`disable-web-page-preview`, and `parse-mode` settings in the `[telegram]` section of the configuration file.
+Note that if `global` is set to `true` in the configuration file, there's no need to specify
+`.telegram()` in the TICKscript; Kapacitor sends all alerts to Telegram by default.
+
+`.chatId('')`
+Sets the Telegram [chat ID](#telegram-chat-id).
+
+`.disableNotification()`
+Disables notifications on iOS devices and disables sounds on Android devices.
+Android users continue to receive notifications.
+
+`.disableWebPagePreview()`
+Disables [link previews](https://telegram.org/blog/link-preview) in alert messages.
+
+`.parseMode(['Markdown' | 'HTML'])`
+Sets `Markdown` or `HTML` as the format for alert messages.
+
+
+### Examples
+
+#### Send alerts to the Telegram chat ID set in the configuration file
+
+_**Configuration file**_
+```toml
+[telegram]
+  enabled = true
+  url = "https://api.telegram.org/bot"
+  token = "abcdefghi:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+  chat-id = "123456789"
+  parse-mode = "Markdown"
+  disable-web-page-preview = false
+  disable-notification = false
+  global = false
+  state-changes-only = false
+```
+
+_**TICKscript**_
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |alert()
+        .crit(lambda: "usage_idle" < 97)
+        .message('Might want to check your CPU')
+        .telegram()
+```
+
+The setup sends `Might want to check your CPU` to the Telegram bot associated
+with the chat ID `123456789` and API access token `abcdefghi:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`.
+
+#### Send alerts to the Telegram chat ID set in the TICKscript
+
+_**Configuration file**_
+```toml
+[telegram]
+  enabled = true
+  url = "https://api.telegram.org/bot"
+  token = "abcdefghi:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+  chat-id = ""
+  parse-mode = "Markdown"
+  disable-web-page-preview = false
+  disable-notification = false
+  global = false
+  state-changes-only = false
+```
+
+_**TICKscript**_
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |alert()
+        .crit(lambda: "usage_idle" < 97)
+        .message('Might want to check your CPU')
+        .telegram()
+        .chatId('123456789')
+```
+
+The setup sends `Might want to check your CPU` to the Telegram bot associated with the chat ID `123456789` and API access token `abcdefghi:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`.
diff --git a/content/kapacitor/v1.5/guides/hierarchical-alert-suppression.md b/content/kapacitor/v1.5/guides/hierarchical-alert-suppression.md
new file mode 100644
index 000000000..3123e64b1
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/hierarchical-alert-suppression.md
@@ -0,0 +1,82 @@
+---
+title: Suppressing Kapacitor alerts based on hierarchy
+description: Kapacitor's '.inhibit()' allows you to create hierarchical alerting architectures by suppressing alerts with matching tags in a specified alert category.
+menu:
+  kapacitor_1_5:
+    name: Hierarchical alert suppression
+    identifier: hierarchical_alert_suppression
+    weight: 30
+    parent: guides
+---
+
+Kapacitor allows you to build out a robust monitoring and alerting solution with
+multiple "levels" or "tiers" of alerts.
+However, an issue arises when an event triggers both high-level and low-level alerts
+and you end up getting multiple alerts from different contexts.
+The [AlertNode's `.inhibit()`](/kapacitor/v1.5/nodes/alert_node/#inhibit) method
+allows you to suppress other alerts when an alert is triggered.
+
+For example, let's say you are monitoring a cluster of servers.
+As part of your alerting architecture, you have host-level alerts such as CPU usage
+alerts, RAM usage alerts, disk I/O, etc.
+You also have cluster-level alerts that monitor network health, host uptime, etc.
+
+If a CPU spike on a host in your cluster takes the machine offline, rather than
+getting a host-level alert for the CPU spike _**and**_ a cluster-level alert for
+the offline node, you'd get a single alert – the alert that the node is offline.
+The cluster-level alert would suppress the host-level alert.
+
+## Using the `.inhibit()` method to suppress alerts
+The `.inhibit()` method uses alert categories and tags to inhibit or suppress other alerts.
+
+```js
+// ...
+    |alert()
+        .inhibit('<category>', '<tags>')
+```
+
+`category`
+The category for which this alert inhibits or suppresses alerts.
+
+`tags`
+A comma-delimited list of tags that must be matched in order for alerts to be
+inhibited or suppressed.
+
+### Example hierarchical alert suppression
+The following TICKscripts represent two alerts in a layered alert architecture.
+The first is a host-specific CPU alert that sends alerts to the `system_alerts`
+category whenever idle CPU usage is less than 10%.
+Streamed data points are grouped by the `host` tag, which identifies the host the
+data point is coming from.
+
+_**cpu\_alert.tick**_
+```js
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy('host')
+    |alert()
+        .category('system_alerts')
+        .crit(lambda: "usage_idle" < 10.0)
+```
+
+The following TICKscript is a cluster-level alert that monitors the uptime of hosts in the cluster.
+It uses the [`deadman()`](/kapacitor/v1.5/nodes/alert_node/#deadman) function to
+create an alert when a host is unresponsive or offline.
+The `.inhibit()` method in the deadman alert suppresses all alerts to the `system_alerts`
+category that include a matching `host` tag, meaning they are from the same host.
+
+_**host\_alert.tick**_
+```js
+stream
+    |from()
+        .measurement('uptime')
+        .groupBy('host')
+    |deadman(0.0, 1m)
+        .inhibit('system_alerts', 'host')
+```
+
+With this alert architecture, a host may be unresponsive due to a CPU bottleneck,
+but because the deadman alert inhibits system alerts from the same host, you won't
+get alert notifications for both the deadman and the high CPU usage; just the
+deadman alert for that specific host.
diff --git a/content/kapacitor/v1.5/guides/join_backfill.md b/content/kapacitor/v1.5/guides/join_backfill.md
new file mode 100644
index 000000000..4375bd73c
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/join_backfill.md
@@ -0,0 +1,261 @@
+---
+title: Calculating rates across joined series + backfill
+aliases:
+  - kapacitor/v1.5/examples/join_backfill/
+menu:
+  kapacitor_1_5:
+    name: Calculating rates across series
+    weight: 10
+    parent: guides
+---
+
+Collecting a set of time series data where each time series is counting a particular event is a common scenario.
+Using Kapacitor, multiple time series in a set can be joined and used to calculate a combined value, which can then be stored as a new time series.
+
+This guide shows how to use a prepared data generator in Python to combine two generated
+time series into a new calculated measurement, then
+store that measurement back into InfluxDB using Kapacitor.
+
+It uses as its example a hypothetical high-volume website for which two measurements
+are taken:
+
+* `errors` -- the number of page views that had an error.
+* `views` -- the number of page views that had no error.
+
+### The Data generator
+
+Data for such a website can be primed and written to InfluxDB using the Python 3
+script packaged in [pages.zip](/downloads/pages.zip) ([sha256](/downloads/pages.zip.sha256)), created for this purpose.
+It leverages the [InfluxDB-Python](https://github.com/influxdata/influxdb-python) library.
+See that GitHub project for instructions on how to install the library in Python.
+
+Once unzipped, this script can be used to create a database called `pages`, which
+uses the default retention policy `autogen`. It can be used to create a backlog
+of data and then to set the generator running, walking along randomly generated
+`view` and `error` counts.
+
+It can be started with a backlog of two days' worth of random data as follows:
+
+```
+$ ./pages_db.py --silent true pnr --start 2d
+Created database pages
+priming and running
+data primed
+generator now running. CTRL+C to stop
+..........................................
+```
+
+Priming two days' worth of data can take about a minute.
+
+### Joining with batch data
+
+Having simple counts may not be sufficient for a site administrator. More
+important would be to know the percentage of page views that are resulting in error.
+The process is to select both existing measurements, join them and calculate an
+error percentage. The error percentage can then be stored in
+InfluxDB as a new measurement.
+
+The two measurements, `errors` and `views`, need to be queried.
+
+```javascript
+// Get errors batch data
+var errors = batch
+    |query('SELECT sum(value) FROM "pages"."autogen".errors')
+        .period(1h)
+        .every(1h)
+        .groupBy(time(1m), *)
+        .fill(0)
+
+// Get views batch data
+var views = batch
+    |query('SELECT sum(value) FROM "pages"."autogen".views')
+        .period(1h)
+        .every(1h)
+        .groupBy(time(1m), *)
+        .fill(0)
+```
+
+The join process skips points that do not have a matching point in time from the other source.
+As a result, it is important to both `groupBy` and `fill` the data while joining batch data.
+Grouping the data by time ensures that each source has data points at consistent time periods.
+Filling the data ensures every point will have a match with a sane default.
+
+In this example the `groupBy` method uses the wildcard `*` to group results by all tags.
+This can be made more specific by declaring individual tags, and since the generated
+demo data contains only one tag, `page`, the `groupBy` statement could be written
+as follows: `.groupBy(time(1m), 'page')`.
+
+With two batch sources, one for each measurement, they need to be joined like so.
+
+```javascript
+// Join errors and views
+errors
+    |join(views)
+        .as('errors', 'views')
+```
+
+The data is joined by time, meaning that as pairs of batches arrive from each source
+they are combined into a single batch. As a result, the fields from each source
+need to be renamed to properly namespace the fields. This is done via the
+`.as('errors', 'views')` line.
+In this example, each measurement has only one field
+named `sum`. The joined fields are called `errors.sum` and `views.sum` respectively.
+
+Now that the data is joined, the percentage can be calculated.
+Using the new names for the fields, the following expression can be used to calculate
+the desired percentage.
+
+```javascript
+    // Calculate percentage
+    |eval(lambda: ("errors.sum" / ("views.sum" + "errors.sum")) * 100.0)
+        // Give the resulting field a name
+        .as('value')
+```
+
+Finally, this data is stored back into InfluxDB.
+
+```javascript
+    |influxDBOut()
+        .database('pages')
+        .measurement('error_percent')
+```
+
+Here is the complete TICKscript for the batch task:
+
+```javascript
+dbrp "pages"."autogen"
+
+// Get errors batch data
+var errors = batch
+    |query('SELECT sum(value) FROM "pages"."autogen".errors')
+        .period(1h)
+        .every(1h)
+        .groupBy(time(1m), *)
+        .fill(0)
+
+// Get views batch data
+var views = batch
+    |query('SELECT sum(value) FROM "pages"."autogen".views')
+        .period(1h)
+        .every(1h)
+        .groupBy(time(1m), *)
+        .fill(0)
+
+// Join errors and views
+errors
+    |join(views)
+        .as('errors', 'views')
+    // Calculate percentage
+    |eval(lambda: ("errors.sum" / ("views.sum" + "errors.sum")) * 100.0)
+        // Give the resulting field a name
+        .as('value')
+    |influxDBOut()
+        .database('pages')
+        .measurement('error_percent')
+```
+
+### Backfill
+Now for a fun little trick.
+Using Kapacitor's record/replay actions, this TICKscript can be run on historical data.
+First, save the above script as `error_percent.tick` and define it.
+Then, create a recording for the past time frame we want to fill.
+
+```bash
+$ kapacitor define error_percent -tick error_percent.tick
+$ kapacitor record batch -task error_percent -past 1d
+```
+
+Grab the recording ID and replay the historical data against the task.
+Here we specify the `-rec-time` flag to instruct Kapacitor to use the actual
+time stored in the recording when processing the data instead of adjusting to the present time.
+
+```bash
+$ kapacitor replay -task error_percent -recording RECORDING_ID -rec-time
+```
+
+If the data set is too large to keep in one recording, define a specific range of time to record
+and then replay each range individually.
+
+```bash
+rid=$(kapacitor record batch -task error_percent -start 2015-10-01 -stop 2015-10-02)
+echo $rid
+kapacitor replay -task error_percent -recording $rid -rec-time
+kapacitor delete recordings $rid
+```
+
+Just loop through the above script for each time window and reconstruct all the historical data needed.
+With that, the `error_percent` for every minute will be backfilled for the historical data.
+
+### Stream method
+
+With the streaming case, something similar can be done. Note that the
+`kapacitor record stream` command does not offer a historical option like `-past`,
+so backfilling using a _stream_ task directly in Kapacitor is not possible. If
+backfilling is required, the command [`kapacitor record query`](#record-query-and-backfill-with-stream),
+presented below, can also be used.
+
+Nevertheless, the same TICKscript semantics can be used with a _stream_ task
+to calculate and store a new calculated value, such as `error_percent`, in real time.
+
+The following is just such a TICKscript.
+
+```javascript
+dbrp "pages"."autogen"
+
+// Get errors stream data
+var errors = stream
+    |from()
+        .measurement('errors')
+        .groupBy(*)
+    |window()
+        .period(1m)
+        .every(1m)
+    |sum('value')
+
+// Get views stream data
+var views = stream
+    |from()
+        .measurement('views')
+        .groupBy(*)
+    |window()
+        .period(1m)
+        .every(1m)
+    |sum('value')
+
+// Join errors and views
+errors
+    |join(views)
+        .as('errors', 'views')
+    // Calculate percentage
+    |eval(lambda: ("errors.sum" / ("views.sum" + "errors.sum")) * 100.0)
+        // Give the resulting field a name
+        .as('value')
+    |influxDBOut()
+        .database('pages')
+        .measurement('error_percent')
+```
+
+### Record Query and backfill with stream
+
+To provide historical data to stream tasks that process multiple measurements,
+use [multiple statements](/influxdb/latest/query_language/data_exploration/#multiple-statements)
+when recording the data.
+
+First use `record query` following the pattern of this generic command:
+
+```
+kapacitor record query -query $'select field1,field2,field3 from "database_name"."autogen"."one" where time > \'YYYY-mm-ddTHH:MM:SSZ\' and time < \'YYYY-mm-ddTHH:MM:SSZ\' GROUP BY *; select field1,field2,field3 from "database_name"."autogen"."two" where time > \'YYYY-mm-ddTHH:MM:SSZ\' and time < \'YYYY-mm-ddTHH:MM:SSZ\' GROUP BY *' -type stream
+```
+
+For example:
+
+```bash
+$ kapacitor record query -query $'select value from "pages"."autogen"."errors" where time > \'2018-05-30T12:00:00Z\' and time < \'2018-05-31T12:00:00Z\' GROUP BY *; select value from "pages"."autogen"."views" where time > \'2018-05-30T12:00:00Z\' and time < \'2018-12-21T12:00:00Z\' GROUP BY *' -type stream
+578bf299-3566-4813-b07b-744da6ab081a
+```
+
+The returned recording ID can then be used in a Kapacitor `replay` command using
+the recorded time.
+
+```bash
+$ kapacitor replay -task error_percent_s -recording 578bf299-3566-4813-b07b-744da6ab081a -rec-time
+c623f73c-cf2a-4fce-be4c-9ab89f0c6045
+```
diff --git a/content/kapacitor/v1.5/guides/live_leaderboard.md b/content/kapacitor/v1.5/guides/live_leaderboard.md
new file mode 100644
index 000000000..c17af2b83
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/live_leaderboard.md
@@ -0,0 +1,291 @@
+---
+title: Live leaderboard of game scores
+description: Tutorial on using Kapacitor stream processing and Chronograf to build a leaderboard for gamers to be able to see player scores in realtime. Historical data is also available for post-game analysis.
+aliases:
+  - kapacitor/v1.5/examples/live_leaderboard/
+menu:
+  kapacitor_1_5:
+    name: Live leaderboard
+    identifier: live_leaderboard
+    weight: 10
+    parent: guides
+---
+
+**If you do not have a running Kapacitor instance, check out the [getting started guide](/kapacitor/v1.5/introduction/getting-started/)
+to get Kapacitor up and running on localhost.**
+
+Today we are game developers.
+We host several game servers, each running an instance of the game code, with about a hundred players per game.
+
+We need to build a leaderboard so spectators can see the players' scores in real time.
+We would also like to have historical data on leaders in order to do post-game
+analysis on who was leading and for how long.
+
+We will use Kapacitor's stream processing to do the heavy lifting for us.
+The game servers can send a UDP packet anytime a player's score changes
+or at least every 10 seconds if the score hasn't changed.
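+
+Each score update is a single point in InfluxDB line protocol.
+For example, a packet reporting a score of `950` for a hypothetical player `p42` in game `g7` would contain:
+
+```
+scores,player=p42,game=g7 value=950
+```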
+
+### Setup
+
+**All snippets below can be found [here](https://github.com/influxdata/kapacitor/tree/master/examples/scores)**
+
+Our first order of business is to configure Kapacitor to receive the stream of scores.
+In this case the scores update too often to store all of them in InfluxDB, so we will send them directly to Kapacitor.
+Like InfluxDB, you can configure a UDP listener.
+Add this configuration section to the end of your Kapacitor configuration.
+
+```
+[[udp]]
+  enabled = true
+  bind-address = ":9100"
+  database = "game"
+  retention-policy = "autogen"
+```
+
+This configuration tells Kapacitor to listen on port `9100` for UDP packets in the line protocol format.
+It will scope incoming data to the `game.autogen` database and retention policy.
+Start Kapacitor with this added to the configuration.
+
+Here is a simple bash script to generate random score data so we can test it without
+messing with the real game servers.
+
+```bash
+#!/bin/bash
+
+# default options: can be overridden with corresponding arguments.
+host=${1-localhost}
+port=${2-9100}
+games=${3-10}
+players=${4-100}
+
+games=$(seq $games)
+players=$(seq $players)
+# Spam score updates over UDP
+while true
+do
+    for game in $games
+    do
+        game="g$game"
+        for player in $players
+        do
+            player="p$player"
+            score=$(($RANDOM % 1000))
+            echo "scores,player=$player,game=$game value=$score" > /dev/udp/$host/$port
+        done
+    done
+    sleep 0.1
+done
+```
+
+Place the above script into a file `scores.sh` and run it:
+
+```bash
+chmod +x ./scores.sh
+./scores.sh
+```
+
+Now we are spamming Kapacitor with our fake score data.
+We can just leave that running, since Kapacitor will drop
+the incoming data until it has a task that wants it.
+
+### Defining the Kapacitor task
+
+What does a leaderboard need to do?
+
+1. Get the most recent score per player per game.
+1. Calculate the top X player scores per game.
+1. Publish the results.
+1. Store the results.
+
+To complete step one, we need to buffer the incoming stream and return the most recent score update per player per game.
+Our [TICKscript](/kapacitor/v1.5/tick/) will look like this:
+
+```js
+var topPlayerScores = stream
+    |from()
+        .measurement('scores')
+        // Get the most recent score for each player per game.
+        // Not likely that a player is playing two games but just in case.
+        .groupBy('game', 'player')
+    |window()
+        // keep a buffer of the last 11s of scores
+        // just in case a player score hasn't updated in a while
+        .period(11s)
+        // Emit the current score per player every second.
+        .every(1s)
+        // Align the window boundaries to be on the second.
+        .align()
+    |last('value')
+```
+
+Place this script in a file called `top_scores.tick`.
+
+Now our `topPlayerScores` variable contains each player's most recent score.
+Next, to calculate the top scores per game, we just need to group by game and run another aggregation.
+Let's keep the top 15 scores per game.
+Add these lines to the `top_scores.tick` file.
+
+```js
+// Calculate the top 15 scores per game
+var topScores = topPlayerScores
+    |groupBy('game')
+    |top(15, 'last', 'player')
+```
+
+The `topScores` variable now contains the top 15 player scores per game,
+which is everything we need to build our leaderboard.
+Kapacitor can expose the scores over HTTP via the [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/).
+We will call our task `top_scores`; with the following addition the most recent scores will be available at
+`http://localhost:9092/kapacitor/v1/tasks/top_scores/top_scores`.
+
+```js
+// Expose top scores over the HTTP API at the 'top_scores' endpoint.
+// Now your app can just request the top scores from Kapacitor
+// and always get the most recent result.
+//
+// http://localhost:9092/kapacitor/v1/tasks/top_scores/top_scores
+topScores
+    |httpOut('top_scores')
+```
+
+Finally, we want to store the top scores over time so we can do in-depth analysis to ensure the best game play.
+But we do not want to store the scores every second, as that is still too much data.
+First we will sample the data and store scores only every 10 seconds.
+Also, let's do some basic analysis ahead of time, since we already have a stream of all the data.
+For now we will just do basic gap analysis, where we will store the gap between the top player and the 15th player.
+Add these lines to `top_scores.tick` to complete our task.
+
+```js
+// Sample the top scores and keep a score once every 10s
+var topScoresSampled = topScores
+    |sample(10s)
+
+// Store top fifteen player scores in InfluxDB.
+topScoresSampled
+    |influxDBOut()
+        .database('game')
+        .measurement('top_scores')
+
+// Calculate the max and min of the top scores.
+var max = topScoresSampled
+    |max('top')
+
+var min = topScoresSampled
+    |min('top')
+
+// Join the max and min streams back together and calculate the gap.
+max
+    |join(min)
+        .as('max', 'min')
+    // Calculate the difference between the max and min scores.
+    // Rename the max and min fields to more friendly names 'topFirst', 'topLast'.
+    |eval(lambda: "max.max" - "min.min", lambda: "max.max", lambda: "min.min")
+        .as('gap', 'topFirst', 'topLast')
+    // Store the fields: gap, topFirst and topLast in InfluxDB.
+    |influxDBOut()
+        .database('game')
+        .measurement('top_scores_gap')
+```
+
+Since we are writing data back to InfluxDB, create a database `game` for our results.
+
+```
+curl -G 'http://localhost:8086/query?' --data-urlencode 'q=CREATE DATABASE game'
+```
+
+Here is the complete task TICKscript if you don't want to copy-paste as much :)
+
+```js
+dbrp "game"."autogen"
+
+// Define a result that contains the most recent score per player.
+var topPlayerScores = stream
+    |from()
+        .measurement('scores')
+        // Get the most recent score for each player per game.
+        // Not likely that a player is playing two games but just in case.
+        .groupBy('game', 'player')
+    |window()
+        // keep a buffer of the last 11s of scores
+        // just in case a player score hasn't updated in a while
+        .period(11s)
+        // Emit the current score per player every second.
+        .every(1s)
+        // Align the window boundaries to be on the second.
+        .align()
+    |last('value')
+
+// Calculate the top 15 scores per game
+var topScores = topPlayerScores
+    |groupBy('game')
+    |top(15, 'last', 'player')
+
+// Expose top scores over the HTTP API at the 'top_scores' endpoint.
+// Now your app can just request the top scores from Kapacitor
+// and always get the most recent result.
+//
+// http://localhost:9092/kapacitor/v1/tasks/top_scores/top_scores
+topScores
+    |httpOut('top_scores')
+
+// Sample the top scores and keep a score once every 10s
+var topScoresSampled = topScores
+    |sample(10s)
+
+// Store top fifteen player scores in InfluxDB.
+topScoresSampled
+    |influxDBOut()
+        .database('game')
+        .measurement('top_scores')
+
+// Calculate the max and min of the top scores.
+var max = topScoresSampled
+    |max('top')
+
+var min = topScoresSampled
+    |min('top')
+
+// Join the max and min streams back together and calculate the gap.
+max
+    |join(min)
+        .as('max', 'min')
+    // calculate the difference between the max and min scores.
+    |eval(lambda: "max.max" - "min.min", lambda: "max.max", lambda: "min.min")
+        .as('gap', 'topFirst', 'topLast')
+    // store the fields: gap, topFirst, and topLast in InfluxDB.
+    |influxDBOut()
+        .database('game')
+        .measurement('top_scores_gap')
+```
+
+Define and enable our task to see it in action:
+
+```bash
+kapacitor define top_scores -tick top_scores.tick
+kapacitor enable top_scores
+```
+
+First, let's check that the HTTP output is working.
+
+```bash
+curl 'http://localhost:9092/kapacitor/v1/tasks/top_scores/top_scores'
+```
+
+You should have a JSON result of the top 15 players and their scores per game.
+Hit the endpoint several times to see that the scores are updating once a second.
+
+Now, let's check InfluxDB to see our historical data.
+
+```bash
+curl \
+    -G 'http://localhost:8086/query?db=game' \
+    --data-urlencode 'q=SELECT * FROM top_scores WHERE time > now() - 5m GROUP BY game'
+
+curl \
+    -G 'http://localhost:8086/query?db=game' \
+    --data-urlencode 'q=SELECT * FROM top_scores_gap WHERE time > now() - 5m GROUP BY game'
+```
+
+Great!
+The hard work is done.
+All that is left is to configure the game server to send score updates to Kapacitor and update the spectator dashboard to pull scores from Kapacitor.
diff --git a/content/kapacitor/v1.5/guides/load_directory.md b/content/kapacitor/v1.5/guides/load_directory.md
new file mode 100644
index 000000000..4019ffc35
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/load_directory.md
@@ -0,0 +1,181 @@
+---
+title: Load directory service
+aliases:
+  - kapacitor/v1.5/examples/load_directory/
+menu:
+  kapacitor_1_5:
+    name: Load directory service
+    identifier: load_directory
+    weight: 15
+    parent: guides
+---
+
+# File-based definitions of tasks, templates, and topic handlers
+
+The load directory service enables file-based definitions of Kapacitor tasks, templates, and topic handlers that are loaded on startup or when a SIGHUP signal is sent to the process.
+
+## Configuration
+The load directory service configuration is specified in the `[load]` section of the Kapacitor configuration file.
+
+```
+[load]
+  enabled = true
+  dir = "/path/to/directory"
+```
+
+`dir` specifies the directory where the definition files are located.
+
+The service will attempt to load the definitions from three subdirectories.
+
+The `tasks` directory should contain task TICKscripts and the associated templated task definition files (either YAML or JSON).
+
+The `templates` directory should contain templated TICKscripts.
+
+The `handlers` directory should contain topic handler definitions in YAML or JSON.
+
+## Tasks
+
+Task files must be placed in the `tasks` subdirectory of the load service
+directory. Task TICKscripts are specified based on the following scheme:
+
+* `id` - the file name without the `.tick` extension
+* `type` - determined by introspection of the task (stream or batch)
+* `dbrp` - defined using the `dbrp` keyword followed by a specified database and retention policy
+
+In the following example, the TICKscript will create a `stream` task named `my_task` for the dbrp `telegraf.autogen`.
+
+```
+// /path/to/directory/tasks/my_task.tick
+dbrp "telegraf"."autogen"
+
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy(*)
+    |alert()
+        .warn(lambda: "usage_idle" < 20)
+        .crit(lambda: "usage_idle" < 10)
+        // Send alerts to the `cpu` topic
+        .topic('cpu')
+```
+
+
+## Task templates
+
+Template files must be placed in the `templates` subdirectory of the load service directory.
+Task templates are defined according to the following scheme:
+
+* `id` - the file name without the `.tick` extension
+* `type` - determined by introspection of the task (stream or batch)
+* `dbrp` - defined using the `dbrp` keyword followed by a specified database and retention policy
+
+The following TICKscript example will create a `stream` template named `my_template` for the dbrp `telegraf.autogen`.
+
+```
+// /path/to/directory/templates/my_template.tick
+dbrp "telegraf"."autogen"
+
+var measurement string
+var where_filter = lambda: TRUE
+var groups = [*]
+var field string
+var warn lambda
+var crit lambda
+var window = 5m
+var slack_channel = '#alerts'
+
+stream
+    |from()
+        .measurement(measurement)
+        .where(where_filter)
+        .groupBy(groups)
+    |window()
+        .period(window)
+        .every(window)
+    |mean(field)
+    |alert()
+        .warn(warn)
+        .crit(crit)
+        .slack()
+        .channel(slack_channel)
+```
+
+### Templated tasks
+
+Templated task files must be placed in the `tasks` subdirectory of the load service directory.
+Templated tasks are defined according to the following scheme:
+
+* `id` - the file name without the `.yaml`, `.yml`, or `.json` extension
+* `dbrps` - required if not specified in the template
+* `template-id` - required
+* `vars` - list of template vars
+
+In this example, the templated task YAML file creates a `stream` task, named `my_templated_task`, for the dbrp `telegraf.autogen`.
+
+```yaml
+# /path/to/directory/tasks/my_templated_task.yaml
+dbrps:
+  - { db: "telegraf", rp: "autogen"}
+template-id: my_template
+vars:
+  measurement:
+    type: string
+    value: cpu
+  where_filter:
+    type: lambda
+    value: "\"cpu\" == 'cpu-total'"
+  groups:
+    type: list
+    value:
+      - type: string
+        value: host
+      - type: string
+        value: dc
+  field:
+    type: string
+    value: usage_idle
+  warn:
+    type: lambda
+    value: "\"mean\" < 30.0"
+  crit:
+    type: lambda
+    value: "\"mean\" < 10.0"
+  window:
+    type: duration
+    value: 1m
+  slack_channel:
+    type: string
+    value: "#alerts_testing"
+```
+
+The same task can also be created using JSON, as in this example:
+
+```json
+{
+  "dbrps": [{"db": "telegraf", "rp": "autogen"}],
+  "template-id": "my_template",
+  "vars": {
+    "measurement": {"type" : "string", "value" : "cpu" },
+    "where_filter": {"type": "lambda", "value": "\"cpu\" == 'cpu-total'"},
+    "groups": {"type": "list", "value": [{"type":"string", "value":"host"},{"type":"string", "value":"dc"}]},
+    "field": {"type" : "string", "value" : "usage_idle" },
+    "warn": {"type" : "lambda", "value" : "\"mean\" < 30.0" },
+    "crit": {"type" : "lambda", "value" : "\"mean\" < 10.0" },
+    "window": {"type" : "duration", "value" : "1m" },
+    "slack_channel": {"type" : "string", "value" : "#alerts_testing" }
+  }
+}
+```
+
+## Topic handlers
+
+Topic handler files must be placed in the `handlers` subdirectory of the load service directory.
+
+```yaml
+id: handler-id
+topic: cpu
+kind: slack
+match: changed() == TRUE
+options:
+  channel: '#alerts'
+```
diff --git a/content/kapacitor/v1.5/guides/reference_scripts.md b/content/kapacitor/v1.5/guides/reference_scripts.md
new file mode 100644
index 000000000..5f8caf5f3
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/reference_scripts.md
@@ -0,0 +1,17 @@
+---
+title: Reference TICKscripts
+aliases:
+  - kapacitor/v1.5/examples/reference_scripts/
+menu:
+  kapacitor_1_5:
+    name: Reference TICKscripts
+    identifier: reference_scripts
+    weight: 20
+    parent: guides
+---
+
+The Kapacitor repository has a number of [example TICKscripts](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).
+These scripts use common [Telegraf plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs)
+as the data source and show how to build common alerts.
+Telegraf plugins with example scripts include "cpu", "disk", "mem", and
+"netstat" metrics from the [`system` plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system).
diff --git a/content/kapacitor/v1.5/guides/scheduled-downtime.md b/content/kapacitor/v1.5/guides/scheduled-downtime.md
new file mode 100644
index 000000000..d89ccad0f
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/scheduled-downtime.md
@@ -0,0 +1,207 @@
+---
+title: Handling Kapacitor alerts during scheduled downtime
+description: This guide walks through building Kapacitor TICKscripts that gracefully handle scheduled downtime without triggering unnecessary alerts.
+menu:
+  kapacitor_1_5:
+    name: Handling scheduled downtime
+    parent: guides
+    weight: 100
+---
+
+In many cases, infrastructure downtime is necessary to perform system maintenance.
+This type of downtime is typically scheduled beforehand, but can trigger unnecessary
+alerts if the affected hosts are monitored by Kapacitor.
+This guide walks through creating TICKscripts that gracefully handle scheduled downtime
+without triggering alerts.
+
+## Sideload
+Avoid unnecessary alerts during scheduled downtime by using the
+[`sideload`](/kapacitor/v1.5/nodes/sideload_node) node to load information from
+files in the filesystem and set fields and tags on data points, which can then be used in alert logic.
+The `sideload` node adds fields and tags to points based on hierarchical data
+from various file-based sources.
+
+Kapacitor searches the specified files for a given field or tag key.
+If it finds the field or tag key in the loaded files, it uses the value in the files to
+set the field or tag on data points.
+If it doesn't find the field or tag key, it sets them to the default value defined
+in the [`field` or `tag` properties](#field).
+
+### Relevant sideload properties
+The following properties of `sideload` are relevant to gracefully handling scheduled downtime:
+
+#### source
+`source` specifies a directory in which source files live.
+
+#### order
+`order` specifies which files are loaded and searched, and the order in which they are loaded and searched.
+_Filepaths are relative to the `source` directory.
+Files should be either JSON or YAML._
+
+#### field
+`field` defines a field key that Kapacitor should search for and the default value
+it should use if it doesn't find a matching field key in the loaded files.
+
+#### tag
+`tag` defines a tag key that Kapacitor should search for and the default value
+it should use if it doesn't find a matching tag key in the loaded files.
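+
+Taken together, these properties might be combined as follows. This is a minimal sketch; the source path and file template are illustrative, and the sections below build a complete working example.
+
+```js
+|sideload()
+    // Directory containing the YAML or JSON source files
+    .source('file:///path/to/source/dir')
+    // Files to load and search, in order of precedence,
+    // relative to the source directory
+    .order('hosts/{{.host}}.yml')
+    // Set the 'maintenance' field from the loaded files,
+    // defaulting to FALSE when no match is found
+    .field('maintenance', FALSE)
+```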
+
+## Setup
+With the `sideload` function, you can create what is essentially a white- or
+black-list of hosts to ignore during scheduled downtime.
+For this example, assume that maintenance will happen on both individual hosts
+and hostgroups, both of which are included as tags on each point in the data set.
+
+_In most cases, this can be done simply by host, but to illustrate how the `order`
+property works, we'll use both host and hostgroup._
+
+### Sideload source files
+On the host on which Kapacitor is running, create a source directory that will
+house the JSON or YAML files.
+For example, `/usr/kapacitor/scheduled-maintenance`.
+(_It can be whatever you want as long as the `kapacitord` process can access it._)
+
+Inside this directory, create a file for each host or host group that will be
+offline during the scheduled downtime.
+For the sake of organization, create `hosts` and `hostgroups` directories
+and store the YAML or JSON files in each.
+The names of each file should match a value of a `host` or `hostgroup` tag
+for hosts that will be taken offline.
+
+For this example, assume the **host1**, **host2**, **host3** hosts and the
+**cluster7** and **cluster8** hostgroups will be taken offline.
+Create a file for each of these hosts and host groups in their respective directories:
+
+```
+/usr/
+└── kapacitor/
+    └── scheduled-maintenance/
+        │
+        ├── hosts/
+        │   ├── host1.yml
+        │   ├── host2.yml
+        │   └── host3.yml
+        │
+        └── hostgroups/
+            ├── cluster7.yml
+            └── cluster8.yml
+```
+
+> You only need to create files for hosts or hostgroups that will be offline.
+
+Each file should contain one or more key-value pairs.
+The key is the field or tag key that will be set on each matching point.
+The value is the field or tag value that will be set on matching points.
+
+For this example, set the `maintenance` field to `true`.
+Each of the source files will look like the following:
+
+###### host1.yml
+```yaml
+maintenance: true
+```
+
+## TICKscript
+Create a TICKscript that uses the `sideload` node to load in the maintenance state wherever it is needed.
+
+### Define the sideload source
+The `source` should use the `file://` URL protocol to reference the absolute path
+of the directory containing the files that should be loaded.
+
+```js
+|sideload()
+    .source('file:///usr/kapacitor/scheduled-maintenance')
+```
+
+### Define the sideload order
+The `order` property has access to template data which should be used to populate
+the filepaths for loaded files (relative to the [`source`](#define-the-sideload-source)).
+This allows Kapacitor to dynamically search for files based on the tag name used in the template.
+
+In this case, use the `host` and `hostgroup` tags.
+Kapacitor will iterate through the different values for each tag and search for
+matching files in the source directory.
+
+```js
+|sideload()
+    .source('file:///usr/kapacitor/scheduled-maintenance')
+    .order('hosts/{{.host}}.yml', 'hostgroups/{{.hostgroup}}.yml')
+```
+
+The order of file path templates in the `order` property defines
+the precedence in which file paths are checked.
+Those listed first, from left to right, are checked first.
+
+### Define the sideload field
+The `field` property requires two arguments:
+
+```js
+|sideload()
+    // ...
+    .field('<key>', <default-value>)
+```
+
+###### key
+The key that Kapacitor looks for in the source files and the field for which it
+defines a value on each data point.
+
+###### default-value
+The default value used if no matching file and key are found in the source files.
+
+In this example, use the `maintenance` field and set the default value to `FALSE`.
+This assumes hosts are not undergoing maintenance by default.
+
+```js
+|sideload()
+    .source('file:///usr/kapacitor/scheduled-maintenance')
+    .order('hosts/{{.host}}.yml', 'hostgroups/{{.hostgroup}}.yml')
+    .field('maintenance', FALSE)
+```
+
+> You can use the `tag` property instead of `field` if you prefer to set a tag
+> on each data point rather than a field.
+
+### Update alert logic
+The `sideload` node will now set the `maintenance` field on every data point processed by the TICKscript.
+For those that have `host` or `hostgroup` tags matching the filenames of the source files,
+the `maintenance` field will be set to the value defined in the source file.
+
+Update the alert logic in your TICKscript to ensure `maintenance` is **not** `true`
+before sending an alert:
+
+```js
+stream
+    // ...
+    |alert()
+        .crit(lambda: !"maintenance" AND "usage_idle" < 30)
+        .warn(lambda: !"maintenance" AND "usage_idle" < 50)
+        .info(lambda: !"maintenance" AND "usage_idle" < 70)
+```
+
+### Full TICKscript example
+```js
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy(*)
+    // Use sideload to maintain the host maintenance state.
+    // By default we assume a host is not undergoing maintenance.
+    |sideload()
+        .source('file:///usr/kapacitor/scheduled-maintenance')
+        .order('hosts/{{.host}}.yml', 'hostgroups/{{.hostgroup}}.yml')
+        .field('maintenance', FALSE)
+    |alert()
+        // Add the `!"maintenance"` condition to the alert.
+        .crit(lambda: !"maintenance" AND "usage_idle" < 30)
+        .warn(lambda: !"maintenance" AND "usage_idle" < 50)
+        .info(lambda: !"maintenance" AND "usage_idle" < 70)
+```
+
+## Prepare for scheduled downtime
+[Define a new Kapacitor task](/kapacitor/v1.5/working/cli_client/#tasks-and-task-templates) using your updated TICKscript.
+
+As your scheduled downtime begins, update the `maintenance` value in the appropriate
+host and host group source files and reload sideload to avoid alerts being triggered
+for those specific hosts and host groups.
diff --git a/content/kapacitor/v1.5/guides/socket_udf.md b/content/kapacitor/v1.5/guides/socket_udf.md
new file mode 100644
index 000000000..89514b6a6
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/socket_udf.md
@@ -0,0 +1,655 @@
+---
+title: Writing socket-based user-defined functions (UDFs)
+aliases:
+  - kapacitor/v1.5/examples/socket_udf/
+menu:
+  kapacitor_1_5:
+    name: Writing socket-based UDFs
+    identifier: socket_udf
+    weight: 40
+    parent: guides
+---
+
+In [another example](/kapacitor/v1.5/guides/anomaly_detection/) we saw how to write a process-based UDF for custom anomaly detection workloads.
+In this example we are going to learn how to write a simple socket-based UDF.
+
+## What is a user-defined function (UDF)?
+
+A UDF is a user-defined function that can communicate with Kapacitor to process data.
+Kapacitor will send it data and the UDF can respond with new or modified data.
+A UDF can be written in any language that has [protocol buffer](https://developers.google.com/protocol-buffers/) support.
+
+## What is the difference between a socket UDF and a process UDF?
+
+* A process UDF is a child process of Kapacitor that communicates over STDIN/STDOUT with Kapacitor and is completely managed by Kapacitor.
+* A socket UDF is a process external to Kapacitor that communicates over a configured unix domain socket. The process itself is not managed by Kapacitor.
+
+Using a process UDF can be simpler than a socket UDF because Kapacitor will spawn the process and manage everything for you.
+On the other hand, you may want more control over the UDF process itself and rather expose only a socket to Kapacitor.
+One common use case is running Kapacitor in a Docker container and the UDF in another container that exposes the socket via a Docker volume.
+
+In both cases the protocol is the same; the only difference is the transport mechanism.
+Also note that since multiple Kapacitor tasks can use the same UDF, for a process-based UDF a new child process will be spawned for each use of the UDF.
+In contrast, for a socket-based UDF, a new connection will be made to the socket for each use of the UDF.
+If you have many uses of the same UDF, it may be better to use a socket UDF to keep the number of running processes low.
+
+
+## Writing a UDF
+
+A UDF communicates with Kapacitor via a protocol buffer request/response system.
+We provide implementations of that communication layer in both Go and Python.
+Since the other example used Python, we will use the Go version here.
+
+Our example is going to implement a `mirror` UDF which simply reflects all data it receives back to the Kapacitor server.
+This example is actually part of the test suite, and Python and Go implementations can be found [here](https://github.com/influxdata/kapacitor/tree/master/udf/agent/examples/mirror).
+
+
+### Lifecycle
+
+Before we write any code, let's look at the lifecycle of a socket UDF:
+
+1. The UDF process is started, independently from Kapacitor.
+2. The process listens on a unix domain socket.
+3. Kapacitor connects to the socket and queries basic information about the UDF's options.
+4. A Kapacitor task is enabled that uses the UDF, and Kapacitor makes a new connection to the socket.
+5. The task reads and writes data over the socket connection.
+6. If the task is stopped for any reason, the socket connection is closed.
+
+### The Main method
+
+We need to write a program that starts up and listens on a socket.
+The following code is a main function that listens on a socket at
+a default path, or on a custom path specified via the `-socket` flag.
+
+```go
+package main
+
+import (
+    "flag"
+    "log"
+    "net"
+)
+
+var socketPath = flag.String("socket", "/tmp/mirror.sock", "Where to create the unix socket")
+
+func main() {
+    flag.Parse()
+
+    // Create unix socket
+    addr, err := net.ResolveUnixAddr("unix", *socketPath)
+    if err != nil {
+        log.Fatal(err)
+    }
+    l, err := net.ListenUnix("unix", addr)
+    if err != nil {
+        log.Fatal(err)
+    }
+    // The listener l will be handed to the agent server later;
+    // the blank assignment keeps this intermediate snippet compiling.
+    _ = l
+
+    // More to come here...
+}
+```
+
+Place the above code in a file called `main.go` in a scratch directory.
+The above code can be run via `go run main.go`, but at this point it will exit immediately after creating the socket.
+
+### The Agent
+
+As mentioned earlier, Kapacitor provides an implementation of the communication layer for UDFs called the `agent`.
+Our code need only implement an interface in order to take advantage of the `agent` logic.
+
+The interface we need to implement is as follows:
+
+```go
+// The Agent calls the appropriate methods on the Handler as it receives requests over a socket.
+//
+// Returning an error from any method will cause the Agent to stop and an ErrorResponse to be sent.
+// Some *Response objects (like SnapshotResponse) allow for returning their own error within the object itself.
+// These types of errors will not stop the Agent and Kapacitor will deal with them appropriately.
+//
+// The Handler is called from a single goroutine, meaning methods will not be called concurrently.
+//
+// To write Points/Batches back to the Agent/Kapacitor use the Agent.Responses channel.
+type Handler interface {
+    // Return the InfoResponse, describing the properties of this Handler.
+    Info() (*agent.InfoResponse, error)
+    // Initialize the Handler with the provided options.
+    Init(*agent.InitRequest) (*agent.InitResponse, error)
+    // Create a snapshot of the running state of the handler.
+    Snapshot() (*agent.SnapshotResponse, error)
+    // Restore a previous snapshot.
+    Restore(*agent.RestoreRequest) (*agent.RestoreResponse, error)
+
+    // A batch has begun.
+    BeginBatch(*agent.BeginBatch) error
+    // A point has arrived.
+    Point(*agent.Point) error
+    // The batch is complete.
+    EndBatch(*agent.EndBatch) error
+
+    // Gracefully stop the Handler.
+    // No other methods will be called.
+    Stop()
+}
+```
+
+### The Handler
+
+Let's define our own type so we can start implementing the `Handler` interface.
+Update the `main.go` file as follows:
+
+```go
+package main
+
+import (
+    "flag"
+    "log"
+    "net"
+
+    "github.com/influxdata/kapacitor/udf/agent"
+)
+
+
+
+// Mirrors all points it receives back to Kapacitor
+type mirrorHandler struct {
+    // We need a reference to the agent so we can write data
+    // back to Kapacitor.
+    agent *agent.Agent
+}
+
+func newMirrorHandler(agent *agent.Agent) *mirrorHandler {
+    return &mirrorHandler{agent: agent}
+}
+
+var socketPath = flag.String("socket", "/tmp/mirror.sock", "Where to create the unix socket")
+
+func main() {
+    flag.Parse()
+
+    // Create unix socket
+    addr, err := net.ResolveUnixAddr("unix", *socketPath)
+    if err != nil {
+        log.Fatal(err)
+    }
+    l, err := net.ListenUnix("unix", addr)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // More to come here...
+}
+```
+
+Now let's add in each of the methods needed to initialize the UDF.
+These next methods implement the behavior described in Step 3 of the UDF Lifecycle above,
+where Kapacitor connects to the socket in order to query basic information about the UDF.
+
+Add these methods to the `main.go` file:
+
+```go
+
+// Return the InfoResponse, describing the properties of this UDF agent.
+func (*mirrorHandler) Info() (*agent.InfoResponse, error) {
+    info := &agent.InfoResponse{
+        // We want a stream edge
+        Wants: agent.EdgeType_STREAM,
+        // We provide a stream edge
+        Provides: agent.EdgeType_STREAM,
+        // We expect no options.
+        Options: map[string]*agent.OptionInfo{},
+    }
+    return info, nil
+}
+
+// Initialize the handler based on the provided options.
+func (*mirrorHandler) Init(r *agent.InitRequest) (*agent.InitResponse, error) {
+    // Since we expect no options, this method is trivial
+    // and we return success.
+    init := &agent.InitResponse{
+        Success: true,
+        Error:   "",
+    }
+    return init, nil
+}
+```
+
+For now, our simple mirroring UDF doesn't need any options, so these methods are trivial.
+At the end of this example we will modify the code to accept a custom option.
+
+Now that Kapacitor knows which edge types and options our UDF uses, we need to implement the methods
+for handling data.
+
+Add this method to the `main.go` file; it sends back every point it receives to Kapacitor via the agent:
+
+```go
+func (h *mirrorHandler) Point(p *agent.Point) error {
+    // Send back the point we just received
+    h.agent.Responses <- &agent.Response{
+        Message: &agent.Response_Point{
+            Point: p,
+        },
+    }
+    return nil
+}
+```
+
+Notice that the `agent` has a channel for responses.
+This is because your UDF can send data to Kapacitor at any time; it does not have to be in response to receiving a point.
+
+As a result, we need to close the channel to let the `agent` know
+that we will not be sending any more data, which can be done via the `Stop` method.
+Once the `agent` calls `Stop` on the `handler`, no other methods will be called, and the `agent` won't stop until
+the channel is closed.
+This gives the UDF the chance to flush out any remaining data before it's shut down:
+
+```go
+// Stop the handler gracefully.
+func (h *mirrorHandler) Stop() {
+    // Close the channel since we won't be sending any more data to Kapacitor
+    close(h.agent.Responses)
+}
+```
+
+Even though we have implemented the majority of the handler, a few methods are still missing.
+Specifically, the methods around batching and snapshot/restore are missing, but since we don't need them, we will just give them trivial implementations:
+
+```go
+// Create a snapshot of the running state of the process.
+func (*mirrorHandler) Snapshot() (*agent.SnapshotResponse, error) {
+    return &agent.SnapshotResponse{}, nil
+}
+
+// Restore a previous snapshot.
+func (*mirrorHandler) Restore(req *agent.RestoreRequest) (*agent.RestoreResponse, error) {
+    return &agent.RestoreResponse{
+        Success: true,
+    }, nil
+}
+
+// Start working with the next batch
+func (*mirrorHandler) BeginBatch(begin *agent.BeginBatch) error {
+    return errors.New("batching not supported")
+}
+
+func (*mirrorHandler) EndBatch(end *agent.EndBatch) error {
+    return nil
+}
+```
+
+### The Server
+
+At this point we have a complete implementation of the `Handler` interface.
+In step #4 of the Lifecycle above, Kapacitor makes a new connection to the UDF for each use in a task.
+Since it's possible that our UDF process will handle multiple connections simultaneously, we need a mechanism for creating a new `agent` and `handler` per connection.
+
+A `server` is provided for this purpose, which expects an implementation of the `Accepter` interface:
+
+```go
+type Accepter interface {
+    // Accept new connections from the listener and handle them accordingly.
+    // The typical action is to create a new Agent with the connection as both its in and out objects.
+    Accept(net.Conn)
+}
+```
+
+Here is a simple `accepter` that creates a new `agent` and `mirrorHandler`
+for each new connection. Add this to the `main.go` file:
+
+```go
+type accepter struct {
+    count int64
+}
+
+// Create a new agent/handler for each new connection.
+// Count and log each new connection and termination.
+func (acc *accepter) Accept(conn net.Conn) {
+    count := acc.count
+    acc.count++
+    a := agent.New(conn, conn)
+    h := newMirrorHandler(a)
+    a.Handler = h
+
+    log.Println("Starting agent for connection", count)
+    a.Start()
+    go func() {
+        err := a.Wait()
+        if err != nil {
+            log.Fatal(err)
+        }
+        log.Printf("Agent for connection %d finished", count)
+    }()
+}
+```
+
+Now with all the pieces in place, we can update our `main` function to
+start up the `server`.
+Replace the previously provided `main` function with:
+
+```go
+func main() {
+    flag.Parse()
+
+    // Create unix socket
+    addr, err := net.ResolveUnixAddr("unix", *socketPath)
+    if err != nil {
+        log.Fatal(err)
+    }
+    l, err := net.ListenUnix("unix", addr)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Create server that listens on the socket
+    s := agent.NewServer(l, &accepter{})
+
+    // Set up signal handler to stop the server on various signals
+    s.StopOnSignals(os.Interrupt, syscall.SIGTERM)
+
+    log.Println("Server listening on", addr.String())
+    err = s.Serve()
+    if err != nil {
+        log.Fatal(err)
+    }
+    log.Println("Server stopped")
+}
+```
+
+## Start the UDF
+
+At this point we are ready to start the UDF.
+Here is the complete `main.go` file for reference:
+
+```go
+package main
+
+import (
+    "errors"
+    "flag"
+    "log"
+    "net"
+    "os"
+    "syscall"
+
+    "github.com/influxdata/kapacitor/udf/agent"
+)
+
+// Mirrors all points it receives back to Kapacitor
+type mirrorHandler struct {
+    agent *agent.Agent
+}
+
+func newMirrorHandler(agent *agent.Agent) *mirrorHandler {
+    return &mirrorHandler{agent: agent}
+}
+
+// Return the InfoResponse, describing the properties of this UDF agent.
+func (*mirrorHandler) Info() (*agent.InfoResponse, error) {
+    info := &agent.InfoResponse{
+        Wants:    agent.EdgeType_STREAM,
+        Provides: agent.EdgeType_STREAM,
+        Options:  map[string]*agent.OptionInfo{},
+    }
+    return info, nil
+}
+
+// Initialize the handler based on the provided options.
+func (*mirrorHandler) Init(r *agent.InitRequest) (*agent.InitResponse, error) {
+    init := &agent.InitResponse{
+        Success: true,
+        Error:   "",
+    }
+    return init, nil
+}
+
+// Create a snapshot of the running state of the process.
+func (*mirrorHandler) Snapshot() (*agent.SnapshotResponse, error) {
+    return &agent.SnapshotResponse{}, nil
+}
+
+// Restore a previous snapshot.
+func (*mirrorHandler) Restore(req *agent.RestoreRequest) (*agent.RestoreResponse, error) {
+    return &agent.RestoreResponse{
+        Success: true,
+    }, nil
+}
+
+// Start working with the next batch
+func (*mirrorHandler) BeginBatch(begin *agent.BeginBatch) error {
+    return errors.New("batching not supported")
+}
+
+func (h *mirrorHandler) Point(p *agent.Point) error {
+    // Send back the point we just received
+    h.agent.Responses <- &agent.Response{
+        Message: &agent.Response_Point{
+            Point: p,
+        },
+    }
+    return nil
+}
+
+func (*mirrorHandler) EndBatch(end *agent.EndBatch) error {
+    return nil
+}
+
+// Stop the handler gracefully.
+func (h *mirrorHandler) Stop() {
+    close(h.agent.Responses)
+}
+
+type accepter struct {
+    count int64
+}
+
+// Create a new agent/handler for each new connection.
+// Count and log each new connection and termination.
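+// Note: Kapacitor opens a separate socket connection for each task that uses
+// this UDF, so Accept runs once per connection and agents may run concurrently.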
+func (acc *accepter) Accept(conn net.Conn) {
+    count := acc.count
+    acc.count++
+    a := agent.New(conn, conn)
+    h := newMirrorHandler(a)
+    a.Handler = h
+
+    log.Println("Starting agent for connection", count)
+    a.Start()
+    go func() {
+        err := a.Wait()
+        if err != nil {
+            log.Fatal(err)
+        }
+        log.Printf("Agent for connection %d finished", count)
+    }()
+}
+
+var socketPath = flag.String("socket", "/tmp/mirror.sock", "Where to create the unix socket")
+
+func main() {
+    flag.Parse()
+
+    // Create unix socket
+    addr, err := net.ResolveUnixAddr("unix", *socketPath)
+    if err != nil {
+        log.Fatal(err)
+    }
+    l, err := net.ListenUnix("unix", addr)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Create server that listens on the socket
+    s := agent.NewServer(l, &accepter{})
+
+    // Set up signal handler to stop the server on various signals
+    s.StopOnSignals(os.Interrupt, syscall.SIGTERM)
+
+    log.Println("Server listening on", addr.String())
+    err = s.Serve()
+    if err != nil {
+        log.Fatal(err)
+    }
+    log.Println("Server stopped")
+}
+```
+
+Run `go run main.go` to start the UDF.
+If you get an error about the socket being in use,
+just delete the socket file and try running the UDF again.
+
+## Configure Kapacitor to Talk to the UDF
+
+Now that our UDF is ready, we need to tell Kapacitor
+where our UDF socket is, and give it a name so that we can use it.
+Add this to your Kapacitor configuration file:
+
+```
+[udf]
+[udf.functions]
+    [udf.functions.mirror]
+        socket = "/tmp/mirror.sock"
+        timeout = "10s"
+```
+
+## Start Kapacitor
+
+Start up Kapacitor and you should see it connect to your UDF in both the Kapacitor logs and the UDF process logs.
+
+## Try it out
+
+Take an existing task and add `@mirror()` at any point in the TICKscript pipeline to see it in action.
+
+Here is an example TICKscript, which will need to be saved to a file:
+
+```js
+dbrp "telegraf"."autogen"
+
+stream
+    |from()
+        .measurement('cpu')
+    @mirror()
+    |alert()
+        .crit(lambda: "usage_idle" < 30)
+```
+
+Define the above alert from your terminal like so:
+
+```sh
+kapacitor define mirror_udf_example -tick path/to/above/script.tick
+```
+
+Start the task:
+
+```sh
+kapacitor enable mirror_udf_example
+```
+
+Check the status of the task:
+
+```sh
+kapacitor show mirror_udf_example
+```
+
+
+## Adding a Custom Field
+
+Now let's change the UDF to add a field to the data.
+We can use the `Info/Init` methods to define and consume an option on the UDF, so let's specify the name and value of the field to add.
+
+Update the `mirrorHandler` type and the methods `Info` and `Init` as follows:
+
+```go
+// Mirrors all points it receives back to Kapacitor
+type mirrorHandler struct {
+    agent *agent.Agent
+    name  string
+    value float64
+}
+
+// Return the InfoResponse, describing the properties of this UDF agent.
+func (*mirrorHandler) Info() (*agent.InfoResponse, error) {
+    info := &agent.InfoResponse{
+        Wants:    agent.EdgeType_STREAM,
+        Provides: agent.EdgeType_STREAM,
+        Options: map[string]*agent.OptionInfo{
+            "field": {ValueTypes: []agent.ValueType{
+                agent.ValueType_STRING,
+                agent.ValueType_DOUBLE,
+            }},
+        },
+    }
+    return info, nil
+}
+
+// Initialize the handler based on the provided options.
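+// Kapacitor delivers the values from the task's .field(name, value) call as a
+// single "field" option holding a string value followed by a double value.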
+func (h *mirrorHandler) Init(r *agent.InitRequest) (*agent.InitResponse, error) {
+    init := &agent.InitResponse{
+        Success: true,
+        Error:   "",
+    }
+    for _, opt := range r.Options {
+        switch opt.Name {
+        case "field":
+            h.name = opt.Values[0].Value.(*agent.OptionValue_StringValue).StringValue
+            h.value = opt.Values[1].Value.(*agent.OptionValue_DoubleValue).DoubleValue
+        }
+    }
+
+    if h.name == "" {
+        init.Success = false
+        init.Error = "must supply field"
+    }
+    return init, nil
+}
+```
+
+Now we can set the field with its name and value on the points.
+Update the `Point` method:
+
+```go
+func (h *mirrorHandler) Point(p *agent.Point) error {
+    // Send back the point we just received
+    if p.FieldsDouble == nil {
+        p.FieldsDouble = make(map[string]float64)
+    }
+    p.FieldsDouble[h.name] = h.value
+
+    h.agent.Responses <- &agent.Response{
+        Message: &agent.Response_Point{
+            Point: p,
+        },
+    }
+    return nil
+}
+```
+
+Restart the UDF process and try it out again.
+Specify which field name and value to use with the `.field(name, value)` method.
+You can add a `|log()` after the `mirror` UDF to see that the new field has indeed been created.
+
+```js
+dbrp "telegraf"."autogen"
+
+stream
+    |from()
+        .measurement('cpu')
+    @mirror()
+        .field('mycustom_field', 42.0)
+    |log()
+    |alert()
+        .crit(lambda: "usage_idle" < 30)
+```
+
+## Summary
+
+At this point, you should be able to write custom UDFs using either the socket or process-based methods.
+UDFs have a wide range of uses, from custom downsampling logic as part of a continuous query,
+to custom anomaly detection algorithms, to simply a system that "massages" your data a bit.
+
+### Next Steps
+
+If you want to learn more, here are a few places to start:
+
+* Modify the mirror UDF to function like the [DefaultNode](/kapacitor/v1.5/nodes/default_node/).
+  Instead of always overwriting a field, only set it if the field is absent.
+  Also add support for setting tags as well as fields.
+* Change the mirror UDF to work on batches instead of streams.
+  This requires changing the edge type in the `Info` method as well as implementing the `BeginBatch` and `EndBatch` methods.
+* Take a look at the other [examples](https://github.com/influxdata/kapacitor/tree/master/udf/agent/examples) and modify one to do something similar to one of your existing requirements.
diff --git a/content/kapacitor/v1.5/guides/two-measurement-alert.md b/content/kapacitor/v1.5/guides/two-measurement-alert.md
new file mode 100644
index 000000000..618847694
--- /dev/null
+++ b/content/kapacitor/v1.5/guides/two-measurement-alert.md
@@ -0,0 +1,131 @@
+---
+title: Triggering alerts by comparing two measurements
+description: Kapacitor allows you to create alerts triggered by comparisons between two or more measurements. This guide walks through how to join the measurements, trigger alerts, and create visualizations for the data comparison.
+menu:
+  kapacitor_1_5:
+    name: Alerts based on two measurements
+    identifier: two-measurement-alert
+    weight: 20
+    parent: guides
+---
+
+Kapacitor allows you to create alerts based on two or more measurements.
+In this guide, we are going to compare two measurements, `m1` and `m2`, and create
+an alert whenever the two measurements are different.
+As an added bonus, we'll also include a query that can be used to graph the percentage
+difference between the two measurements.
+ +## Comparing measurements and creating an alert +The following [TICKscript](/kapacitor/latest/tick/) streams the `m1` and `m2` measurements, +joins them, compares them, and triggers an alert if the two measurements are different. + +```js +var window_size = 1m + +// Stream m1 +var m1 = stream + |from() + .measurement('m1') + |window() + .period(window_size) + .every(window_size) + .align() + |count('value') + .as('value') + +// Stream m2 +var m2 = stream + |from() + .measurement('m2') + |window() + .period(window_size) + .every(window_size) + .align() + |count('value') + .as('value') + +// Join m1 and m2 +var data = m1 + |join(m2) + .as('m1', 'm2') + +// Compare the joined stream and alert when m1 and m2 values are different +data + |alert() + .crit(lambda: "m1.value" != "m2.value") + .message('values were not equal m1 value is {{ index .Fields "m1.value" }} m2 value is {{ index .Fields "m2.value" }}') +``` + +## Graphing the percentage difference between the measurements +Use the `data` stream defined in the TICKscript above to calculate the difference +between `m1` and `m2`, transform it into a float, divide that difference by the +actual values of `m1` and `m2`, then multiply them by 100. +This will give you the percentage difference for each. +Store the difference as new fields in the `diffs` measurement: + +```js +data + // Calculate the difference between m1 and m2 + |eval(lambda: "m1.value" - "m2.value") + .as('value_diff') + .keep() + // Calculate the % difference of m1 and m2 + |eval(lambda: (float("value_diff") / float("m1.value")) * 100.0, lambda: (float("value_diff") / float("m2.value")) * 100.0) + .as('diff_percentage_m1', 'diff_percentage_m2') + // Store the calculated differences in the 'diffs' measurement + |influxDBOut() + .measurement('diffs') + .database('mydb') + .create() +``` + +This can be used to create visualizations similar to: + +Graphing the percentage difference between two measurements + +## The full TICKscript +Below is the entire, uncommented TICKscript: + +```js +var window_size = 1m + +var m1 = stream + |from() + .measurement('m1') + |window() + .period(window_size) + .every(window_size) + .align() + |count('value') + .as('value') + +var m2 = stream + |from() + .measurement('m2') + |window() + .period(window_size) + .every(window_size) + .align() + |count('value') + .as('value') + +var data = m1 + |join(m2) + .as('m1', 'm2') + +data + |alert() + .crit(lambda: "m1.value" != "m2.value") + .message('values were not equal m1 value is {{ index .Fields "m1.value" }} m2 value is {{ index .Fields "m2.value" }}') + +data + |eval(lambda: "m1.value" - "m2.value") + .as('value_diff') + .keep() + |eval(lambda: (float("value_diff") / float("m1.value")) * 100.0, lambda: (float("value_diff") / float("m2.value")) * 100.0) + .as('diff_percentage_m1', 'diff_percentage_m2') + |influxDBOut() + .measurement('diffs') + .database('mydb') + .create() +``` diff --git a/content/kapacitor/v1.5/introduction/_index.md b/content/kapacitor/v1.5/introduction/_index.md new file mode 100644 index 000000000..adadf112b --- /dev/null +++ b/content/kapacitor/v1.5/introduction/_index.md @@ -0,0 +1,16 @@ +--- +title: Introducing Kapacitor +aliases: + - /kapacitor/v1.5/introduction/downloading +menu: + kapacitor_1_5: + name: Introduction + weight: 10 +--- + +To get up and running with Kapacitor, complete the following tasks: + +## Download Kapacitor +For information about downloading Kapacitor, visit the [InfluxData downloads page](https://portal.influxdata.com/downloads). 
+
+{{< children hlevel="h2">}}
diff --git a/content/kapacitor/v1.5/introduction/getting-started.md b/content/kapacitor/v1.5/introduction/getting-started.md
new file mode 100644
index 000000000..55ca2ff85
--- /dev/null
+++ b/content/kapacitor/v1.5/introduction/getting-started.md
@@ -0,0 +1,524 @@
+---
+title: Getting started with Kapacitor
+weight: 20
+menu:
+  kapacitor_1_5:
+    parent: Introduction
+---
+
+Use Kapacitor to import (stream or batch) time series data, and then transform, analyze, and act on the data.
+To get started using Kapacitor, use Telegraf to collect system metrics on your local machine and store them in InfluxDB.
+Then, use Kapacitor to process your system data.
+
+- [Overview](#overview)
+- [Start InfluxDB and collect Telegraf data](#start-influxdb-and-collect-telegraf-data)
+- [Start Kapacitor](#start-kapacitor)
+- Kapacitor tasks
+  - [Execute a task](#execute-a-task)
+  - [Trigger an alert from stream data](#trigger-alerts-from-stream-data)
+  - [Example alert on CPU usage](#example-alert-on-cpu-usage)
+  - [Gotcha - single versus double quotes](#gotcha-single-versus-double-quotes)
+  - [Extending TICKscripts](#extending-tickscripts)
+  - [A real world example](#a-real-world-example)
+  - [Trigger an alert from batch data](#trigger-alerts-from-batch-data)
+  - [Load tasks](#load-tasks-with-kapacitor)
+
+## Overview
+
+Kapacitor tasks define work to do on a set of data using [TICKscript](/kapacitor/v1.5/tick/) syntax. Kapacitor tasks include:
+
+- `stream` tasks. A stream task replicates data written to InfluxDB in Kapacitor. This offloads query overhead from InfluxDB, but requires Kapacitor to store the data on disk.
+- `batch` tasks. A batch task queries and processes data for a specified interval.
+
+To get started, do the following:
+
+1. If you haven't already, [download and install the InfluxData TICK stack (OSS)](/platform/install-and-deploy/install/oss-install).
+2. [Start InfluxDB and start Telegraf](#start-influxdb-and-collect-telegraf-data). By default, Telegraf starts sending system metrics to InfluxDB and creates a 'telegraf' database.
+3. Start Kapacitor.
+
+> **Note:** Example commands in the following procedures are written for Linux.
+
+## Start InfluxDB and collect Telegraf data
+
+1. Start InfluxDB by running the following command:
+
+    ```bash
+    $ sudo systemctl start influxdb
+    ```
+
+2. In the Telegraf configuration file (`/etc/telegraf/telegraf.conf`), configure `[[outputs.influxdb]]` to specify how to connect to InfluxDB and the destination database.
+
+    ```sh
+    [[outputs.influxdb]]
+      ## InfluxDB URLs are required and take the form "scheme://host:port".
+      ## Multiple URLs can be specified for the same cluster; only ONE URL is written to in each interval.
+      urls = ["http://localhost:8086"]
+
+      ## The target database for metrics is required (Telegraf creates it if one doesn't exist).
+      database = "telegraf"
+    ```
+
+3. Run the following command to start Telegraf:
+
+    ```
+    $ sudo systemctl start telegraf
+    ```
+
+    InfluxDB and Telegraf are now running on localhost.
+
+4. After a minute, run the following command to use the InfluxDB API to query for the Telegraf data:
+
+    ```bash
+    $ curl -G 'http://localhost:8086/query?db=telegraf' --data-urlencode 'q=SELECT mean(usage_idle) FROM cpu'
+    ```
+
+    Results similar to the following appear:
+
+    ```
+    {"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",91.82304336748372]]}]}]}
+    ```
+
+## Start Kapacitor
+
+1. Run the following command to generate a Kapacitor configuration file:
+
+    ```bash
+    kapacitord config > kapacitor.conf
+    ```
+
+    By default, the Kapacitor configuration file is saved in `/etc/kapacitor/kapacitor.conf`.
+    If you save the file to another location, specify the location when starting Kapacitor.
+
+    > The Kapacitor configuration is a [TOML](https://github.com/toml-lang/toml) file. Inputs configured for InfluxDB also work for Kapacitor.
+
+2. Start the Kapacitor service:
+
+    ```bash
+    $ sudo systemctl start kapacitor
+    ```
+
+    Because InfluxDB is running on `http://localhost:8086`, Kapacitor finds it during startup and creates several [subscriptions](/kapacitor/v1.5/administration/subscription-management/) on InfluxDB.
+    Subscriptions tell InfluxDB to send data to Kapacitor.
+
+3. (Optional) To view log data, run the following command:
+
+    ```
+    $ sudo tail -f -n 128 /var/log/kapacitor/kapacitor.log
+    ```
+
+    Kapacitor listens on an HTTP port and posts data to InfluxDB. Now, InfluxDB streams data from Telegraf to Kapacitor.
+
+### Execute a task
+
+- At the beginning of a TICKscript, specify the database and retention policy
+that contain data:
+
+  ```js
+  dbrp "telegraf"."autogen"
+
+  // ...
+  ```
+
+  When Kapacitor receives data from a database and retention policy that matches those
+  specified, Kapacitor executes the TICKscript.
+
+  > Kapacitor supports executing tasks based on database and retention policy (no other conditions).
+
+## Trigger alerts from stream data
+
+Triggering an alert is a common Kapacitor use case.
+The database and retention policy to alert on must be defined.
+
+##### Example alert on CPU usage
+
+1. Copy the following TICKscript into a file called `cpu_alert.tick`:
+
+    ```js
+    dbrp "telegraf"."autogen"
+
+    stream
+        // Select the CPU measurement from the `telegraf` database.
+        |from()
+            .measurement('cpu')
+        // Trigger a critical alert when the CPU idle usage drops below 70%.
+        |alert()
+            .crit(lambda: int("usage_idle") < 70)
+            // Write each alert to a file.
+            .log('/tmp/alerts.log')
+    ```
+
+2. In the command line, use the `kapacitor` CLI to define the task using the `cpu_alert.tick` TICKscript:
+
+    ```bash
+    kapacitor define cpu_alert -tick cpu_alert.tick
+    ```
+
+    > If the database and retention policy aren't included in the TICKscript (for example, `dbrp "telegraf"."autogen"`), use the `kapacitor define` command with the `-dbrp` flag followed by `"<dbname>"."<retention policy>"` to specify them when adding the task.
+
+3. (Optional) Use the `list` command to verify the alert has been created:
+
+    ```
+    $ kapacitor list tasks
+    ID        Type    Status    Executing  Databases and Retention Policies
+    cpu_alert stream  disabled  false      ["telegraf"."autogen"]
+    ```
+
+4. (Optional) Use the `show` command to view details about the task:
+
+    ```
+    $ kapacitor show cpu_alert
+    ID: cpu_alert
+    Error:
+    Template:
+    Type: stream
+    Status: disabled
+    Executing: false
+    ...
+    ```
+
+5. To ensure log files and communication channels aren't spammed with alerts, [test the task](#test-the-task).
+
+6. Enable the task to start processing the live data stream:
+
+    ```bash
+    kapacitor enable cpu_alert
+    ```
+
+    Alerts are written to the log in real time.
+
+7. Run the `show` command to verify the task is receiving data and behaving as expected:
+
+    ```bash
+    $ kapacitor show cpu_alert
+    // Information about the state of the task and any error it may have encountered.
+    ID: cpu_alert
+    Error:
+    Type: stream
+    Status: enabled
+    Executing: true
+    Created: 04 May 16 21:01 MDT
+    Modified: 04 May 16 21:04 MDT
+    LastEnabled: 04 May 16 21:03 MDT
+    Databases Retention Policies: ["telegraf"."autogen"]
+
+    // Displays the version of the TICKscript that Kapacitor has stored in its local database.
+    TICKscript:
+    stream
+        // Select the CPU measurement from the `telegraf` database.
+        |from()
+            .measurement('cpu')
+        |alert()
+            .crit(lambda: int("usage_idle") < 70)
+            // Write each alert to a file.
+            .log('/tmp/alerts.log')
+
+    DOT:
+    digraph cpu_alert {
+    graph [throughput="0.00 points/s"];
+
+    stream0 [avg_exec_time_ns="0" ];
+    stream0 -> from1 [processed="12"];
+
+    from1 [avg_exec_time_ns="0" ];
+    from1 -> alert2 [processed="12"];
+
+    alert2 [alerts_triggered="0" avg_exec_time_ns="0" ];
+    }
+    ```
+
+The `DOT` section is a [graphviz dot](http://www.graphviz.org)-formatted representation of the data processing pipeline defined by the TICKscript.
+Each node, and each edge linking it to the next node, carries a key-value array of statistics.
+The *processed* key on an edge indicates the number of data points that have passed along that edge of the graph.
+
+In the example above, the `stream0` node (aka the `stream` var from the TICKscript) has sent 12 points to the `from1` node.
+The `from1` node has also sent 12 points on to the `alert2` node. Since Telegraf is configured to send `cpu` data, all 12 points match the database/measurement criteria of the `from1` node and are passed on.
+
+> If necessary, install graphviz on Debian or RedHat using the package provided by the OS provider. The packages offered on the graphviz site are not up-to-date.
+
+Now that the task is running with live data, here is a quick hack to use 100% of one core to generate some artificial cpu activity:
+
+```bash
+while true; do i=0; done
+```
+
+##### Test the task
+
+Complete the following steps to ensure log files and communication channels aren't spammed with alerts.
+
+1. Record the data stream:
+
+    ```bash
+    kapacitor record stream -task cpu_alert -duration 60s
+    ```
+
+    If a connection error appears, for example: `getsockopt: connection refused` (Linux) or `connectex: No connection could be made...` (Windows),
+    verify the Kapacitor service is running (see [Start Kapacitor](#start-kapacitor)).
+    If Kapacitor is running, check the firewall settings of the host machine and ensure that port `9092` is accessible.
+    Also, check messages in `/var/log/kapacitor/kapacitor.log`.
+    If there's an issue with the `http` or other configuration in `/etc/kapacitor/kapacitor.conf`, the issue appears in the log.
+    If the Kapacitor service is running on another host machine, set the `KAPACITOR_URL` environment variable in the local shell to the Kapacitor endpoint on the remote machine.
+
+2. Retrieve the returned ID and assign the ID to a bash variable to use later (the actual UUID returned is different):
+
+    ```bash
+    rid=cd158f21-02e6-405c-8527-261ae6f26153
+    ```
+
+3. Confirm the recording captured some data by running:
+
+    ```bash
+    kapacitor list recordings $rid
+    ```
+
+    The output should look like:
+
+    ```
+    ID                                    Type    Status    Size    Date
+    cd158f21-02e6-405c-8527-261ae6f26153  stream  finished  2.2 kB  04 May 16 11:44 MDT
+    ```
+
+    If the size is more than a few bytes, data has been captured.
+    If Kapacitor isn't receiving data, check each layer: Telegraf → InfluxDB → Kapacitor.
+    Telegraf logs errors if it cannot communicate with InfluxDB.
+    InfluxDB logs an error about `connection refused` if it cannot send data to Kapacitor.
+    Run the query `SHOW SUBSCRIPTIONS` against InfluxDB to find the endpoint that InfluxDB is using to send data to Kapacitor.
+
+    In the following example, InfluxDB is running on `localhost:8086`:
+
+    ```
+    $ curl -G 'http://localhost:8086/query?db=telegraf' --data-urlencode 'q=SHOW SUBSCRIPTIONS'
+
+    {"results":[{"statement_id":0,"series":[{"name":"_internal","columns":["retention_policy","name","mode","destinations"],"values":[["monitor","kapacitor-ef3b3f9d-0997-4c0b-b1b6-5d0fb37fe509","ANY",["http://localhost:9092"]]]},{"name":"telegraf","columns":["retention_policy","name","mode","destinations"],"values":[["autogen","kapacitor-ef3b3f9d-0997-4c0b-b1b6-5d0fb37fe509","ANY",["http://localhost:9092"]]]}]}]}
+    ```
+
+4. Use `replay` to test the recorded data for a specific task:
+
+    ```bash
+    kapacitor replay -recording $rid -task cpu_alert
+    ```
+
+    > Use the flag `-real-clock` to set the replay time by deltas between the timestamps. Time is measured on each node by the data points it receives.
+
+5. Review the log for alerts:
+
+    ```bash
+    sudo cat /tmp/alerts.log
+    ```
+
+    Each JSON line represents one alert, and includes the alert level and data that triggered the alert.
+
+    > If the host machine is busy, it may take a while to log alerts.
+
+6. (Optional) Modify the task to be really sensitive to ensure the alerts are working.
+   In the TICKscript, change the lambda function `.crit(lambda: "usage_idle" < 70)` to `.crit(lambda: "usage_idle" < 100)`, and run the `define` command with just the `TASK_NAME` and `-tick` arguments:
+
+    ```bash
+    kapacitor define cpu_alert -tick cpu_alert.tick
+    ```
+
+    Every data point received during the recording triggers an alert.
+
+7. Replay the modified task to verify the results:
+
+    ```bash
+    kapacitor replay -recording $rid -task cpu_alert
+    ```
+
+    Once the `alerts.log` results verify that the task is working, change the `usage_idle` threshold back to a more reasonable level and redefine the task once more using the `define` command as shown in step 6.
+
+### Gotcha - single versus double quotes
+
+Single quotes and double quotes in TICKscripts do very different things. Note the following example:
+
+```js
+var data = stream
+    |from()
+        .database('telegraf')
+        .retentionPolicy('autogen')
+        .measurement('cpu')
+        // NOTE: Double quotes on server1
+        .where(lambda: "host" == "server1")
+```
+
+The result of this search will always be empty, because double quotes were used around "server1".
+This means that Kapacitor will search for a series where the field "host" is equal to the value held in _the field_ "server1".
+This is probably not what was intended.
+More likely the intention was to search for a series where the tag "host" has _the value_ 'server1', so single quotes should be used.
+Double quotes denote data fields, single quotes string values.
+To match the _value_, the TICKscript above should look like this:
+
+```js
+var data = stream
+    |from()
+        .database('telegraf')
+        .retentionPolicy('autogen')
+        .measurement('cpu')
+        // NOTE: Single quotes on server1
+        .where(lambda: "host" == 'server1')
+```
+
+### Extending TICKscripts
+
+The TICKscript below will compute the running mean and compare current values to it.
+It will then trigger an alert if the values are more than 3 standard deviations away from the mean.
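+
+The `sigma` function is what makes this dynamic: it keeps a running mean and
+standard deviation of the values it has seen and returns how many standard
+deviations the current value is from that mean. As an illustrative sketch only
+(not Kapacitor's actual implementation), the bookkeeping looks roughly like:
+
+```go
+package main
+
+import (
+    "fmt"
+    "math"
+)
+
+// runningSigma tracks a running mean and variance using Welford's online
+// algorithm and reports how many standard deviations a value is from the mean.
+type runningSigma struct {
+    n, mean, m2 float64
+}
+
+func (s *runningSigma) update(v float64) float64 {
+    s.n++
+    delta := v - s.mean
+    s.mean += delta / s.n
+    s.m2 += delta * (v - s.mean)
+    if s.n < 2 {
+        return 0 // a single value has no measurable deviation
+    }
+    stddev := math.Sqrt(s.m2 / (s.n - 1))
+    if stddev == 0 {
+        return 0
+    }
+    return math.Abs(v-s.mean) / stddev
+}
+
+func main() {
+    s := &runningSigma{}
+    // A stable series followed by an outlier: the last value scores high.
+    for _, v := range []float64{90, 91, 89, 92, 90, 40} {
+        fmt.Printf("value=%v sigma=%.2f\n", v, s.update(v))
+    }
+}
+```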
+Replace the `cpu_alert.tick` script with the TICKscript below:
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |alert()
+        // Compare values to the running mean and standard deviation
+        .crit(lambda: sigma("usage_idle") > 3)
+        .log('/tmp/alerts.log')
+```
+
+Just like that, a dynamic threshold has been created: if cpu usage drops during the day or spikes at night, an alert will be issued.
+Try it out.
+Use `define` to update the task TICKscript.
+
+```bash
+kapacitor define cpu_alert -tick cpu_alert.tick
+```
+
+>**Note:** If a task is already enabled, redefining the task with the `define` command automatically reloads (`reload`) the task.
+To define a task without reloading it, use `-no-reload`.
+
+Now tail the alert log:
+
+```bash
+sudo tail -f /tmp/alerts.log
+```
+
+There should not be any alerts triggering just yet.
+Next, start a while loop to add some load:
+
+```bash
+while true; do i=0; done
+```
+
+An alert trigger should be written to the log shortly, once enough artificial load has been created.
+Leave the loop running for a few minutes.
+After canceling the loop, another alert should be issued indicating that cpu usage has again changed.
+Using this technique, alerts can be generated for the rising and falling edges of cpu usage, as well as any outliers.
+
+### A real world example
+
+Now that the basics have been covered, here is a more real world example.
+Once the metrics from several hosts are streaming to Kapacitor, it is possible to do something like the following: aggregate and group
+the cpu usage for each service running in each datacenter, and then trigger an alert
+based on the 95th percentile.
+In addition to just writing the alert to a log, Kapacitor can
+integrate with third-party utilities: currently Slack, PagerDuty, HipChat, VictorOps, and more are supported.
+The alert can also be sent by email, posted to a custom endpoint, or used to trigger the execution of a custom script.
+Custom message formats can also be defined so that alerts have the right context and meaning.
+The TICKscript for this would look like the following example.
+
+*Example - TICKscript for stream on multiple service cpus and alert on 95th percentile*
+```js
+stream
+    |from()
+        .measurement('cpu')
+    // Create a new field called 'used' which inverts the idle cpu.
+    |eval(lambda: 100.0 - "usage_idle")
+        .as('used')
+    |groupBy('service', 'datacenter')
+    |window()
+        .period(1m)
+        .every(1m)
+    // Calculate the 95th percentile of the used cpu.
+    |percentile('used', 95.0)
+    |eval(lambda: sigma("percentile"))
+        .as('sigma')
+        .keep('percentile', 'sigma')
+    |alert()
+        .id('{{ .Name }}/{{ index .Tags "service" }}/{{ index .Tags "datacenter"}}')
+        .message('{{ .ID }} is {{ .Level }} cpu-95th:{{ index .Fields "percentile" }}')
+        // Compare values to the running mean and standard deviation.
+        .warn(lambda: "sigma" > 2.5)
+        .crit(lambda: "sigma" > 3.0)
+        .log('/tmp/alerts.log')
+
+        // Post data to a custom endpoint
+        .post('https://alerthandler.example.com')
+
+        // Execute a custom alert handler script
+        .exec('/bin/custom_alert_handler.sh')
+
+        // Send alerts to Slack
+        .slack()
+        .channel('#alerts')
+
+        // Send alerts to PagerDuty
+        .pagerDuty()
+
+        // Send alerts to VictorOps
+        .victorOps()
+        .routingKey('team_rocket')
+```
+
+Something so simple as defining an alert can quickly be extended to apply to a much larger scope.
+With the above script, an alert will be triggered if any service in any datacenter deviates more than 3 +standard deviations away from normal behavior as defined by the historical 95th percentile of cpu usage, and will do so within 1 minute! + +For more information on how alerting works, see the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) docs. + +## Trigger alerts from batch data + +In addition to processing data in streams, Kapacitor can also periodically query InfluxDB and process data in batches. + +While triggering an alert based off cpu usage is more suited for the streaming case, the basic idea +of how `batch` tasks work is demonstrated here by following the same use case. + +##### Example alert on batch data + +This TICKscript does roughly the same thing as the earlier stream task, but as a batch task: + +```js +dbrp "telegraf"."autogen" + +batch + |query(''' + SELECT mean(usage_idle) + FROM "telegraf"."autogen"."cpu" + ''') + .period(5m) + .every(5m) + .groupBy(time(1m), 'cpu') + |alert() + .crit(lambda: "mean" < 70) + .log('/tmp/batch_alerts.log') +``` + +1. Copy the script above into the file `batch_cpu_alert.tick`. + +2. Define the task: + + ```bash + kapacitor define batch_cpu_alert -tick batch_cpu_alert.tick + ``` +3. Verify its creation: + + ```bash + $ kapacitor list tasks + ID Type Status Executing Databases and Retention Policies + batch_cpu_alert batch disabled false ["telegraf"."autogen"] + cpu_alert stream enabled true ["telegraf"."autogen"] + ``` + + 4. Record the result of the query in the task (note, the actual UUID differs): + + ```bash + kapacitor record batch -task batch_cpu_alert -past 20m + # Save the id again + rid=b82d4034-7d5c-4d59-a252-16604f902832 + ``` + + This records the last 20 minutes of batches using the query in the `batch_cpu_alert` task. + In this case, since the `period` is 5 minutes, the last 4 batches are recorded and saved. + +5. Replay the batch recording the same way: + + ```bash + kapacitor replay -recording $rid -task batch_cpu_alert + ``` + +6. Check the alert log to make sure alerts were generated as expected. +The `sigma` based alert above can also be adapted for working with batch data. +Play around and get comfortable with updating, testing, and running tasks in Kapacitor. + +## Load tasks with Kapacitor + +To load a task with Kapacitor, save the TICKscript in a _load_ directory specified in `kapacitor.conf`. TICKscripts must include the database and retention policy declaration `dbrp`. + +TICKscripts in the load directory are automatically loaded when Kapacitor starts and do not need to be added with the kapacitor define command. + +For more information, see [Load Directory](/kapacitor/v1.5/guides/load_directory/). diff --git a/content/kapacitor/v1.5/introduction/install-docker.md b/content/kapacitor/v1.5/introduction/install-docker.md new file mode 100644 index 000000000..b7ea25e3e --- /dev/null +++ b/content/kapacitor/v1.5/introduction/install-docker.md @@ -0,0 +1,433 @@ +--- +title: Docker Install +weight: 70 +menu: + kapacitor_1_5: + parent: Introduction +--- + + +## Getting Started with TICK and Docker Compose + +This short tutorial will demonstrate starting TICK stack components (InfluxDB, Telegraf, Kapacitor) with Docker Compose and then using that stack to learn the rudiments of working with Kapacitor and the [TICKscript](/kapacitor/v1.5/tick/) domain specific language (DSL). 
The following discussion is based on the tutorial project package (named tik-docker-tutorial.tar.gz) that can be downloaded from [this location](/downloads/tik-docker-tutorial.tar.gz). It will create a running deployment of these applications that can be used for an initial evaluation and testing of Kapacitor. Chronograf is currently not included in the package. + +This tutorial depends on Docker Compose 3.0 to deploy the latest Docker 17.0+ compatible images of InfluxDB, Telegraf and Kapacitor. + +To use this package Docker and Docker Compose should be installed on the host machine where it will run. + +Docker installation is covered at the [Docker website](https://docs.docker.com/engine/installation/). + +Docker Compose installation is also covered at the [Docker website](https://docs.docker.com/compose/install/). + +In order to keep an eye on the log files, this document will describe running the reference package in two separate consoles. In the first console Docker Compose will be run. The second will be used to issue commands to demonstrate basic Kapacitor functionality. + +As of this writing, the package has only been tested on Linux(Ubuntu 16.04). It contains a `docker-compose.yml` and directories for configuration a test files. + +*Demo Package Contents* +``` +. +├── docker-compose.yml +├── etc +│   ├── kapacitor +│   │   └── kapacitor.conf +│   └── telegraf +│   └── telegraf.conf +├── home +│   └── kapacitor +│   ├── cpu_alert_batch.tick +│   └── cpu_alert_stream.tick +├── README.md +└── var + └── log + └── kapacitor + └── README.md + +``` + +Please clone or copy the package to the host machine and open two consoles to its install location before continuing. + +### Loading the stack with Docker Compose + +The core of the package is the `docker-compose.yml` file, which Docker Compose uses to pull the Docker images and then create and run the Docker containers. + +Standard Unix style directories have also been prepared. These are mapped into the docker containers to make it easy to access scripts and logs in the demonstrations that follow. One important directory is the volume `var/log/kapacitor`. Here the `kapacitor.log` and later the `alert-*.log` files will be made available for inspection. + + In the first console, in the root directory of the package, to start the stack and leave the logs visible run the following: + + ``` + $ docker-compose up + ``` +*Logs in standard console streams* +``` +Starting tik_influxdb_1 ... +Starting tik_telegraf_1 ... +Starting tik_telegraf_1 +Starting tik_influxdb_1 +Starting tik_kapacitor_1 ... +Starting tik_influxdb_1 ... done +Attaching to tik_telegraf_1, tik_kapacitor_1, tik_influxdb_1 +kapacitor_1 | +kapacitor_1 | '##:::'##::::'###::::'########:::::'###:::::'######::'####:'########::'#######::'########:: +kapacitor_1 | ##::'##::::'## ##::: ##.... ##:::'## ##:::'##... ##:. ##::... ##..::'##.... ##: ##.... ##: +kapacitor_1 | ##:'##::::'##:. ##:: ##:::: ##::'##:. ##:: ##:::..::: ##::::: ##:::: ##:::: ##: ##:::: ##: +kapacitor_1 | #####::::'##:::. ##: ########::'##:::. ##: ##:::::::: ##::::: ##:::: ##:::: ##: ########:: +kapacitor_1 | ##. ##::: #########: ##.....::: #########: ##:::::::: ##::::: ##:::: ##:::: ##: ##.. ##::: +kapacitor_1 | ##:. ##:: ##.... ##: ##:::::::: ##.... ##: ##::: ##:: ##::::: ##:::: ##:::: ##: ##::. ##:: +kapacitor_1 | ##::. ##: ##:::: ##: ##:::::::: ##:::: ##:. ######::'####:::: ##::::. #######:: ##:::. 
##: +kapacitor_1 | ..::::..::..:::::..::..:::::::::..:::::..:::......:::....:::::..::::::.......:::..:::::..:: +kapacitor_1 | +kapacitor_1 | 2017/08/17 08:46:55 Using configuration at: /etc/kapacitor/kapacitor.conf +influxdb_1 | +influxdb_1 | 8888888 .d888 888 8888888b. 888888b. +influxdb_1 | 888 d88P" 888 888 "Y88b 888 "88b +influxdb_1 | 888 888 888 888 888 888 .88P +influxdb_1 | 888 88888b. 888888 888 888 888 888 888 888 888 8888888K. +influxdb_1 | 888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b +influxdb_1 | 888 888 888 888 888 888 888 X88K 888 888 888 888 +influxdb_1 | 888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P +influxdb_1 | 8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P" +influxdb_1 | +influxdb_1 | [I] 2017-08-17T08:46:55Z InfluxDB starting, version 1.3.3, branch HEAD, commit e37afaf09bdd91fab4713536c7bdbdc549ee7dc6 +influxdb_1 | [I] 2017-08-17T08:46:55Z Go version go1.8.3, GOMAXPROCS set to 8 +influxdb_1 | [I] 2017-08-17T08:46:55Z Using configuration at: /etc/influxdb/influxdb.conf +influxdb_1 | [I] 2017-08-17T08:46:55Z Using data dir: /var/lib/influxdb/data service=store +influxdb_1 | [I] 2017-08-17T08:46:56Z reading file /var/lib/influxdb/wal/_internal/monitor/1/_00001.wal, size 235747 engine=tsm1 service=cacheloader +influxdb_1 | [I] 2017-08-17T08:46:56Z reading file /var/lib/influxdb/wal/telegraf/autogen/2/_00001.wal, size 225647 engine=tsm1 service=cacheloader +telegraf_1 | 2017/08/17 08:46:55 I! Using config file: /etc/telegraf/telegraf.conf +telegraf_1 | 2017-08-17T08:46:56Z I! Starting Telegraf (version 1.3.3) +telegraf_1 | 2017-08-17T08:46:56Z I! Loaded outputs: influxdb +telegraf_1 | 2017-08-17T08:46:56Z I! Loaded inputs: inputs.kernel inputs.mem inputs.processes inputs.swap inputs.system inputs.cpu inputs.disk inputs.diskio +telegraf_1 | 2017-08-17T08:46:56Z I! Tags enabled: host=f1ba76bcbbcc +telegraf_1 | 2017-08-17T08:46:56Z I! Agent Config: Interval:10s, Quiet:false, Hostname:"f1ba76bcbbcc", Flush Interval:10s +influxdb_1 | [I] 2017-08-17T08:46:56Z reading file /var/lib/influxdb/wal/_internal/monitor/1/_00002.wal, size 0 engine=tsm1 service=cacheloader +influxdb_1 | [I] 2017-08-17T08:46:56Z /var/lib/influxdb/data/_internal/monitor/1 opened in 228.044556ms service=store + +... + +``` +### Verifying the stack + +The console logs should be similar to the above sample. In the second console the status can be confirmed by using docker directly. + +``` +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +f1ba76bcbbcc telegraf:latest "/entrypoint.sh te..." 43 minutes ago Up 2 minutes 8092/udp, 8125/udp, 8094/tcp tik_telegraf_1 +432ce34e3b00 kapacitor:latest "/entrypoint.sh ka..." 43 minutes ago Up 2 minutes 9092/tcp tik_kapacitor_1 +2060eca01bb7 influxdb:latest "/entrypoint.sh in..." 43 minutes ago Up 2 minutes 8086/tcp tik_influxdb_1 + +``` +Take note of the container names, especially for Kapacitor. If the Kapacitor container name in the current deployment is not the same(i.e. `tik_kapacitor_1`), be sure to replace it in the Docker command line examples below. This also applies to the InfluxDB container name (`tik_influxdb_1`) which is used in the next example. + +### What is running? + +At this point there should be running on the host machine: InfluxDB, Telegraf and Kapacitor. Telegraf is configured using the configuration file `etc/telegraf/telegraf.conf`. Kapacitor is configured using the file `etc/kapacitor/kapacitor.conf`. A bridge network has been defined in the `docker-compose.yml` file. 
This bridge network features a simple name resolution service, that allows the container names to be used as the server names in the configuration files just mentioned. + +The running configuration can be further inspected by using the `influx` command line client directly from the InfluxDB Container. + +``` +$ docker exec -it tik_influxdb_1 influx --precision rfc3339 +Connected to http://localhost:8086 version 1.3.3 +InfluxDB shell version: 1.3.3 +> show databases +name: databases +name +---- +_internal +telegraf +> use telegraf +Using database telegraf +> show subscriptions +name: telegraf +retention_policy name mode destinations +---------------- ---- ---- ------------ +autogen kapacitor-dc455e9d-b306-4687-aa39-f146a250dd76 ANY [http://kapacitor:9092] + +name: _internal +retention_policy name mode destinations +---------------- ---- ---- ------------ +monitor kapacitor-dc455e9d-b306-4687-aa39-f146a250dd76 ANY [http://kapacitor:9092] +> exit +``` +## Kapacitor Alerts and the TICKscript + +The top level nodes of a TICKscript define the mode by which the underlying node chain is to be executed. They can be setup so that Kapacitor receives processed data in a steady stream, or so that it triggers the processing of a batch of data points, from which it will receive the results. + +### Setting up a live stream CPU alert + +To create an alert stream it is necessary to: + + * declare the desired functionality in a TICKscript + * define the actual alert task in Kapacitor + * test the alert task by recording a sample of stream activity and then playing it back + * enable the alert + +An initial script has been prepared in the `home/kapacitor` directory, which is mapped as a volume into the Kapacitor container (`home/kapacitor/cpu_alert_stream.tick`). + +This simple script touches upon just the basics of the rich domain specific TICKscript language. It is self-descriptive and should be easily understood. + +*cpu_alert_stream.tick* +``` +stream + // Select just the cpu measurement from our example database. + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 70) + // Whenever we get an alert write it to a file + .log('/var/log/kapacitor/alerts-stream.log') + +``` +Note that the `alerts-stream.log` file is written to a volume mapped back to the package directory tree `./var/log/kapacitor`. This will simplify log inspection. + +The TICKscript can then be used over Docker to define a new alert in the Kapacitor container. +``` +$ docker exec tik_kapacitor_1 sh -c "cd /home/kapacitor && kapacitor define cpu_alert_stream -type stream -tick ./cpu_alert_stream.tick -dbrp telegraf.autogen" +``` + +Verify that the alert has been created with the following. + +``` +$ docker exec tik_kapacitor_1 kapacitor show cpu_alert_stream +ID: cpu_alert_stream +Error: +Template: +Type: stream +Status: disabled +Executing: false +Created: 17 Aug 17 09:30 UTC +Modified: 17 Aug 17 09:30 UTC +LastEnabled: 01 Jan 01 00:00 UTC +Databases Retention Policies: ["telegraf"."autogen"] +TICKscript: +stream + // Select just the cpu measurement from our example database. + |from() + .measurement('cpu') + |alert() + .crit(lambda: "usage_idle" < 70) + // Whenever we get an alert write it to a file. + .log('/var/log/kapacitor/alerts-stream.log') + +DOT: +digraph cpu_alert_stream { +stream0 -> from1; +from1 -> alert2; +} +``` +#### Test the stream alert using 'record' + +Before an alert is enabled, it is prudent to check its behavior. 
A test run of how the alert stream will behave can be done using the Kapacitor 'record' command. This will return a UUID that can then be used as a reference to list and replay what was captured in the test run. + +``` +$ docker exec tik_kapacitor_1 kapacitor record stream -task cpu_alert_stream -duration 60s +fd7d7081-c985-433e-87df-97ab0c267161 +``` + +During the minute that this test run is being recorded, in order to force one or more CPUs to have a low idle measurement, which will trigger an alert, it will be useful to execute a process that will generate some artificial load. For example in a third console, the following might be executed. + +```shell +while true; do i=0; done; +``` + +List the recording with the following command: + +``` +$ docker exec tik_kapacitor_1 kapacitor list recordings fd7d7081-c985-433e-87df-97ab0c267161 +ID Type Status Size Date +fd7d7081-c985-433e-87df-97ab0c267161 stream finished 1.9 kB 17 Aug 17 09:34 UTC + +``` +#### Rerunning a recording of a stream alert + +When a recording is rerun, alerts are written to the `alerts-stream.log` as they will occur when the alert will be enabled. Replay the recording as follows: + +``` +docker exec tik_kapacitor_1 kapacitor replay -recording fd7d7081-c985-433e-87df-97ab0c267161 -task cpu_alert_stream +c8cd033f-a79e-46a6-bb5d-81d2f56722b2 +``` +Check the contents of the local `var/log/kapacitor` directory. + +``` +$ ls -1 var/log/kapacitor/ +alerts-stream.log +kapacitor.log +README.md +``` + +Check the contents of the `alerts-stream.log`. + +``` +$ sudo less -X var/log/kapacitor/alerts-stream.log +{"id":"cpu:nil","message":"cpu:nil is CRITICAL","details":"{...}\n","time":"2017-08-17T09:36:09.693216014Z","duration":0,"level":"CRITICAL","data":{... +``` +#### Enable the alert stream + +Once it is clear that the new alert will not be generating spam, and that it will actually catch meaningful information, it can be enabled in Kapacitor. + +``` +$ docker exec tik_kapacitor_1 kapacitor enable cpu_alert_stream +``` +Verify that it has been enabled by once again showing the task. +``` +$ docker exec tik_kapacitor_1 kapacitor show cpu_alert_stream +ID: cpu_alert_stream +Error: +Template: +Type: stream +Status: enabled +Executing: true +... +``` +If the alert stream will no longer be needed it can likewise be disabled. +``` +$ docker exec tik_kapacitor_1 kapacitor disable cpu_alert_stream +``` + +### Setting up a batch CPU alert + +The second mode for setting up a TICKscript node chain is batch processing. A batch process can be executed periodically over a window of time series data points. + +To create a batch process it is necessary to: + + * declare the desired functionality, window or time period to be sampled, and run frequency in a TICKscript + * define the actual alert task in Kapacitor + * test the alert task by recording a data point sample and then playing it back + * enable the alert + +It may have already been noted that an example batch TICKscript has been created in the directory `home/kapacitor`. + +As with the stream based TICKscript, the contents are self-descriptive and should be easily understood. + +*cpu_alert_batch.tick* +``` +batch + |query(''' + SELECT usage_idle + FROM "telegraf"."autogen"."cpu" + ''') + .period(5m) + .every(5m) + |alert() + .crit(lambda: "usage_idle" < 70) + .log('/var/log/kapacitor/alerts-batch.log') +``` +Here again the `alerts-batch.log` will be written to a directory mapped as a volume into the Kapacitor container. 
+
+The TICKscript can then be used over Docker to define a new alert in the Kapacitor container.
+
+```
+$ docker exec tik_kapacitor_1 sh -c "cd /home/kapacitor && kapacitor define cpu_alert_batch -type batch -tick ./cpu_alert_batch.tick -dbrp telegraf.autogen"
+```
+
+Verify that the task has been created.
+
+```
+$ docker exec tik_kapacitor_1 kapacitor show cpu_alert_batch
+ID: cpu_alert_batch
+Error:
+Template:
+Type: batch
+Status: disabled
+Executing: false
+Created: 17 Aug 17 12:41 UTC
+Modified: 17 Aug 17 13:06 UTC
+LastEnabled: 01 Jan 01 00:00 UTC
+Databases Retention Policies: ["telegraf"."autogen"]
+TICKscript:
+batch
+    |query('''
+        SELECT usage_idle
+        FROM "telegraf"."autogen"."cpu"
+    ''')
+        .period(5m)
+        .every(5m)
+    |alert()
+        .crit(lambda: "usage_idle" < 70)
+        .log('/var/log/kapacitor/alerts-batch.log')
+
+DOT:
+digraph cpu_alert_batch {
+query1 -> alert2;
+}
+```
+
+#### Test the batch alert using 'record'
+
+As with the stream alert, it would be advisable to test the alert task before enabling it.
+
+Prepare some alert-triggering data points by creating artificial CPU load. For example, in a third console the following might be run for a minute or two.
+
+```shell
+while true; do i=0; done;
+```
+
+A test run of how the batch alert will behave can be generated using the Kapacitor 'record' command.
+
+```
+$ docker exec tik_kapacitor_1 kapacitor record batch -task cpu_alert_batch -past 5m
+b2c46972-8d01-4fab-8088-56fd51fa577c
+```
+
+List the recording with the following command.
+
+```
+$ docker exec tik_kapacitor_1 kapacitor list recordings b2c46972-8d01-4fab-8088-56fd51fa577c
+ID                                    Type    Status    Size    Date
+b2c46972-8d01-4fab-8088-56fd51fa577c  batch   finished  2.4 kB  17 Aug 17 13:06 UTC
+```
+
+#### Rerunning a recording of a batch alert
+
+When the recording is rerun, alerts are written to `alerts-batch.log` as they would occur during batch processing. Replay the recording as follows:
+
+```
+$ docker exec tik_kapacitor_1 kapacitor replay -recording b2c46972-8d01-4fab-8088-56fd51fa577c -task cpu_alert_batch
+0cc65a9f-7dba-4a02-a118-e95b4fccf123
+```
+
+Check the contents of the local `var/log/kapacitor` directory.
+
+```
+$ ls -1 var/log/kapacitor/
+alerts-batch.log
+alerts-stream.log
+kapacitor.log
+README.md
+```
+
+Check the contents of the `alerts-batch.log`.
+
+```
+$ sudo less -X var/log/kapacitor/alerts-batch.log
+{"id":"cpu:nil","message":"cpu:nil is CRITICAL","details":"{...}\n","time":"2017-08-17T13:07:00.156730835Z","duration":0,"level":"CRITICAL","data":{...
+```
+
+#### Enable the batch alert
+
+Once it is clear that the new alert will not be generating spam, and that it will actually catch meaningful information, it can be enabled in Kapacitor.
+
+```
+$ docker exec tik_kapacitor_1 kapacitor enable cpu_alert_batch
+```
+
+Verify that it has been enabled by once again showing the task.
+
+```
+$ docker exec tik_kapacitor_1 kapacitor show cpu_alert_batch
+ID: cpu_alert_batch
+Error:
+Template:
+Type: batch
+Status: enabled
+Executing: true
+Created: 17 Aug 17 12:41 UTC
+...
+```
+
+If the batch alert is no longer needed, it can likewise be disabled.
+
+```
+$ docker exec tik_kapacitor_1 kapacitor disable cpu_alert_batch
+```
+
+### Summary
+
+This short tutorial has covered the most basic steps in starting up the TICK stack with Docker and checking the most elementary feature of Kapacitor: configuring and testing alerts triggered by changes in data written to InfluxDB.
+This installation can be used to further explore Kapacitor and its integration with InfluxDB and Telegraf.
+
+### Shutting down the stack
+
+There are two ways in which the stack can be taken down:
+
+  * Either, in the first console, hit CTRL + C.
+  * Or, in the second console, run `$ docker-compose down --volumes`.
diff --git a/content/kapacitor/v1.5/introduction/installation.md b/content/kapacitor/v1.5/introduction/installation.md
new file mode 100644
index 000000000..6bdd6c4c9
--- /dev/null
+++ b/content/kapacitor/v1.5/introduction/installation.md
@@ -0,0 +1,131 @@
+---
+title: Installing Kapacitor
+weight: 10
+menu:
+  kapacitor_1_5:
+    parent: Introduction
+---
+
+This page provides directions for installing, starting, and configuring Kapacitor.
+
+## Requirements
+
+Installation of the Kapacitor package may require `root` or administrator privileges in order to complete successfully.
+
+### Networking
+
+Kapacitor listens on TCP port `9092` for all API and write calls.
+
+Kapacitor may also bind to randomized UDP ports
+for handling of InfluxDB data via subscriptions.
+
+## Installation
+
+Kapacitor has two binaries:
+
+* kapacitor: a CLI program for calling the Kapacitor API.
+* kapacitord: the Kapacitor server daemon.
+
+You can download the binaries directly from the
+[downloads](https://portal.influxdata.com/downloads) page.
+
+> **Note:** Windows support is experimental.
+
+### Starting the Kapacitor service
+
+For packaged installations, please see the respective sections below
+for your operating system. For non-packaged installations (tarballs or
+from source), you will need to start the Kapacitor application
+manually by running:
+
+```
+./kapacitord -config <path to kapacitor.conf>
+```
+
+#### macOS (using Homebrew)
+
+To have `launchd` start Kapacitor at login:
+
+```
+ln -sfv /usr/local/opt/kapacitor/*.plist ~/Library/LaunchAgents
+```
+
+Then to load Kapacitor now:
+
+```
+launchctl load ~/Library/LaunchAgents/homebrew.mxcl.kapacitor.plist
+```
+
+Or, if you don't want or need `launchctl`, you can just run:
+
+```
+kapacitord -config /usr/local/etc/kapacitor.conf
+```
+
+#### Linux - SysV or Upstart systems
+
+To start the Kapacitor service, run:
+
+```
+sudo service kapacitor start
+```
+
+#### Linux - systemd systems
+
+To start the Kapacitor service, run:
+
+```
+sudo systemctl start kapacitor
+```
+
+## Configuration
+
+An example configuration file can be found [here](https://github.com/influxdb/kapacitor/blob/master/etc/kapacitor/kapacitor.conf).
+
+Kapacitor can also print an example configuration using this command:
+
+```bash
+kapacitord config
+```
+
+To generate a new configuration file, run:
+
+```
+kapacitord config > kapacitor.generated.conf
+```
+
+### Shared secret
+
+If using [Kapacitor v1.5.3](/kapacitor/v1.5/about_the_project/releasenotes-changelog/#v1-5-3-2019-06-18)
+or newer and InfluxDB with [authentication enabled](/influxdb/v1.7/administration/authentication_and_authorization/),
+set the `[http].shared-secret` option in your Kapacitor configuration file to the
+shared secret of your InfluxDB instances.
+
+```toml
+# ...
+[http]
+  # ...
+  shared-secret = "youramazingsharedsecret"
+```
+
+If this option is not set, is set to an empty string, or does not match InfluxDB's shared secret,
+the integration with InfluxDB will fail and Kapacitor will not start.
+
+### Time zone
+
+To display alert notifications using a preferred time zone, either change the time zone
+of the host on which Kapacitor is running or set the Kapacitor process' `TZ` environment variable.
+ +#### systemd + +Add the environment variable using `systemctl edit kapacitor`: + +``` +[Service] +Environment="TZ=Asia/Shanghai" +``` + +#### docker + +Set the environment variable using the `-e` flag when starting the container (`-e TZ=Asia/Shanghai`) +or in your `docker-compose.yml`. diff --git a/content/kapacitor/v1.5/nodes/_index.md b/content/kapacitor/v1.5/nodes/_index.md new file mode 100644 index 000000000..0275b7e9a --- /dev/null +++ b/content/kapacitor/v1.5/nodes/_index.md @@ -0,0 +1,64 @@ +--- +title: TICKscript nodes overview + +aliases: + - kapacitor/v1.5/nodes/source_batch_node/ + - kapacitor/v1.5/nodes/source_stream_node/ + - kapacitor/v1.5/nodes/map_node/ + - kapacitor/v1.5/nodes/reduce_node/ +menu: + kapacitor_1_5_ref: + name: TICKscript nodes + identifier: nodes + weight: 40 +--- + +> ***Note:*** Before continuing, please make sure you have read the +> [TICKscript Language Specification](/kapacitor/v1.5/tick/). + +Nodes represent process invocation units that either take data as a batch or a point-by-point stream, and then alter the data, store the data, or trigger some other activity based on changes in the data (e.g., an alert). + +The property methods for these two nodes define the type of task that you are running, either +[stream](/kapacitor/v1.5/introduction/getting-started/#triggering-alerts-from-stream-data) +or +[batch](/kapacitor/v1.5/introduction/getting-started/#triggering-alerts-from-batch-data). + +Below is a complete list of the available nodes. For each node, the associated property methods are described. + +## Available nodes + +* [AlertNode](/kapacitor/v1.5/nodes/alert_node) +* [BarrierNode](/kapacitor/v1.5/nodes/barrier_node) +* [BatchNode](/kapacitor/v1.5/nodes/batch_node) +* [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node) +* [CombineNode](/kapacitor/v1.5/nodes/combine_node) +* [DefaultNode](/kapacitor/v1.5/nodes/default_node) +* [DeleteNode](/kapacitor/v1.5/nodes/delete_node) +* [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node) +* [EC2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node) +* [EvalNode](/kapacitor/v1.5/nodes/eval_node) +* [FlattenNode](/kapacitor/v1.5/nodes/flatten_node) +* [FromNode](/kapacitor/v1.5/nodes/from_node) +* [GroupByNode](/kapacitor/v1.5/nodes/group_by_node) +* [HTTPOutputNode](/kapacitor/v1.5/nodes/http_out_node) +* [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node) +* [InfluxDBOutputNode](/kapacitor/v1.5/nodes/influx_d_b_out_node) +* [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node) +* [JoinNode](/kapacitor/v1.5/nodes/join_node) +* [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node) +* [Kapacitor LoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node) +* [LogNode](/kapacitor/v1.5/nodes/log_node) +* [NoOpNode](/kapacitor/v1.5/nodes/no_op_node) +* [QueryNode](/kapacitor/v1.5/nodes/query_node) +* [SampleNode](/kapacitor/v1.5/nodes/sample_node) +* [ShiftNode](/kapacitor/v1.5/nodes/shift_node) +* [SideloadNode](/kapacitor/v1.5/nodes/sideload_node) +* [StateCountNode](/kapacitor/v1.5/nodes/state_count_node) +* [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node) +* [StatsNode](/kapacitor/v1.5/nodes/stats_node) +* [StreamNode](/kapacitor/v1.5/nodes/stream_node) +* [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node) +* [UDF (User Defined Function)Node](/kapacitor/v1.5/nodes/u_d_f_node) +* [UnionNode](/kapacitor/v1.5/nodes/union_node) +* [WhereNode](/kapacitor/v1.5/nodes/where_node) +* [WindowNode](/kapacitor/v1.5/nodes/window_node) diff --git 
a/content/kapacitor/v1.5/nodes/alert_node.md b/content/kapacitor/v1.5/nodes/alert_node.md new file mode 100644 index 000000000..e929d68c7 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/alert_node.md @@ -0,0 +1,2005 @@ +--- +title: AlertNode +description: A Kapacitor AlertNode triggers an event of varying severity levels and passes the event to alert handlers. The criteria for triggering an alert is specified using a lambda expression. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: AlertNode + identifier: alert_node + weight: 10 + parent: nodes +--- + +The `alert` node triggers an event of varying severity levels and passes the +event to [event handlers](/kapacitor/v1.5/event_handlers/). The criteria for +triggering an alert is specified via a [lambda expression](/kapacitor/latest/tick/expr/). +See [AlertNode.Info](/kapacitor/v1.5/nodes/alert_node/#info), +[AlertNode.Warn](/kapacitor/v1.5/nodes/alert_node/#warn), +and [AlertNode.Crit](/kapacitor/v1.5/nodes/alert_node/#crit) below. + + +### Constructor + +| Chaining method | Description | +|:---------|:---------| +| **alert ( )** | Create an alert node, which can trigger alerts. | + +### Property methods + +| Setters | description | +|:---|:---| +| **[alerta](#alerta) \( \)** | Send the alert to Alerta. | +| **[all](#all) ( )** | Indicates an alert should trigger only if all points in a batch match the criteria. Does not apply to stream alerts. | +| **[crit](#crit) ( `value` `ast.LambdaNode`)** | Filter expression for the CRITICAL alert level. An empty value indicates the level is invalid and is skipped. | +| **[critReset](#critreset) ( `value` `ast.LambdaNode`)** | Filter expression for resetting the CRITICAL alert level to lower level. | +| **[details](#details) ( `value` `string`)** | Template for constructing a detailed HTML message for the alert. The same template data is available as the AlertNode.Message property, in addition to a Message field that contains the rendered Message value. | +| **[discord](#discord) ( )** | Send the alert to Discord. | +| **[durationField](#durationfield) ( `value` `string`)** | Optional field key to add the alert duration to the data. The duration is always in units of nanoseconds. | +| **[email](#email) ( `to` `...string`)** | Email the alert data. | +| **[exec](#exec) ( `executable` `string`, `args` `...string`)** | Execute a command whenever an alert is triggered and pass the alert data over STDIN in JSON format. | +| **[flapping](#flapping) ( `low` `float64`, `high` `float64`)** | Perform flap detection on the alerts. The method used is similar method to Nagios: https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/3/en/flapping.html | +| **[hipChat](#hipchat) ( )** | Send the alert to HipChat. | +| **[history](#history) ( `value` `int64`)** | Number of previous states to remember when computing flapping levels and checking for state changes. Minimum value is 2 in order to keep track of current and previous states. | +| **[id](#id) ( `value` `string`)** | Template for constructing a unique ID for a given alert. | +| **[idField](#idfield) ( `value` `string`)** | Optional field key to add to the data, containing the alert ID as a string. | +| **[idTag](#idtag) ( `value` `string`)** | Optional tag key to use when tagging the data with the alert ID. | +| **[info](#info) ( `value` `ast.LambdaNode`)** | Filter expression for the INFO alert level. An empty value indicates the level is invalid and is skipped. 
| +| **[inhibit](#inhibit) ( `category` `string`, `equalTags` `...string`)** | Inhibit other alerts in a category. The equal tags provides a list of tags that must be equal in order for an alert event to be inhibited. | +| **[infoReset](#inforeset) ( `value` `ast.LambdaNode`)** | Filter expression for resetting the INFO alert level to lower level. | +| **[kafka](#kafka) ( )** | Send the alert to a Kafka cluster. | +| **[levelField](#levelfield) ( `value` `string`)** | Optional field key to add to the data, containing the alert level as a string. | +| **[levelTag](#leveltag) ( `value` `string`)** | Optional tag key to use when tagging the data with the alert level. | +| **[log](#log) ( `filepath` `string`)** | Log JSON alert data to file. One event per line. Must specify the absolute path to the log file. It will be created if it does not exist. Example: stream |alert() .log('/tmp/alert') | +| **[message](#message) ( `value` `string`)** | Template for constructing a meaningful message for the alert. | +| **[messageField](#messagefield) ( `value` `string`)** | Optional field key to add to the data, containing the alert message. | +| **[mqtt](#mqtt) ( `topic` `string`)** | Send alert to an MQTT broker | +| **[noRecoveries](#norecoveries) ( )** | Do not send recovery alerts. | +| **[opsGenie](#opsgenie-v1) ( )** | Send alert to OpsGenie using OpsGenie's v1 API. (Deprecated) | +| **[opsGenie2](#opsgenie-v2) ( )** | Send alert to OpsGenie using OpsGenie's v2 API. | +| **[pagerDuty](#pagerduty-v1) ( )** | Send the alert to PagerDuty using PagerDuty's v1 API. (Deprecated) | +| **[pagerDuty2](#pagerduty-v2) ( )** | Send the alert to PagerDuty using PagerDuty's v2 API. | +| **[post](#post) ( `urls` `...string`)** | HTTP POST JSON alert data to a specified URL. | +| **[pushover](#pushover) ( )** | Send the alert to Pushover. | +| **[quiet](#quiet) ( )** | Suppresses all error logging events from this node. | +| **[sensu](#sensu) ( )** | Send the alert to Sensu. | +| **[slack](#slack) ( )** | Send the alert to Slack. | +| **[snmpTrap](#snmptrap) ( `trapOid` `string`)** | Send the alert using SNMP traps. | +| **[stateChangesOnly](#statechangesonly) ( `maxInterval` `...time.Duration`)** | Only sends events where the state changed. Each different alert level OK, INFO, WARNING, and CRITICAL are considered different states. | +| **[talk](#talk) ( )** | Send the alert to Talk. | +| **[tcp](#tcp) ( `address` `string`)** | Send JSON alert data to a specified address over TCP. | +| **[telegram](#telegram) ( )** | Send the alert to Telegram. | +| **[topic](#topic) ( `value` `string`)** | Topic specifies the name of an alert topic to which, alerts will be published. Alert handlers can be configured per topic, see the API documentation. | +| **[victorOps](#victorops) ( )** | Send alert to VictorOps. | +| **[warn](#warn) ( `value` `ast.LambdaNode`)** | Filter expression for the WARNING alert level. An empty value indicates the level is invalid and is skipped. | +| **[warnReset](#warnreset) ( `value` `ast.LambdaNode`)** | Filter expression for resetting the WARNING alert level to lower level. 
| + + + +### Chaining methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + + +#### Available event handlers + +Different event handlers can be configured for each [AlertNode.](/kapacitor/v1.5/nodes/alert_node/) +Some handlers like Email, HipChat, Sensu, Slack, OpsGenie, VictorOps, PagerDuty, Telegram and Talk have a configuration +option, `global`, that indicates that all alerts implicitly use the handler. + +| Handler | Description | +| ------- | ----------- | +| [Alerta](#alerta) | Post alert message to Alerta. | +| [Discord](#discord) | Post alert message to Discord channel. | +| [email](#email) | Send and email with alert data. | +| [exec](#exec) | Execute a command passing alert data over STDIN. | +| [HipChat](#hipchat) | Post alert message to HipChat room. | +| [Kafka](#kafka) | Send alert to a Apache Kafka cluster. | +| [log](#log) | Log alert data to file. | +| [MQTT](#mqtt) | Post alert message to MQTT. | +| [OpsGenie v1](#opsgenie-v1) | Send alert to OpsGenie using their v1 API. (Deprecated) | +| [OpsGenie v2](#opsgenie-v2) | Send alert to OpsGenie using their v2 API. | +| [PagerDuty v1](#pagerduty-v1) | Send alert to PagerDuty using their v1 API. (Deprecated) | +| [PagerDuty v2](#pagerduty-v2) | Send alert to PagerDuty using their v2 API. | +| [post](#post) | HTTP POST data to a specified URL. | +| [Pushover](#pushover) | Send alert to Pushover. | +| [Sensu](#sensu) | Post alert message to Sensu client. | +| [Slack](#slack) | Post alert message to Slack channel. | +| [SNMPTrap](#snmptrap) | Trigger SNMP traps. | +| [Talk](#talk) | Post alert message to Talk client. | +| [tcp](#tcp) | Send data to a specified address via raw TCP. | +| [Telegram](#telegram) | Post alert message to Telegram client. | +| [VictorOps](#victorops) | Send alert to VictorOps. | + + +#### Alert event data + +Each event that gets sent to a handler contains the following alert data: + +| Name | Description | +| ---- | ----------- | +| **ID** | The ID of the alert, user defined. | +| **Message** | The alert message, user defined. | +| **Details** | The alert details, user defined HTML content. | +| **Time** | The time the alert occurred. | +| **Duration** | The duration of the alert in nanoseconds. | +| **Level** | One of OK, INFO, WARNING or CRITICAL. | +| **Data** | influxql.Result containing the data that triggered the alert. | +| **Recoverable** | Indicates whether the alert is auto-recoverable. 
Determined by the [`.noRecoveries()`](#norecoveries) property. | + +Events are sent to handlers if the alert is in a state other than 'OK' +or the alert just changed to the 'OK' state from a non 'OK' state (a.k.a. the alert recovered). +Using the [AlertNode.StateChangesOnly](/kapacitor/v1.5/nodes/alert_node/#statechangesonly) +property events will only be sent to handlers if the alert changed state. + +It is valid to configure multiple alert handlers, even with the same type. + +Example: + +```js +stream + .groupBy('service') + |alert() + .id('kapacitor/{{ index .Tags "service" }}') + .message('{{ .ID }} is {{ .Level }} value:{{ index .Fields "value" }}') + .info(lambda: "value" > 10) + .warn(lambda: "value" > 20) + .crit(lambda: "value" > 30) + .post("http://example.com/api/alert") + .post("http://another.example.com/api/alert") + .tcp("exampleendpoint.com:5678") + .email('oncall@example.com') +``` + + +Each expression maintains its own state. +The order of execution for the expressions is not considered to be deterministic. +For each point an expression may or may not be evaluated. +If no expression is true then the alert is considered to be in the OK state. + +Kapacitor supports alert reset expressions. +This way when an alert enters a state, it can only be lowered in severity if its +reset expression evaluates to true. + +**Example:** + +```js +stream + |from() + .measurement('cpu') + .where(lambda: "host" == 'serverA') + .groupBy('host') + |alert() + .info(lambda: "value" > 60) + .infoReset(lambda: "value" < 50) + .warn(lambda: "value" > 70) + .warnReset(lambda: "value" < 60) + .crit(lambda: "value" > 80) + .critReset(lambda: "value" < 70) +``` + +For example, given the following values, the corresponding alert states are: + +| Value | Alert State | +| ----- | ----------- | +| 61 | INFO | +| 73 | WARNING | +| 64 | WARNING | +| 85 | CRITICAL | +| 62 | INFO | +| 56 | INFO | +| 47 | OK | + + + +**Available Statistics:** + +* `alerts_triggered`: Total number of alerts triggered +* `oks_triggered`: Number of OK alerts triggered +* `infos_triggered`: Number of Info alerts triggered +* `warns_triggered`: Number of Warn alerts triggered +* `crits_triggered`: Number of Crit alerts triggered + + + + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Alerta + +Send the alert to Alerta. +Detailed configuration options and setup instructions are provided in the +[Alerta Event Handler](/kapacitor/v1.5/event_handlers/alerta/) article. + +_**Example kapacitor.conf**_ +```toml +[alerta] + enabled = true + url = "https://alerta.yourdomain" + token = "9hiWoDOZ9IbmHsOTeST123ABciWTIqXQVFDo63h9" + environment = "Production" + origin = "Kapacitor" +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .alerta() + .resource('Hostname or service') + .event('Something went wrong') +``` + + +### All + +Indicates an alert should trigger only if all points in a batch match the criteria. +Does not apply to stream alerts. + +```js +alert.all() +``` + + + + +### Category + +Category places this alert in a named category. +Categories are used to [inhibit](#inhibit) alerts. + + +```js +alert.category(value string) +``` + + + + +### Crit + +Filter expression for the CRITICAL alert level. +An empty value indicates the level is invalid and is skipped. 
+
+```js
+alert.crit(value ast.LambdaNode)
+```
+
+
+### CritReset
+
+Filter expression for resetting the CRITICAL alert level to a lower level.
+
+```js
+alert.critReset(value ast.LambdaNode)
+```
+
+
+### Details
+
+Template for constructing a detailed HTML message for the alert.
+The same template data that is available in the [AlertNode.Message](/kapacitor/v1.5/nodes/alert_node/#message) property
+is available here, in addition to a `Message` field that contains the rendered Message value.
+
+The intent is that the Message property be a single-line summary, while the
+Details property is a more detailed message, possibly spanning multiple lines
+and containing HTML formatting.
+
+This template is rendered using the html/template package in Go so that safe and
+valid HTML is generated.
+
+The `json` method is available within the template to convert any variable to a valid
+JSON string.
+
+_**Example:**_
+
+```js
+|alert()
+    .id('{{ .Name }}')
+    .details('''
+<h1>{{ .ID }}</h1>
+<b>{{ .Message }}</b>
+Value: {{ index .Fields "value" }}
+''')
+    .email()
+```
+
+**Default:** `{{ json . }}`
+
+```js
+alert.details(value string)
+```
+
+
+### Discord
+
+Send the alert to Discord.
+Detailed configuration options and setup instructions are provided in the
+[Discord Event Handler](/kapacitor/v1.5/event_handlers/discord/) article.
+
+_**Example kapacitor.conf**_
+```toml
+[discord]
+  enabled = true
+  url = "https://discordapp.com/api/webhooks/xxxxxxxxxxxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+```
+
+_**Example TICKscript**_
+```js
+stream
+    |alert()
+        .discord()
+        .embedTitle('Alert!')
+```
+
+
+### DurationField
+
+Optional field key to add the alert duration to the data.
+The duration is always in units of nanoseconds.
+
+```js
+alert.durationField(value string)
+```
+
+
+### Email
+
+Email the alert data to the specified "To" email addresses.
+Detailed configuration options and setup instructions are provided in the
+[Email Event Handler](/kapacitor/v1.5/event_handlers/email/) article.
+
+_**Example kapacitor.conf**_
+```toml
+[smtp]
+  enabled = true
+  host = "localhost"
+  port = 25
+  username = "xxxx"
+  password = "xxxx"
+  from = "kapacitor@example.com"
+  to = ["oncall@example.com"]
+  global = true
+  state-changes-only = true
+```
+
+_**Example TICKscript**_
+```js
+|alert()
+    .id('{{ .Name }}')
+    // Email subject
+    .message('{{ .ID }}:{{ .Level }}')
+    // Email body as HTML
+    .details('''
+<h1>{{ .ID }}</h1>
+{{ .Message }} +Value: {{ index .Fields "value" }} +''') + .email() +``` + + + + +### Exec + +Execute a command whenever an alert is triggered and pass the alert data over STDIN in JSON format. +Detailed usage instructions and examples are provided in the +[Exec Event Handler](/kapacitor/v1.5/event_handlers/exec/) article. + +```js +// Pattern +alert.exec(executable string, args ...string) + +// Example +alert.exec('/usr/bin/python', 'myscript.py') +``` + + + + +### Flapping + +Perform flap detection on the alerts. +The method used is similar method to +[Nagios flapping](https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/3/en/flapping.html) + +Each different alerting level is considered a different state. +The low and high thresholds are inverted thresholds of a percentage of state changes. +Meaning that if the percentage of state changes goes above the `high` +threshold, the alert enters a flapping state. The alert remains in the flapping state +until the percentage of state changes goes below the `low` threshold. +Typical values are low: 0.25 and high: 0.5. The percentage values represent the number state changes +over the total possible number of state changes. A percentage change of 0.5 means that the alert changed +state in half of the recorded history, and remained the same in the other half of the history. + + +```js +// Pattern +alert.flapping(low float64, high float64) + +// Example +alert.flapping(0.25, 0.5) +``` + + + + +### Kafka + +Send the alert to an Apache Kafka cluster. +Detailed configuration options and setup instructions are provided in the +[Kafka Event Handler](/kapacitor/v1.5/event_handlers/kafka/) article. + +_**Example kapacitor.conf**_ + +```toml +[[kafka]] + enabled = true + id = "localhost" + timeout = 10s +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .kafka() + .cluster('kafka-cluster') + .kafkaTopic('kafka-topic-name') +``` + + + + +### History + +Number of previous states to remember when computing flapping levels and +checking for state changes. +Minimum value is 2 in order to keep track of current and previous states. + +**Default:** 21 + +```js +// Pattern +alert.history(value int64) + +// Example +alert.history(21) +``` + + + + +### HipChat + +Send the alert to HipChat. +Detailed configuration options and setup instructions are provided in the +[HipChat Event Handler](/kapacitor/v1.5/event_handlers/hipchat/) article. + +_**Example kapacitor.conf**_ + +```toml +[hipchat] + enabled = true + url = "https://orgname.hipchat.com/v2/room" + room = "4189212" + token = "9hiWoDOZ9IbmHsOTeST123ABciWTIqXQVFDo63h9" +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .hipChat() + .room('Kapacitor') +``` + + + + +### Id + +Template for constructing a unique ID for a given alert. + +_**Available template data**_ + +| Data Name | Description | +| --------- | ----------- | +| **Name** | Measurement name. | +| **TaskName** | The name of the task | +| **Group** | Concatenation of all group-by tags of the form `[key=value,]+`. If no groupBy is performed equal to literal 'nil'. | +| **Tags** | Map of tags. Use `{{ index .Tags "key" }}`` to get a specific tag value. | +| **ServerInfo** | Information about the running server. Available nested fields are `Hostname`, `ClusterID` and `ServerID`. 
|
+
+**Default:** {{ .Name }}:{{ .Group }}
+
+```js
+// Pattern
+alert.id(value string)
+
+// Example
+alert.id('{{ .Name }}:{{ .Group }}')
+```
+
+_**Example: ID template using name and group**_
+```js
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy('cpu')
+    |alert()
+        .id('kapacitor/{{ .Name }}/{{ .Group }}')
+```
+
+Resulting ID: `kapacitor/cpu/cpu=cpu0`
+
+_**Example: ID template using a tag**_
+```js
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy('service')
+    |alert()
+        .id('kapacitor/{{ index .Tags "service" }}')
+```
+
+Resulting ID: `kapacitor/authentication`
+
+_**Example: ID template using multiple tags**_
+```js
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy('service', 'host')
+    |alert()
+        .id('kapacitor/{{ index .Tags "service" }}/{{ index .Tags "host" }}')
+```
+
+Resulting ID: `kapacitor/authentication/auth001.example.com`
+
+
+### IdField
+
+Optional field key to add to the data, containing the alert ID as a string.
+
+```js
+// Pattern
+alert.idField(value string)
+
+// Example
+alert.idField('id')
+```
+
+
+### IdTag
+
+Optional tag key to use when tagging the data with the alert ID.
+
+```js
+// Pattern
+alert.idTag(value string)
+
+// Example
+alert.idTag('alertID')
+```
+
+
+### Info
+
+Filter expression for the INFO alert level.
+An empty value indicates the level is invalid and is skipped.
+
+```js
+// Pattern
+alert.info(value ast.LambdaNode)
+
+// Example
+alert.info(lambda: "usage_idle" < 60)
+```
+
+
+### InfoReset
+
+Filter expression for resetting the INFO alert level to a lower level.
+
+```js
+// Pattern
+alert.infoReset(value ast.LambdaNode)
+
+// Example
+alert.infoReset(lambda: "usage_idle" > 60)
+```
+
+
+### Inhibit
+
+Inhibit other alerts in a category.
+The equal tags provide a list of tags that must be equal in order for an alert event to be inhibited.
+
+The following two TICKscripts demonstrate how to use the inhibit feature:
+
+```js
+//cpu_alert.tick
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy('host')
+    |alert()
+        .category('system_alerts')
+        .crit(lambda: "usage_idle" < 10.0)
+```
+
+```js
+//host_alert.tick
+stream
+    |from()
+        .measurement('uptime')
+        .groupBy('host')
+    |deadman(0.0, 1m)
+        .inhibit('system_alerts', 'host')
+```
+
+The deadman is a type of alert node and can be used to inhibit all alerts in the `system_alerts` category when triggered.
+The `host` argument to the inhibit function says that the `host` tag must be equal between the CPU alert and the host alert in order for the CPU alert to be inhibited.
+This has the effect of the deadman alerts only inhibiting CPU alerts for hosts that are currently dead.
+
+```js
+alert.inhibit(category string, equalTags ...string)
+```
+
+
+### Kafka
+
+Send the alert to a Kafka topic.
+Detailed setup and usage instructions are provided in the
+[Kafka Event Handler](/kapacitor/v1.5/event_handlers/kafka/) article.
+
+_**Example: kapacitor.conf**_
+```toml
+[[kafka]]
+  enabled = true
+  id = "default"
+  brokers = ["localhost:9092"]
+```
+
+_**Example: TICKscript**_
+```js
+stream
+    |alert()
+        .kafka()
+        .cluster('default')
+        .kafkaTopic('alerts')
+```
+
+
+### LevelField
+
+Optional field key to add to the data, containing the alert level as a string.
+
+```js
+// Pattern
+alert.levelField(value string)
+
+// Example
+alert.levelField('INFO')
+```
+
+
+### LevelTag
+
+Optional tag key to use when tagging the data with the alert level.
+ +```js +// Pattern +alert.levelTag(value string) + +// Example +alert.levelTag('level') +``` + + + + +### Log + +Log JSON alert data to file. +Detailed setup and usage instructions are provided in the +[Log Event Handler](/kapacitor/v1.5/event_handlers/log/) article. + +_**Example TICKscript**_ +```js +stream + |alert() + .log('/tmp/alert') + .mode(0644) +``` + + + + +### Message + +Template for constructing a meaningful message for the alert. + +_**Available template data**_ + +| Data Name | Description | +| --------- | ----------- | +| **ID** | The ID of the alert. | +| **Name** | Measurement name. | +| **TaskName** | The name of the task | +| **Group** | Concatenation of all group-by tags of the form 1. If no groupBy is performed equal to literal 'nil'. | +| **Tags** | Map of tags. Use `{{ index .Tags "key" }}` to get a specific tag value. | +| **Level** | Alert Level - `INFO`, `WARNING`, `CRITICAL`. | +| **Fields** | Map of fields. Use `{{ index .Fields "key" }}` to get a specific field value. | +| **Time** | The time of the point that triggered the event. Default format is `YYYY-MM-DD 00:00:00 +0000 UTC` | +| **Duration** | The duration of the alert. | + + +**Default:** `{{ .ID }} is {{ .Level }}` + +_**Example:**_ + +```js +stream + |from() + .measurement('cpu') + .groupBy('service', 'host') + |alert() + .id('{{ index .Tags "service" }}/{{ index .Tags "host" }}') + .message('{{ .ID }} is {{ .Level}} value: {{ index .Fields "value" }}') +``` + +Resulting Message: authentication/auth001.example.com is CRITICAL value:42 + + + + +### MessageField + +Optional field key to add to the data containing the alert message. + +```js +// Pattern +alert.messageField(value string) + +// Example +alert.messageField('message') +``` + + + + +### MQTT + +Send alert to an MQTT broker. +Detailed configuration options and usage instructions are provided in the +[MQTT Event Handler](/kapacitor/v1.5/event_handlers/mqtt/) article. + +_**Example kapacitor.conf**_ +```toml +[[mqtt]] + enabled = true + name = "localhost" + default = true + url = "tcp://localhost:1883" + client-id = "kapacitor" + username = "myusername" + password = "mysupersecretpassw0rd" +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .mqtt('topic') + .brokerName('localhost') +``` + + + + +### NoRecoveries + +Do not send recovery alerts. Sets `recoverable` alert data field to `false`. + +```js +alert.noRecoveries() +``` + + + + +### OpsGenie v1 +Send alert to OpsGenie using OpsGenie's v1 API. +Detailed configuration options and setup instructions are provided in the +[OpsGenie v1 Event Handler](/kapacitor/v1.5/event_handlers/opsgenie/v1/) article. + +_**Example kapacitor.conf**_ +```toml +[opsgenie] + enabled = true + api-key = "xxxxx" + teams = ["everyone"] + recipients = ["jim", "bob"] +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .opsGenie() +``` + + + + +### OpsGenie v2 +Send alert to OpsGenie using OpsGenie's v2 API. +Detailed configuration options and setup instructions are provided in the +[OpsGenie v2 Event Handler](/kapacitor/v1.5/event_handlers/opsgenie/v2/) article. + +_**Example kapacitor.conf**_ +```toml +[opsgenie2] + enabled = true + api-key = "xxxxx" + teams = ["everyone"] + recipients = ["jim", "bob"] +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .opsGenie2() +``` + + + + +### PagerDuty v1 +Send the alert to PagerDuty using PagerDuty's v1 API. 
+Detailed configuration options and setup instructions are provided in the +[PagerDuty v1 Event Handler](/kapacitor/v1.5/event_handlers/pagerduty/v1/) article. + +_**Example kapacitor.conf**_ +```toml +[pagerduty] + enabled = true + service-key = "xxxx" + url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json" +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .pagerDuty() +``` + + + + +### PagerDuty v2 +Send the alert to PagerDuty using PagerDuty's v2 API. +Detailed configuration options and setup instructions are provided in the +[PagerDuty v2 Event Handler](/kapacitor/v1.5/event_handlers/pagerduty/v2/) article. + +_**Example kapacitor.conf**_ +```toml +[pagerduty2] + enabled = true + routing-key = "xxxx" + url = "https://events.pagerduty.com/v2/enqueue" +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .pagerDuty2() +``` + + + + +### Post + +HTTP POST JSON alert data to a specified URL. +Detailed configuration options and setup instructions are provided in the +[Post Event Handler](/kapacitor/v1.5/event_handlers/post/) article. + +_**Example TICKscript**_ + +```js +stream + |alert() + .post('http://example.com') + .captureResponse() +``` + + + + +### Pushover + +Send the alert to Pushover. +Detailed configuration options and setup instructions are provided in the +[Pushover Event Handler](/kapacitor/v1.5/event_handlers/pushover/) article. + +_**Example kapacitor.conf**_ +```toml +[pushover] + enabled = true + token = "9hiWoDOZ9IbmHsOTeST123ABciWTIqXQVFDo63h9" + user_key = "Pushover" +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .pushover() + .sound('siren') + .user_key('other user') + .device('mydev') + .title('mytitle') + .uRL('myurl') + .URLTitle('mytitle') +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +alert.quiet() +``` + + + + +### Sensu + +Send the alert to Sensu. +Detailed configuration options and setup instructions are provided in the +[Sensu Event Handler](/kapacitor/v1.5/event_handlers/sensu/) article. + +_**Example kapacitor.conf*_ +```toml +[sensu] + enabled = true + url = "http://sensu:3030" + source = "Kapacitor" + handlers = ["sns","slack"] +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .sensu() + .handlers('sns','slack') +``` + + + + +### Slack + +Send the alert to Slack. +Detailed configuration options and setup instructions are provided in the +[Slack Event Handler](/kapacitor/v1.5/event_handlers/slack/) article. + +_**Example kapacitor.conf**_ +```toml +[slack] + enabled = true + url = "https://hooks.slack.com/services/xxxxxxxxx/xxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxx" + channel = "#general" +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .slack() + .channel('#alerts') + .channel('@jsmith') +``` + + + + + +### SnmpTrap + +Send the alert using SNMP traps. +Detailed configuration options and setup instructions are provided in the +[SNMP Trap Event Handler](/kapacitor/v1.5/event_handlers/snmptrap/) article. + +_**Example kapacitor.conf**_ +```toml +[snmptrap] + enabled = true + addr = "127.0.0.1:9162" + community = "public" +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .snmpTrap('1.3.6.1.2.1.1') + .data('1.3.6.1.2.1.1.6', 'i', '{{ index .Field "value" }}') + .data('1.3.6.1.2.1.1.7', 's', '{{ .Message }}') +``` + + + + +### StateChangesOnly + +Only sends events where the state changed. +Each different alert level OK, INFO, WARNING, and CRITICAL +are considered different states. 
+ +_**Example**_ +```js +stream + |from() + .measurement('cpu') + |window() + .period(10s) + .every(10s) + |alert() + .crit(lambda: "value" > 10) + .stateChangesOnly() + .slack() +``` + +If the "value" is greater than 10 for a total of 60s, then +only two events will be sent. First, when the value crosses +the threshold, and second, when it falls back into an OK state. +Without stateChangesOnly, the alert would have triggered 7 times: +6 times for each 10s period where the condition was met and once more +for the recovery. + +An optional maximum interval duration can be provided. +An event will not be ignored (aka trigger an alert) if more than the maximum interval has elapsed +since the last alert. + +_**Example**_ +```js +stream + // ... + |alert() + .crit(lambda: "value" > 10) + .stateChangesOnly(10m) + .slack() +``` + + + + +### Talk + +Send the alert to Talk. +Detailed configuration options and setup instructions are provided in the +[Talk Event Handler](/kapacitor/v1.5/event_handlers/talk/) article. + +_**Example kapacitor.conf**_ +```toml +[talk] + enabled = true + url = "https://jianliao.com/v2/services/webhook/uuid" + author_name = "Kapacitor" +``` + +_**Example TICKscript**_ +```js +stream + alert() + .talk() +``` + + + + +### TCP + +Send JSON alert data to a specified address over TCP. +Detailed usage instructions are provided in the +[TCPEvent Handler](/kapacitor/v1.5/event_handlers/tcp/) article. + +```js +// Pattern +alert.tcp(address string) + +// Example +alert.tcp('127.0.0.1:7777') +``` + + + + +### Telegram + +Send the alert to Telegram. +Detailed configuration options and setup instructions are provided in the +[Telegram Event Handler](/kapacitor/v1.5/event_handlers/telegram/) article. + +_**Example kapacitor.conf**_ +```toml +[telegram] + enabled = true + token = "123456789:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + chat-id = "xxxxxxxxx" + parse-mode = "Markdown" + disable-web-page-preview = true + disable-notification = false +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .telegram() + .chatId('xxxxxxx') +``` + + + + + +### Topic + +Topic specifies the name of an alert topic to which alerts will be published. +Alert handlers can be configured per topic. See the [API documentation](/kapacitor/v1.5/working/api#topics). + +```js +// Pattern +alert.topic(value string) + +// Example +alert.topic('cpu') +``` + + + + +### VictorOps + +Send alert to VictorOps. +Detailed configuration options and setup instructions are provided in the +[VictorOps Event Handler](/kapacitor/v1.5/event_handlers/victorops/) article. + +_**Example kapacitor.conf**_ +```toml +[victorops] + enabled = true + api-key = "xxxxx" + routing-key = "everyone" +``` + +_**Example TICKscript**_ +```js +stream + |alert() + .victorOps() + .routingKey('team_rocket') +``` + + + + + +### Warn + +Filter expression for the WARNING alert level. +An empty value indicates the level is invalid and is skipped. + +```js +// Pattern +alert.warn(value ast.LambdaNode) + +// Example +alert.warn(lambda: 'usage_idle' < 20) +``` + + + + +### WarnReset + +Filter expression for resetting the WARNING alert level to lower level. + +```js +// Pattern +alert.warnReset(value ast.LambdaNode) + +// Example +alert.warnReset(lambda: 'usage_idle' > 20) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. 
+ +```js +alert|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + +```js +alert|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + +```js +alert|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + + +```js +alert|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + +```js +alert|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + +```js +alert|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + +```js +alert|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... 
+``` + + +```js +alert|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + +```js +alert|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + +```js +alert|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + +```js +alert|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + +```js +alert|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + +```js +alert|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + +```js +alert|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + +```js +alert|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + +```js +alert|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + +```js +alert|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + +```js +alert|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + +```js + |groupBy(*) +``` + + +```js +alert|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + +```js +alert|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + +```js +alert|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. 
+For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + +```js +alert|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + +```js +alert|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + +```js +alert|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + +```js +alert|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + +```js +alert|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + +```js +alert|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + +```js +alert|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Max + +Select the maximum point. + +```js +alert|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + +```js +alert|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + +```js +alert|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + +```js +alert|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + +```js +alert|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + +```js +alert|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + +```js +alert|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + +```js +alert|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + +```js +alert|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. 
+ +```js +alert|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + +```js +alert|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + +```js +alert|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + +```js +alert|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + +```js +alert|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + +```js +alert|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + +```js +alert|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + +```js +alert|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + +```js +alert|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + +```js +alert|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + +```js +alert|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + +```js +alert|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/barrier_node.md b/content/kapacitor/v1.5/nodes/barrier_node.md new file mode 100644 index 000000000..e5a60a4cd --- /dev/null +++ b/content/kapacitor/v1.5/nodes/barrier_node.md @@ -0,0 +1,934 @@ +--- +title: BarrierNode +description: BarrierNode emits a barrier with the current time, according to the system clock, and allows pipelines to be forced in the absence of data traffic. The barrier emitted will be based on either idle time since the last received message or on a periodic timer based on the system clock. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: BarrierNode + identifier: barrier_node + weight: 20 + parent: nodes +--- + +The `barrier` node emits a barrier based on one of the following: + +- Idle time since the last data point was received +- Periodic timer based on the system time + +Barriers let you execute pipelines without data traffic. Data points received after a specified barrier are dropped. 
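+
+For contrast with the idle-based example below, here is a minimal sketch of a period-based barrier. It assumes a stream of `cpu` points with a `usage_idle` field, names borrowed from examples elsewhere in these docs; it emits a barrier every 10 seconds of system-clock time so the downstream window is serviced even when no new points arrive:
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |barrier()
+        // Emit a barrier every 10 seconds of wall-clock time,
+        // regardless of whether data is flowing.
+        .period(10s)
+    |window()
+        .period(10s)
+        .every(10s)
+    |count('usage_idle')
+```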
+ +##### Example barrier based on idle time + +```js +stream + |from() + .measurement('cpu') + |barrier() + .idle(5s) + .delete(TRUE) + |window() + .period(10s) + .every(5s) + |top(10, 'value') + //Post the top 10 results over the last 10s updated every 5s. + |httpPost('http://example.com/api/top10') +``` +> **Note:** In .delete(TRUE), TRUE must be uppercase. + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **barrier ( )** | Create a new Barrier node that emits a BarrierMessage periodically | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[idle](#idle) ( `value` `time.Duration`)** | Emit barrier based on idle time since the last received message. Must be greater than zero. | +| **[period](#period) ( `value` `time.Duration`)** | Emit barrier based on periodic timer. The timer is based on system clock rather than message time. Must be greater than zero. | +| **[delete](#delete) ( `value` `Boolean`)** | Delete the group after processing each barrier. | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Idle + +Emit barrier based on idle time since the last received message. +Must be greater than zero. + + +```js +barrier.idle(value time.Duration) +``` + + + + +### Period + +Emit barrier based on periodic timer. The timer is based on system +clock rather than message time. +Must be greater than zero. + + +```js +barrier.period(value time.Duration) +``` + + + +### Delete + +Delete indicates that the group should be deleted after processing each barrier. +This includes the barrier node itself, meaning that if `delete` is `true`, the barrier +is triggered only once for each group and the barrier node forgets about the group. +The group will be created again if a new point is received for the group. + +This is useful if you have increasing cardinality over time as once a barrier is +triggered for a group it is then deleted, freeing any resources managing the group. 
+ +```js +barrier.delete() +``` + +{{% note %}} +`delete` will free system resources used for managing groups and can help to maintain +the overall performance of Kapacitor, but these gains are minimal. +For information about optimizing tasks, see [How can I optimize Kapacitor tasks?](/kapacitor/v1.5/troubleshooting/frequently-asked-questions/#how-can-i-optimize-kapacitor-tasks) +{{% /note %}} + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +barrier.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +barrier|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +barrier|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +barrier|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +barrier|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +barrier|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +barrier|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +barrier|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. 
+ +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +barrier|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +barrier|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +barrier|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +barrier|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +barrier|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +barrier|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +barrier|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +barrier|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +barrier|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +barrier|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +barrier|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +barrier|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. 
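+
+For example, a sketch using a batch query (the database, retention policy,
+and measurement are illustrative): `h` points are forecast at `interval`
+spacing, and `m` is the seasonal pattern length in points (0 for no
+seasonal pattern):
+
+```js
+batch
+    |query('SELECT mean("value") FROM "telegraf"."autogen"."requests"')
+        .period(7d)
+        .every(1h)
+        .groupBy(time(1h))
+    // Forecast 4 points, one hour apart, with no seasonal pattern.
+    |holtWinters('mean', 4, 0, 1h)
+    |httpOut('forecast')
+```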
+ + +```js +barrier|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +barrier|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +barrier|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +barrier|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +barrier|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +barrier|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +barrier|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +barrier|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +barrier|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +barrier|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +barrier|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +barrier|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +barrier|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +barrier|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. 
+ + +```js +barrier|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +barrier|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +barrier|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +barrier|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +barrier|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +barrier|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +barrier|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +barrier|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +barrier|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +barrier|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +barrier|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +barrier|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +barrier|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +barrier|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +barrier|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. 
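+
+For example, a minimal sketch that keeps only points from a single host
+(the tag name and value are illustrative):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |where(lambda: "host" == 'server001.example.com')
+```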
+ + +```js +barrier|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +barrier|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/batch_node.md b/content/kapacitor/v1.5/nodes/batch_node.md new file mode 100644 index 000000000..7ff83ef5b --- /dev/null +++ b/content/kapacitor/v1.5/nodes/batch_node.md @@ -0,0 +1,202 @@ +--- +title: BatchNode +description: BatchNode handles creating several child QueryNodes. Each call to `query` creates a child batch node that can further be configured. The `batch` variable in batch tasks is an instance of BatchNode. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: BatchNode + identifier: batch_node + weight: 4 + parent: nodes +--- + +The `batch` node handles the creation of several child QueryNodes. +Each call to [`query`](/kapacitor/v1.5/nodes/query_node) creates a child batch node that +can further be configured. *See [QueryNode](/kapacitor/v1.5/nodes/query_node/)*. +The `batch` variable in batch tasks is an instance of +a [BatchNode.](/kapacitor/v1.5/nodes/batch_node/) + +> A **QueryNode** is required when using **BatchNode**. +> It defines the source and schedule for batch data and should be used before +> any other [chaining methods](#chaining-methods-1). + +Example: + + +```js + var errors = batch + |query('SELECT value from errors') + ... + var views = batch + |query('SELECT value from views') + ... +``` + +Available Statistics: + +* query_errors: number of errors when querying +* batches_queried: number of batches returned from queries +* points_queried: total number of points in batches + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **batch** | Has no constructor signature. | + +### Property Methods + +| Setters | Description | +|:--------|:------------| +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | + + +### Chaining Methods +[Deadman](#deadman), +[Query](#query), +[Stats](#stats) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Quiet + +Suppress all error logging events from this node. + +```js +batch.quiet() +``` + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = batch + |query()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = batch + |query()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. 
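+    // stats(10s) emits the node's internal counters every 10s; derivative
+    // turns the cumulative 'emitted' counter into a points-per-10s rate,
+    // which the alert below compares against the threshold.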
+ data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = batch + |query()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = batch + |query()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +batch|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Query + +The query to execute. Must not contain a time condition +in the `WHERE` clause or contain a `GROUP BY` clause. +The time conditions are added dynamically according to the period, offset and schedule. +The `GROUP BY` clause is added dynamically according to the dimensions +passed to the `groupBy` method. + + +```js +batch|query(q string) +``` + +Returns: [QueryNode](/kapacitor/v1.5/nodes/query_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +batch|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + diff --git a/content/kapacitor/v1.5/nodes/change_detect_node.md b/content/kapacitor/v1.5/nodes/change_detect_node.md new file mode 100644 index 000000000..248acd85f --- /dev/null +++ b/content/kapacitor/v1.5/nodes/change_detect_node.md @@ -0,0 +1,868 @@ +--- +title: ChangeDetectNode (Kapacitor TICKscript node) +description: ChangeDetectNode creates a new node that only emits new points if different from the previous point. + +menu: + kapacitor_1_5_ref: + name: ChangeDetectNode + identifier: change_detect_node + weight: 40 + parent: nodes +--- + +The `changeDetect` node creates a new node that emits new points only if different from the previous point. +The `changeDetect` node can monitor multiple fields. + +##### Example changeDetect node +```js +stream + |from().measurement('packets') + |changeDetect('field_a','field_b') +``` + +### Constructor + +| Chaining Method | Description | +|:--------------- |:----------- | +| **changeDetect ( `fields` `...string`)** | Create a new node that emits new points only if different from the previous point | + +### Property methods + +| Setters | Description | +|:------- |:----------- | +| **[quiet](#quiet) ( )** | Suppresses all error logging events from this node. 
| + + + +### Chaining methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Quiet + +Suppress all error logging events from this node. + + +```js +changeDetect.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +changeDetect|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +changeDetect|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +changeDetect|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point.. + + +```js +changeDetect|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +changeDetect|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +changeDetect|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +changeDetect|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. 
+- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +changeDetect|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +changeDetect|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +changeDetect|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +changeDetect|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +changeDetect|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +changeDetect|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +changeDetect|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +changeDetect|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. 
+A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +changeDetect|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +changeDetect|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +changeDetect|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +changeDetect|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +changeDetect|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +changeDetect|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +changeDetect|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +changeDetect|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an InfluxDB output node that will store the incoming data into InfluxDB. + + +```js +changeDetect|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +changeDetect|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a Kubernetes cluster. + + +```js +changeDetect|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an Kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +changeDetect|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. 
+ + +```js +changeDetect|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +changeDetect|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +changeDetect|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +changeDetect|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector.. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +changeDetect|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +changeDetect|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +changeDetect|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +changeDetect|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +changeDetect|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +changeDetect|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +changeDetect|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +changeDetect|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +changeDetect|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +changeDetect|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +changeDetect|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +changeDetect|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. 
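+
+For example, a sketch that computes a windowed standard deviation and
+alerts on it (the measurement, field, and threshold are illustrative; the
+output field is named 'stddev'):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |window()
+        .period(1m)
+        .every(1m)
+    |stddev('value')
+    |alert()
+        .crit(lambda: "stddev" > 10.0)
+```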
+
+
+```js
+changeDetect|stddev(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Sum
+
+Compute the sum of all values.
+
+
+```js
+changeDetect|sum(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### SwarmAutoscale
+
+Create a node that can trigger autoscale events for a Docker swarm cluster.
+
+
+```js
+changeDetect|swarmAutoscale()
+```
+
+Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/)
+
+
+
+### Top
+
+Select the top `num` points for `field` and sort by any extra tags or fields.
+
+
+```js
+changeDetect|top(num int64, field string, fieldsAndTags ...string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Union
+
+Perform the union of this node and all other given nodes.
+
+
+```js
+changeDetect|union(node ...Node)
+```
+
+Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/)
+
+
+
+### Where
+
+Create a new node that filters the data stream by a given expression.
+
+
+```js
+changeDetect|where(expression ast.LambdaNode)
+```
+
+Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/)
+
+
+
+### Window
+
+Create a new node that windows the stream by time.
+
+NOTE: Window can only be applied to stream edges.
+
+
+```js
+changeDetect|window()
+```
+
+Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/)
+
+
diff --git a/content/kapacitor/v1.5/nodes/combine_node.md b/content/kapacitor/v1.5/nodes/combine_node.md
new file mode 100644
index 000000000..37deaf0d4
--- /dev/null
+++ b/content/kapacitor/v1.5/nodes/combine_node.md
@@ -0,0 +1,951 @@
+---
+title: CombineNode
+description: CombineNode combines data from a single node with itself. Points with the same time are grouped and then combinations are created. The size of the combinations is defined by how many expressions are given.
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: CombineNode
+    identifier: combine_node
+    weight: 40
+    parent: nodes
+---
+
+The `combine` node combines data from a single node with itself.
+Points with the same time are grouped and then combinations are created.
+The size of the combinations is defined by how many expressions are given.
+Combinations are order-independent and will never include the same point multiple times.
+
+In the following example, data points for the `login` service are combined with
+the data points from all other services:
+
+```js
+stream
+    |from()
+        .measurement('request_latency')
+    |combine(lambda: "service" == 'login', lambda: TRUE)
+        .as('login', 'other')
+        // Points within 1 second of each other are considered to have the same time.
+        .tolerance(1s)
+        // Delimiter for new field and tag names.
+        .delimiter('.')
+    // Group by the new 'other.service' tag.
+    |groupBy('other.service')
+    // Both the "value" fields from each data point have been prefixed
+    // with the respective names 'login' and 'other'.
+    |eval(lambda: "login.value" / "other.value")
+        .as('ratio')
+    ...
+```
+
+In the following example, all combination pairs are created:
+
+```js
+|combine(lambda: TRUE, lambda: TRUE)
+    .as('login', 'other')
+```
+
+In the following example, all combination triples are created:
+
+```js
+|combine(lambda: TRUE, lambda: TRUE, lambda: TRUE)
+    .as('login', 'other', 'another')
+```
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **combine ( `expressions` `...ast.LambdaNode`)** | Combine this node with itself. The data is combined on timestamp. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[as](#as) ( `names` `...string`)** | Prefix names for all fields from the respective nodes. Each field from the parent nodes will be prefixed with the provided name and a '.'. See the example above. |
+| **[delimiter](#delimiter) ( `value` `string`)** | The delimiter between the As names and existing field and tag keys. It can be the empty string, but then you are responsible for ensuring name conflicts cannot occur. |
+| **[max](#max) ( `value` `int64`)** | Maximum number of possible combinations. Since the number of possible combinations can grow very rapidly, you can set a maximum number of combinations allowed. If the max is exceeded, an error is logged and the combinations are not calculated. Default: 10,000 |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+| **[tolerance](#tolerance) ( `value` `time.Duration`)** | The maximum duration of time that two incoming points can be apart and still be considered to be equal in time. The joined data point's time will be rounded to the nearest multiple of the tolerance duration. |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### As
+
+Prefix names for all fields from the respective nodes.
+Each field from the parent nodes will be prefixed with the provided name and a `.`.
+See the example above.
+
+The names cannot have a dot `.` character.
+
+
+
+```js
+combine.as(names ...string)
+```
+
+
+
+
+### Delimiter
+
+The delimiter between the As names and existing field and tag keys.
+It can be the empty string, but then you are responsible for ensuring name conflicts cannot occur.
+
+
+```js
+combine.delimiter(value string)
+```
+
+
+
+
+### Max
+
+Maximum number of possible combinations.
+Since the number of possible combinations can grow very rapidly,
+you can set a maximum number of combinations allowed.
+If the max is exceeded, an error is logged and the combinations are not calculated.
+
+**Default:** 10,000
+
+
+```js
+combine.max(value int64)
+```
+
+
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+ +```js +combine.quiet() +``` + + + + +### Tolerance + +The maximum duration of time that two incoming points +can be apart and still be considered to be equal in time. +The joined data point's time will be rounded to the nearest +multiple of the tolerance duration. + + +```js +combine.tolerance(value time.Duration) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +combine|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +combine|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +combine|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +combine|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +combine|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +combine|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +combine|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... 
+ // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +combine|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +combine|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +combine|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +combine|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +combine|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +combine|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +combine|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +combine|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +combine|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +combine|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +combine|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +combine|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. 
+ + +```js +combine|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +combine|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +combine|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +combine|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +combine|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +combine|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a Kubernetes cluster. + + +```js +combine|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +combine|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +combine|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +combine|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +combine|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +combine|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +combine|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +combine|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. 
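+
+For example, a sketch that smooths a series over its last 10 points
+(the measurement and field are illustrative); the first 9 points produce
+no output while the window fills:
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |movingAverage('value', 10)
+```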
+ + +```js +combine|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +combine|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +combine|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +combine|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +combine|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +combine|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +combine|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +combine|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +combine|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +combine|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +combine|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +combine|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +combine|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +combine|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +combine|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. 
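+
+For example, a sketch of a 10-second window that emits every 5 seconds,
+so consecutive windows overlap by half (the measurement and field are
+illustrative):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |window()
+        .period(10s)
+        .every(5s)
+    |mean('value')
+```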
+
+
+```js
+combine|window()
+```
+
+Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/)
+
+
diff --git a/content/kapacitor/v1.5/nodes/default_node.md b/content/kapacitor/v1.5/nodes/default_node.md
new file mode 100644
index 000000000..412f4ec0b
--- /dev/null
+++ b/content/kapacitor/v1.5/nodes/default_node.md
@@ -0,0 +1,903 @@
+---
+title: DefaultNode
+description: DefaultNode sets defaults of fields and tags on data points.
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: DefaultNode
+    identifier: default_node
+    weight: 50
+    parent: nodes
+---
+
+The `default` node sets default values for fields and tags on data points.
+
+Example:
+
+```js
+stream
+    |default()
+        .field('value', 0.0)
+        .tag('host', '')
+```
+
+The above example will set the field `value` to float64(0) if it does not already exist.
+It will also set the tag `host` to string("") if it does not already exist.
+
+Available Statistics:
+
+* fields_defaulted: number of fields that were missing
+* tags_defaulted: number of tags that were missing
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **default ( )** | Create a node that can set defaults for missing tags or fields. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[field](#field) ( `name` `string`, `value` `interface{}`)** | Define a field default. |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+| **[tag](#tag) ( `name` `string`, `value` `string`)** | Define a tag default. |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Max](#max),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### Field
+
+Define a field default.
+
+
+```js
+default.field(name string, value interface{})
+```
+
+
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+
+```js
+default.quiet()
+```
+
+
+
+
+### Tag
+
+Define a tag default.
+
+
+```js
+default.tag(name string, value string)
+```
+
+
+
+
+## Chaining Methods
+
+Chaining methods create a new node in the pipeline as a child of the calling node.
+They do not modify the calling node.
+Chaining methods are marked using the `|` operator.
+
+
+### Alert
+
+Create an alert node, which can trigger alerts.
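+
+For example, a minimal sketch with warn and crit thresholds (the field
+name, levels, and log path are illustrative):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |alert()
+        .warn(lambda: "value" > 70.0)
+        .crit(lambda: "value" > 90.0)
+        .log('/tmp/cpu_alerts.log')
+```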
+ + +```js +default|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +default|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +default|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +default|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +default|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +default|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +default|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... 
+``` + + + +```js +default|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +default|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +default|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +default|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +default|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +default|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +default|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +default|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +default|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +default|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +default|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +default|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +default|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +default|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. 
+For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +default|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +default|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +default|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +default|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +default|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +default|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +default|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +default|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +default|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +default|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +default|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +default|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +default|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +default|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +default|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. 
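+
+For example, a minimal sketch that keeps every tenth point (the rate may also be a duration such as `10s`; the measurement name is illustrative):
+
+```js
+stream
+    |from()
+        .measurement('requests')
+    |sample(10)
+```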
+ + +```js +default|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +default|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +default|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +default|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +default|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +default|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +default|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +default|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +default|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +default|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +default|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +default|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +default|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +default|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/delete_node.md b/content/kapacitor/v1.5/nodes/delete_node.md new file mode 100644 index 000000000..4d9923360 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/delete_node.md @@ -0,0 +1,901 @@ +--- +title: DeleteNode +description: DeleteNode deletes fields and tags from data points. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: DeleteNode + identifier: delete_node + weight: 60 + parent: nodes +--- + +The `delete` node deletes fields and tags from data points. 
+ +Example: +```js +stream + |delete() + .field('value') + .tag('host') +``` + +The above example will remove the field `value` and the tag `host`, from each point. + +Available Statistics: + +* fields_deleted: number of fields that were deleted. Only counts if the field already existed. +* tags_deleted: number of tags that were deleted. Only counts if the tag already existed. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **delete ( )** | Create a node that can delete tags or fields. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[field](#field) ( `name` `string`)** | Delete a field. | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[tag](#tag) ( `name` `string`)** | Delete a tag. | + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Field + +Delete a field. + + +```js +delete.field(name string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +delete.quiet() +``` + + + + +### Tag + +Delete a tag. + + +```js +delete.tag(name string) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +delete|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +delete|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +delete|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. 
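+
+For example, a minimal sketch that emits a point only when the `state` field differs from the previous point (the measurement and field names are illustrative):
+
+```js
+stream
+    |from()
+        .measurement('machine_status')
+    |changeDetect('state')
+```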
+ +```js +delete|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +delete|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +delete|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +delete|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +delete|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +delete|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +delete|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. 
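+
+For example, a minimal sketch that converts a monotonically increasing counter into a per-second rate (the measurement and field names are illustrative):
+
+```js
+stream
+    |from()
+        .measurement('net')
+    |derivative('bytes_recv')
+        .unit(1s)
+        .nonNegative()
+```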
+ + +```js +delete|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +delete|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +delete|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +delete|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +delete|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +delete|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +delete|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +delete|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +delete|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +delete|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +delete|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +delete|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. 
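+
+For example, a minimal sketch that POSTs each point it receives (the URL is illustrative):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |httpPost('http://localhost:8080/metrics')
+```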
+ + +```js +delete|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +delete|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +delete|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +delete|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +delete|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +delete|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +delete|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +delete|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +delete|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +delete|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +delete|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +delete|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +delete|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +delete|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +delete|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +delete|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +delete|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. 
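+
+For example, a minimal sketch that computes the spread of `value` over one-minute windows (the measurement, field, and durations are illustrative):
+
+```js
+stream
+    |from()
+        .measurement('temperature')
+    |window()
+        .period(1m)
+        .every(1m)
+    |spread('value')
+        .as('value_spread')
+```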
+
+
+```js
+delete|spread(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### StateCount
+
+Create a node that tracks the number of consecutive points in a given state.
+
+
+```js
+delete|stateCount(expression ast.LambdaNode)
+```
+
+Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/)
+
+
+
+### StateDuration
+
+Create a node that tracks the duration in a given state.
+
+
+```js
+delete|stateDuration(expression ast.LambdaNode)
+```
+
+Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/)
+
+
+
+### Stats
+
+Create a new stream of data that contains the internal statistics of the node.
+The interval represents how often to emit the statistics based on real time.
+This means the interval time is independent of the times of the data points the source node is receiving.
+
+
+```js
+delete|stats(interval time.Duration)
+```
+
+Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/)
+
+
+
+### Stddev
+
+Compute the standard deviation.
+
+
+```js
+delete|stddev(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Sum
+
+Compute the sum of all values.
+
+
+```js
+delete|sum(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### SwarmAutoscale
+
+Create a node that can trigger autoscale events for a Docker swarm cluster.
+
+
+```js
+delete|swarmAutoscale()
+```
+
+Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/)
+
+
+
+### Top
+
+Select the top `num` points for `field` and sort by any extra tags or fields.
+
+
+```js
+delete|top(num int64, field string, fieldsAndTags ...string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Union
+
+Perform the union of this node and all other given nodes.
+
+
+```js
+delete|union(node ...Node)
+```
+
+Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/)
+
+
+
+### Where
+
+Create a new node that filters the data stream by a given expression.
+
+
+```js
+delete|where(expression ast.LambdaNode)
+```
+
+Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/)
+
+
+
+### Window
+
+Create a new node that windows the stream by time.
+
+NOTE: Window can only be applied to stream edges.
+
+
+```js
+delete|window()
+```
+
+Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/)
+
+
diff --git a/content/kapacitor/v1.5/nodes/derivative_node.md b/content/kapacitor/v1.5/nodes/derivative_node.md
new file mode 100644
index 000000000..542e1056f
--- /dev/null
+++ b/content/kapacitor/v1.5/nodes/derivative_node.md
@@ -0,0 +1,926 @@
+---
+title: DerivativeNode
+description: DerivativeNode computes the derivative of a stream or batch. The derivative is computed on a single field and behaves similarly to the InfluxQL derivative function.
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: DerivativeNode
+    identifier: derivative_node
+    weight: 70
+    parent: nodes
+---
+
+The `derivative` node computes the derivative of a stream or batch.
+The derivative is computed on a single field
+and behaves similarly to the InfluxQL derivative
+function. Kapacitor has its own implementation
+of the derivative function and, as a result, it is
+not part of the normal InfluxQL functions.
+
+Example:
+
+
+```js
+stream
+    |from()
+        .measurement('net_rx_packets')
+    |derivative('value')
+        .unit(1s) // default
+        .nonNegative()
+    ...
+```
+
+Computes the derivative via:
+`(current - previous) / (time_difference / unit)`
+
+The derivative is computed for each point, and
+because of boundary conditions the first point is
+dropped.
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **derivative ( `field` `string`)** | Create a new node that computes the derivative of adjacent points. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[as](#as) ( `value` `string`)** | The new name of the derivative field. Default is the name of the field used when calculating the derivative. |
+| **[nonNegative](#nonnegative) ( )** | If called, the derivative will skip negative results. |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+| **[unit](#unit) ( `value` `time.Duration`)** | The time unit of the resulting derivative value. Default: 1s |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Max](#max),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### As
+
+The new name of the derivative field.
+Default is the name of the field used
+when calculating the derivative.
+
+
+```js
+derivative.as(value string)
+```
+
+
+
+
+### NonNegative
+
+If called, the derivative will skip negative results.
+
+
+```js
+derivative.nonNegative()
+```
+
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+
+```js
+derivative.quiet()
+```
+
+
+
+
+### Unit
+
+The time unit of the resulting derivative value.
+Default: 1s
+
+
+```js
+derivative.unit(value time.Duration)
+```
+
+
+
+
+## Chaining Methods
+
+Chaining methods create a new node in the pipeline as a child of the calling node.
+They do not modify the calling node.
+Chaining methods are marked using the `|` operator.
+
+
+### Alert
+
+Create an alert node, which can trigger alerts.
+
+
+```js
+derivative|alert()
+```
+
+Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/)
+
+
+
+### Barrier
+
+Create a new Barrier node that emits a BarrierMessage periodically.
+
+One BarrierMessage will be emitted every period duration.
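+
+For example, a minimal sketch that emits a barrier every ten seconds so downstream nodes can advance even when a group stops receiving points (the measurement name and duration are illustrative):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |barrier()
+        .period(10s)
+```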
+ + +```js +derivative|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +derivative|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +derivative|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +derivative|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +derivative|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +derivative|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... 
+``` + + + +```js +derivative|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +derivative|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +derivative|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +derivative|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +derivative|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +derivative|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +derivative|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +derivative|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +derivative|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +derivative|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +derivative|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +derivative|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +derivative|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +derivative|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. 
+For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +derivative|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +derivative|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +derivative|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +derivative|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +derivative|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +derivative|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +derivative|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +derivative|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +derivative|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +derivative|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +derivative|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +derivative|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +derivative|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +derivative|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +derivative|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. 
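+
+For example, a minimal sketch that emits at most one point every ten seconds (the rate may also be an integer count; the measurement name is illustrative):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |sample(10s)
+```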
+ + +```js +derivative|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +derivative|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +derivative|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +derivative|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +derivative|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +derivative|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +derivative|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +derivative|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +derivative|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +derivative|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +derivative|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +derivative|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +derivative|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +derivative|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/ec2_autoscale_node.md b/content/kapacitor/v1.5/nodes/ec2_autoscale_node.md new file mode 100644 index 000000000..4a823cf4a --- /dev/null +++ b/content/kapacitor/v1.5/nodes/ec2_autoscale_node.md @@ -0,0 +1,1039 @@ +--- +title: EC2AutoscaleNode +description: EC2AutoscaleNode triggers autoscale events on an AWS Autoscaling group. The node also outputs points for the triggered events. 
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: EC2AutoscaleNode
+    identifier: ec2_autoscale_node
+    weight: 80
+    parent: nodes
+---
+
+The `ec2Autoscale` node triggers autoscale events for a group on an AWS Autoscaling group.
+The node also outputs points for the triggered events.
+
+Example:
+
+```js
+// Target 80% cpu per ec2 instance
+var target = 80.0
+var min = 1
+var max = 10
+var period = 5m
+var every = period
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy('host_name','group_name')
+        .where(lambda: "cpu" == 'cpu-total')
+    |eval(lambda: 100.0 - "usage_idle")
+        .as('usage_percent')
+    |window()
+        .period(period)
+        .every(every)
+    |mean('usage_percent')
+        .as('mean_cpu')
+    |groupBy('group_name')
+    |sum('mean_cpu')
+        .as('total_cpu')
+    |ec2Autoscale()
+        // Get the group name of the VM (EC2 instance) from the "group_name" tag.
+        .groupNameTag('group_name')
+        .min(min)
+        .max(max)
+        // Set the desired number of replicas based on target.
+        .replicas(lambda: int(ceil("total_cpu" / target)))
+    |influxDBOut()
+        .database('deployments')
+        .measurement('scale_events')
+        .precision('s')
+```
+
+The above example computes the mean of CPU `usage_percent` grouped by `host_name` and `group_name`.
+The sum of mean CPU usage is calculated as `total_cpu`.
+Using the `total_cpu` over the last time period, a desired number of replicas is computed
+based on the target percentage usage of CPU.
+
+If the desired number of replicas has changed, Kapacitor makes the appropriate API
+call to the AWS autoscaling group to update the replica spec.
+
+Any time the Ec2Autoscale node changes a replica count, it emits a point.
+The point is tagged with the group name.
+In addition, the group-by tags are preserved on the emitted point.
+The point contains two fields representing the change in replicas: `old` and `new`.
+
+Available Statistics:
+
+* `increase_events`: number of times the replica count was increased.
+* `decrease_events`: number of times the replica count was decreased.
+* `cooldown_drops`: number of times an event was dropped because of a cooldown timer.
+* `errors`: number of errors encountered, typically related to communicating with the AWS autoscaling API.
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **ec2Autoscale ( )** | Create a node that can trigger autoscale events for an EC2 Autoscale group. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[cluster](#cluster) ( `value` `string`)** | Cluster is the ID of the EC2 autoscale group to use. The ID of the cluster is specified in the Kapacitor configuration. |
+| **[currentField](#currentfield) ( `value` `string`)** | CurrentField is the name of a field into which the current replica count will be set as an int. If empty, no field will be set. Useful for computing deltas on the current state. |
+| **[decreaseCooldown](#decreasecooldown) ( `value` `time.Duration`)** | Only one decrease event can be triggered per resource every DecreaseCooldown interval. |
+| **[groupName](#groupname) ( `value` `string`)** | GroupName is the name of the autoscaling group to autoscale. |
+| **[groupNameTag](#groupnametag) ( `value` `string`)** | GroupNameTag is the name of a tag which contains the name of the autoscaling group to autoscale. |
+| **[increaseCooldown](#increasecooldown) ( `value` `time.Duration`)** | Only one increase event can be triggered per resource every IncreaseCooldown interval. |
+| **[max](#max) ( `value` `int64`)** | The maximum scale factor to set. If 0 then there is no upper limit. Default: 0, a.k.a. no limit. |
+| **[min](#min) ( `value` `int64`)** | The minimum scale factor to set. Default: 1 |
+| **[outputGroupNameTag](#outputgroupnametag) ( `value` `string`)** | OutputGroupNameTag is the name of a tag into which the group name will be written for output autoscale events. Defaults to the value of GroupNameTag if it is not empty. |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+| **[replicas](#replicas) ( `value` `ast.LambdaNode`)** | Replicas is a lambda expression that should evaluate to the desired number of replicas for the resource. |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Mean](#mean),
+[Median](#median),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### Cluster
+
+Cluster is the ID of the EC2 autoscale group to use.
+The ID of the cluster is specified in the Kapacitor configuration.
+
+
+```js
+ec2Autoscale.cluster(value string)
+```
+
+
+
+
+### CurrentField
+
+CurrentField is the name of a field into which the current replica count will be set as an int.
+If empty, no field will be set.
+Useful for computing deltas on the current state.
+
+Example:
+
+
+```js
+    |ec2Autoscale()
+        .currentField('replicas')
+        // Increase the replicas by 1 if the qps is over the threshold
+        .replicas(lambda: if("qps" > threshold, "replicas" + 1, "replicas"))
+```
+
+
+
+```js
+ec2Autoscale.currentField(value string)
+```
+
+
+
+
+### DecreaseCooldown
+
+Only one decrease event can be triggered per resource every DecreaseCooldown interval.
+
+
+```js
+ec2Autoscale.decreaseCooldown(value time.Duration)
+```
+
+
+
+
+### GroupName
+
+GroupName is the name of the autoscaling group to autoscale.
+
+
+```js
+ec2Autoscale.groupName(value string)
+```
+
+
+
+
+### GroupNameTag
+
+GroupNameTag is the name of a tag which contains the name of the autoscaling group to autoscale.
+
+
+```js
+ec2Autoscale.groupNameTag(value string)
+```
+
+
+
+
+### IncreaseCooldown
+
+Only one increase event can be triggered per resource every IncreaseCooldown interval.
+
+
+```js
+ec2Autoscale.increaseCooldown(value time.Duration)
+```
+
+
+
+
+### Max
+
+The maximum scale factor to set.
+If 0 then there is no upper limit.
+Default: 0, a.k.a no limit. + + +```js +ec2Autoscale.max(value int64) +``` + + + + +### Min + +The minimum scale factor to set. +Default: 1 + + +```js +ec2Autoscale.min(value int64) +``` + + + + +### OutputGroupNameTag + +OutputGroupName is the name of a tag into which the group name will be written for output autoscale events. +Defaults to the value of GroupNameTag if its not empty. + + +```js +ec2Autoscale.outputGroupNameTag(value string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + + +```js +ec2Autoscale.quiet() +``` + + + + +### Replicas + +Replicas is a lambda expression that should evaluate to the desired number of replicas for the resource. + + +```js +ec2Autoscale.replicas(value ast.LambdaNode) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +ec2Autoscale|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +ec2Autoscale|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +ec2Autoscale|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +ec2Autoscale|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +ec2Autoscale|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +ec2Autoscale|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +ec2Autoscale|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. 
+ data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +ec2Autoscale|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +ec2Autoscale|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +ec2Autoscale|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +ec2Autoscale|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +ec2Autoscale|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +ec2Autoscale|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +ec2Autoscale|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +ec2Autoscale|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +ec2Autoscale|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +ec2Autoscale|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. 
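+
+Example (an illustrative sketch; the `requests` measurement and `port` tag are placeholders, not part of this reference):
+
+```js
+// 'requests' and 'port' are hypothetical names
+stream
+    |from()
+        .measurement('requests')
+    |flatten()
+        .on('port')
+```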
+ + +```js +ec2Autoscale|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +ec2Autoscale|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +ec2Autoscale|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +ec2Autoscale|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +ec2Autoscale|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +ec2Autoscale|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +ec2Autoscale|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +ec2Autoscale|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +ec2Autoscale|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +ec2Autoscale|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +ec2Autoscale|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +ec2Autoscale|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +ec2Autoscale|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. 
+ + +```js +ec2Autoscale|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +ec2Autoscale|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +ec2Autoscale|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +ec2Autoscale|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +ec2Autoscale|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +ec2Autoscale|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +ec2Autoscale|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +ec2Autoscale|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +ec2Autoscale|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +ec2Autoscale|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +ec2Autoscale|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +ec2Autoscale|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +ec2Autoscale|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +ec2Autoscale|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +ec2Autoscale|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. 
+ + +```js +ec2Autoscale|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +ec2Autoscale|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +ec2Autoscale|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/eval_node.md b/content/kapacitor/v1.5/nodes/eval_node.md new file mode 100644 index 000000000..6529b8f18 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/eval_node.md @@ -0,0 +1,993 @@ +--- +title: EvalNode +description: EvalNode evaluates expressions on each data point it receives. A list of expressions may be provided and will be evaluated in the order they are given. The results of expressions are available to later expressions in the list. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: EvalNode + identifier: eval_node + weight: 90 + parent: nodes +--- + +The `eval` node evaluates expressions on each data point it receives. +A list of expressions may be provided and will be evaluated in the order they are given. +The results of expressions are available to later expressions in the list. +See the property [EvalNode.As](/kapacitor/v1.5/nodes/eval_node/#as) for details on how to reference the results. + +Example: + + +```js +stream + |eval(lambda: "error_count" / "total_count") + .as('error_percent') +``` + +The above example will add a new field `error_percent` to each +data point with the result of `error_count / total_count` where +`error_count` and `total_count` are existing fields on the data point. + +Available Statistics: + +* eval_errors: number of errors evaluating any expressions. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **eval ( `expressions` `...ast.LambdaNode`)** | Create an eval node that will evaluate the given transformation function to each data point. A list of expressions may be provided and will be evaluated in the order they are given. The results are available to later expressions. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[as](#as) ( `names` `...string`)** | List of names for each expression. The expressions are evaluated in order. The result of an expression may be referenced by later expressions via the name provided. | +| **[keep](#keep) ( `fields` `...string`)** | If called the existing fields will be preserved in addition to the new fields being set. If not called then only new fields are preserved. (Tags are always preserved regardless how `keep` is used.) | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[tags](#tags) ( `names` `...string`)** | Convert the result of an expression into a tag. The result must be a string. Use the `string()` expression function to convert types. 
| + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### As + +List of names for each expression. +The expressions are evaluated in order. The result +of an expression may be referenced by later expressions +via the name provided. + +Example: + + +```js + stream + |eval(lambda: "value" * "value", lambda: 1.0 / "value2") + .as('value2', 'inv_value2') +``` + +The above example calculates two fields from the value and names them +`value2` and `inv_value2` respectively. + + + +```js +eval.as(names ...string) +``` + + + + +### Keep + +If called the existing fields will be preserved in addition +to the new fields being set. +If not called then only new fields are preserved. (Tags are +always preserved regardless how `keep` is used.) + +Optionally, intermediate values can be discarded +by passing a list of field names to be kept. +Only fields in the list will be retained, the rest will be discarded. +If no list is given then all fields are retained. + +Example: + + +```js + stream + |eval(lambda: "value" * "value", lambda: 1.0 / "value2") + .as('value2', 'inv_value2') + .keep('value', 'inv_value2') +``` + +In the above example the original field `value` is preserved. +The new field `value2` is calculated and used in evaluating +`inv_value2` but is discarded before the point is sent on to child nodes. +The resulting point has only two fields: `value` and `inv_value2`. + + + +```js +eval.keep(fields ...string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + + +```js +eval.quiet() +``` + + + + +### Tags + +Convert the result of an expression into a tag. +The result must be a string. +Use the `string()` expression function to convert types. + + +Example: + + +```js + stream + |eval(lambda: string(floor("value" / 10.0))) + .as('value_bucket') + .tags('value_bucket') +``` + +The above example calculates an expression from the field `value`, casts it as a string, and names it `value_bucket`. +The `value_bucket` expression is then converted from a field on the point to a tag `value_bucket` on the point. 
+ +Example: + + +```js + stream + |eval(lambda: string(floor("value" / 10.0))) + .as('value_bucket') + .tags('value_bucket') + .keep('value') // keep the original field `value` as well +``` + +The above example calculates an expression from the field `value`, casts it as a string, and names it `value_bucket`. +The `value_bucket` expression is then converted from a field on the point to a tag `value_bucket` on the point. +The `keep` property preserves the original field `value`. +Tags are always kept since creating a tag implies you want to keep it. + + + +```js +eval.tags(names ...string) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +eval|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +eval|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +eval|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +eval|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +eval|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +eval|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +eval|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. 
+ +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +eval|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +eval|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +eval|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +eval|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +eval|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +eval|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +eval|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +eval|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +eval|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +eval|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +eval|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +eval|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. 
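+
+Example (an illustrative sketch; the measurement name, window sizes, and forecast parameters are arbitrary placeholders):
+
+```js
+// 'requests' is a hypothetical measurement; 10 forecasted values,
+// no seasonal pattern (0), and 1m intervals are arbitrary choices.
+stream
+    |from()
+        .measurement('requests')
+    |window()
+        .period(10m)
+        .every(10m)
+    |count('value')
+    |holtWinters('count', 10, 0, 1m)
+```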
+ + +```js +eval|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +eval|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +eval|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +eval|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +eval|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +eval|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +eval|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +eval|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +eval|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +eval|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +eval|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +eval|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +eval|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +eval|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +eval|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. 
+No points are emitted until the window is full. + + +```js +eval|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +eval|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +eval|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +eval|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +eval|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +eval|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +eval|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +eval|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +eval|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +eval|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +eval|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +eval|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +eval|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +eval|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +eval|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. 
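+
+Example (a minimal sketch; the measurement name and window sizes are placeholders):
+
+```js
+// 'cpu' is a hypothetical measurement
+stream
+    |from()
+        .measurement('cpu')
+    |window()
+        .period(1m)
+        .every(1m)
+```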
+ + +```js +eval|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/flatten_node.md b/content/kapacitor/v1.5/nodes/flatten_node.md new file mode 100644 index 000000000..26a45a9dc --- /dev/null +++ b/content/kapacitor/v1.5/nodes/flatten_node.md @@ -0,0 +1,961 @@ +--- +title: FlattenNode +description: FlattenNode flattens a set of points on specific dimensions. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: FlattenNode + identifier: flatten_node + weight: 100 + parent: nodes +--- + +The `flatten` node flattens a set of points on specific dimensions. +For example, given two points: + +``` +m,host=A,port=80 bytes=3512 +m,host=A,port=443 bytes=6723 +``` + +Flattening the points on `port` results in a single point: + +``` +m,host=A 80.bytes=3512,443.bytes=6723 +``` + +Example: + + +```js +|flatten() + .on('port') +``` + +If flattening on multiple dimensions, the order is preserved: + +``` +m,host=A,port=80 bytes=3512 +m,host=A,port=443 bytes=6723 +m,host=B,port=443 bytes=7243 +``` + +Flattening the points on `host` and `port` would result in a single point: + +``` +m A.80.bytes=3512,A.443.bytes=6723,B.443.bytes=7243 +``` + +Example: + +```js +|flatten() + .on('host', 'port') +``` + +Since flattening points creates dynamically named fields in general it is expected +that the resultant data is passed to a UDF or similar for custom processing. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **flatten ( )** | Flatten points with similar times into a single point. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[delimiter](#delimiter) ( `value` `string`)** | The delimiter between field name parts | +| **[dropOriginalFieldName](#droporiginalfieldname) ( `drop` `...bool`)** | DropOriginalFieldName indicates whether the original field name should be dropped when constructing the final field name. | +| **[on](#on) ( `dims` `...string`)** | Specify the dimensions on which to flatten the points. | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[tolerance](#tolerance) ( `value` `time.Duration`)** | The maximum duration of time that two incoming points can be apart and still be considered to be equal in time. The joined data point's time will be rounded to the nearest multiple of the tolerance duration. 
| + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Delimiter + +The delimiter between field name parts + + +```js +flatten.delimiter(value string) +``` + + + + +### DropOriginalFieldName + +DropOriginalFieldName indicates whether the original field name should +be dropped when constructing the final field name. + + +```js +flatten.dropOriginalFieldName(drop ...bool) +``` + + + + +### On + +Specify the dimensions on which to flatten the points. + + +```js +flatten.on(dims ...string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + + +```js +flatten.quiet() +``` + + + + +### Tolerance + +The maximum duration of time that two incoming points +can be apart and still be considered to be equal in time. +The joined data point's time will be rounded to the nearest +multiple of the tolerance duration. + + +```js +flatten.tolerance(value time.Duration) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +flatten|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +flatten|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +flatten|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +flatten|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. 
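+
+Example (an illustrative sketch; the `request_latency` measurement and `service` tag are placeholders):
+
+```js
+// 'request_latency' and 'service' are hypothetical names
+stream
+    |from()
+        .measurement('request_latency')
+    |combine(lambda: "service" == 'login', lambda: TRUE)
+        .as('login', 'other')
+```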
+ + +```js +flatten|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +flatten|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +flatten|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +flatten|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +flatten|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +flatten|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +flatten|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. 
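+
+Example (an illustrative sketch; the `net` measurement and `bytes_recv` field are placeholders):
+
+```js
+// 'net' and 'bytes_recv' are hypothetical names
+stream
+    |from()
+        .measurement('net')
+    |window()
+        .period(10s)
+        .every(10s)
+    |difference('bytes_recv')
+        .as('diff_bytes_recv')
+```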
+ + +```js +flatten|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +flatten|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +flatten|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +flatten|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +flatten|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +flatten|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +flatten|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +flatten|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +flatten|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +flatten|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +flatten|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +flatten|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +flatten|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. 
The data is joined on timestamp. + + +```js +flatten|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +flatten|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +flatten|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +flatten|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +flatten|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +flatten|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +flatten|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +flatten|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +flatten|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +flatten|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +flatten|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +flatten|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +flatten|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +flatten|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +flatten|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +flatten|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +flatten|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. 
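+
+Example (an illustrative sketch; the measurement, field, and threshold are placeholders):
+
+```js
+// 'cpu', 'usage_idle', and the threshold 10 are hypothetical
+stream
+    |from()
+        .measurement('cpu')
+    |stateDuration(lambda: "usage_idle" <= 10)
+        .unit(1m)
+```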
+ + +```js +flatten|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +flatten|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +flatten|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +flatten|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +flatten|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +flatten|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +flatten|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +flatten|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +flatten|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/from_node.md b/content/kapacitor/v1.5/nodes/from_node.md new file mode 100644 index 000000000..cef670684 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/from_node.md @@ -0,0 +1,1119 @@ +--- +title: FromNode +description: FromNode selects a subset of the data flowing through a StreamNode. You can select which portion of the stream you want to process. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: FromNode + identifier: from_node + weight: 110 + parent: nodes +--- + +The `from` node selects a subset of the data flowing through a [StreamNode.](/kapacitor/v1.5/nodes/stream_node/) +The stream node allows you to select which portion of the stream you want to process. + +Example: + + +```js +stream + |from() + .database('mydb') + .retentionPolicy('myrp') + .measurement('mymeasurement') + .where(lambda: "host" =~ /logger\d+/) + |window() + ... +``` + +The above example selects only data points from the database `mydb` +and retention policy `myrp` and measurement `mymeasurement` where +the tag `host` matches the regex `logger\d+`. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **from ( )** | Creates a new stream node that can be further filtered using the Database, RetentionPolicy, Measurement and Where properties. From can be called multiple times to create multiple independent forks of the data stream. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[database](#database) ( `value` `string`)** | The database name. If empty any database will be used. 
| +| **[groupBy](#groupby) ( `tag` `...interface{}`)** | Group the data by a set of tags. | +| **[groupByMeasurement](#groupbymeasurement) ( )** | If set will include the measurement name in the group ID. Along with any other group by dimensions. | +| **[measurement](#measurement) ( `value` `string`)** | The measurement name If empty any measurement will be used. | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[retentionPolicy](#retentionpolicy) ( `value` `string`)** | The retention policy name If empty any retention policy will be used. | +| **[round](#round) ( `value` `time.Duration`)** | Optional duration for rounding timestamps. Helpful to ensure data points land on specific boundaries Example: stream |from() .measurement('mydata') .round(1s) | +| **[truncate](#truncate) ( `value` `time.Duration`)** | Optional duration for truncating timestamps. Helpful to ensure data points land on specific boundaries Example: stream |from() .measurement('mydata') .truncate(1s) | +| **[where](#where) ( `lambda` `ast.LambdaNode`)** | Filter the current stream using the given expression. This expression is a Kapacitor expression. Kapacitor expressions are a superset of InfluxQL WHERE expressions. See the [expression](https://docs.influxdata.com/kapacitor/latest/tick/expr/) docs for more information. | + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[From](#from), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Database + +The database name. +If empty any database will be used. + + +```js +from.database(value string) +``` + + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + stream + |from() + .groupBy(*) +``` + + + +```js +from.groupBy(tag ...interface{}) +``` + + + + +### GroupByMeasurement + +If set will include the measurement name in the group ID. +Along with any other group by dimensions. + +Example: + + +```js + stream + |from() + .database('mydb') + .groupByMeasurement() + .groupBy('host') +``` + +The above example selects all measurements from the database 'mydb' and +then each point is grouped by the host tag and measurement name. +Thus keeping measurements in their own groups. 
+ + +```js +from.groupByMeasurement() +``` + + + + +### Measurement + +The measurement name +If empty any measurement will be used. + + +```js +from.measurement(value string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + + +```js +from.quiet() +``` + + + + +### RetentionPolicy + +The retention policy name +If empty any retention policy will be used. + + +```js +from.retentionPolicy(value string) +``` + + + + +### Round + +Optional duration for rounding timestamps. +Helpful to ensure data points land on specific boundaries +Example: + + +```js + stream + |from() + .measurement('mydata') + .round(1s) +``` + +All incoming data will be rounded to the nearest 1 second boundary. + + +```js +from.round(value time.Duration) +``` + + + + +### Truncate + +Optional duration for truncating timestamps. +Helpful to ensure data points land on specific boundaries +Example: + + +```js + stream + |from() + .measurement('mydata') + .truncate(1s) +``` + +All incoming data will be truncated to 1 second resolution. + + +```js +from.truncate(value time.Duration) +``` + + + + +### Where + +Filter the current stream using the given expression. +This expression is a Kapacitor expression. Kapacitor +expressions are a superset of InfluxQL WHERE expressions. +See the [expression](https://docs.influxdata.com/kapacitor/latest/tick/expr/) docs for more information. + +Multiple calls to the Where method will `AND` together each expression. + +Example: + + +```js + stream + |from() + .where(lambda: condition1) + .where(lambda: condition2) +``` + +The above is equivalent to this example: + + +```js + stream + |from() + .where(lambda: condition1 AND condition2) +``` + + +NOTE: Becareful to always use `|from` if you want multiple different streams. + +Example: + + +```js + var data = stream + |from() + .measurement('cpu') + var total = data + .where(lambda: "cpu" == 'cpu-total') + var others = data + .where(lambda: "cpu" != 'cpu-total') +``` + +The example above is equivalent to the example below, +which is obviously not what was intended. + +Example: + + +```js + var data = stream + |from() + .measurement('cpu') + .where(lambda: "cpu" == 'cpu-total' AND "cpu" != 'cpu-total') + var total = data + var others = total +``` + +The example below will create two different streams each selecting +a different subset of the original stream. + +Example: + + +```js + var data = stream + |from() + .measurement('cpu') + var total = stream + |from() + .measurement('cpu') + .where(lambda: "cpu" == 'cpu-total') + var others = stream + |from() + .measurement('cpu') + .where(lambda: "cpu" != 'cpu-total') +``` + + +If empty then all data points are considered to match. + + +```js +from.where(lambda ast.LambdaNode) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +from|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +from|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. 
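+
+Example (an illustrative sketch; the measurement, field, and tag names are placeholders):
+
+```js
+// 'requests', 'value', and 'host' are hypothetical names
+stream
+    |from()
+        .measurement('requests')
+    |window()
+        .period(10s)
+        .every(10s)
+    |bottom(3, 'value', 'host')
+```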
+ + +```js +from|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +from|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +from|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +from|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +from|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +from|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +from|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. 
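+
+The tags or fields to remove are named with property methods on the node. A minimal sketch, assuming a `debug` field and a `host` tag that should be dropped before further processing:
+
+```js
+    stream
+        |from()
+            .measurement('cpu')
+        |delete()
+            .field('debug')
+            .tag('host')
+```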
+ + +```js +from|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +from|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +from|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +from|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +from|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +from|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +from|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +from|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +from|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### From + +Creates a new stream node that can be further +filtered using the Database, RetentionPolicy, Measurement and Where properties. +From can be called multiple times to create multiple +independent forks of the data stream. + +Example: + + +```js + // Select the 'cpu' measurement from just the database 'mydb' + // and retention policy 'myrp'. + var cpu = stream + |from() + .database('mydb') + .retentionPolicy('myrp') + .measurement('cpu') + // Select the 'load' measurement from any database and retention policy. + var load = stream + |from() + .measurement('load') + // Join cpu and load streams and do further processing. + cpu + |join(load) + .as('cpu', 'load') + ... +``` + + + +```js +from|from() +``` + +Returns: [FromNode](/kapacitor/v1.5/nodes/from_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +from|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +from|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. 
+For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +from|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +from|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +from|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +from|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +from|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +from|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +from|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +from|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +from|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +from|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +from|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +from|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +from|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +from|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +from|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +from|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. 
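+
+For example, a sketch shifting points five minutes into the past, e.g. to line them up with an older stream before a join; the negative duration is what moves them backward in time:
+
+```js
+    stream
+        |from()
+            .measurement('cpu')
+        |shift(-5m)
+```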
+ + +```js +from|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +from|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +from|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +from|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +from|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +from|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +from|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +from|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +from|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +from|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +from|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +from|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/group_by_node.md b/content/kapacitor/v1.5/nodes/group_by_node.md new file mode 100644 index 000000000..1bf43ba35 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/group_by_node.md @@ -0,0 +1,929 @@ +--- +title: GroupByNode +description: GroupByNode groups incoming data. Each group is then processed independently for the rest of the pipeline. Only tags that are dimensions in the grouping will be preserved; all other tags are dropped. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: GroupByNode + identifier: group_by_node + weight: 120 + parent: nodes +--- + +The `groupBy` node will group the incoming data. +Each group is then processed independently for the rest of the pipeline. + +#### groupBy with aggregated data +When using `groupBy` with aggregated data, only tags that are dimensions in the grouping are preserved. +All other tags are dropped. With data that is not being aggregated, all tags are preserved. + +Example: + + +```js +stream + |groupBy('service', 'datacenter') + ... 
+``` + +The above example groups the data along two dimensions `service` and `datacenter`. +Groups are dynamically created as new data arrives and each group is processed +independently. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **groupBy ( `tag` `...interface{}`)** | Group the data by a set of tags. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[byMeasurement](#bymeasurement) ( )** | If set will include the measurement name in the group ID. Along with any other group by dimensions. | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[Exclude](#exclude), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### ByMeasurement + +If set will include the measurement name in the group ID. +Along with any other group by dimensions. + +Example: + + +```js + ... + |groupBy('host') + .byMeasurement() +``` + +The above example groups points by their host tag and measurement name. + +If you want to remove the measurement name from the group ID, +then groupBy all existing dimensions but without specifying 'byMeasurement'. + +Example: + + +```js + |groupBy(*) +``` + +The above removes the group by measurement name if any. + + +```js +groupBy.byMeasurement() +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +groupBy.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +groupBy|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +groupBy|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. 
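+
+Because each group is processed independently, `bottom` here selects points per group. A sketch (measurement, field, and tag names are hypothetical) finding the three lowest `free` values per host:
+
+```js
+    stream
+        |from()
+            .measurement('disk')
+        |groupBy('host')
+        |window()
+            .period(1m)
+            .every(1m)
+        |bottom(3, 'free', 'path')
+```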
+ + +```js +groupBy|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +groupBy|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +groupBy|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +groupBy|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +groupBy|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +groupBy|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +groupBy|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. 
+ + +```js +groupBy|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +groupBy|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +groupBy|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +groupBy|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +groupBy|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +groupBy|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +groupBy|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### Exclude + +Exclude removes any tags from the group. + + +```js +groupBy|exclude(dims ...string) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### First + +Select the first point. + + +```js +groupBy|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +groupBy|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +groupBy|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +groupBy|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +groupBy|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. 
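+
+A sketch caching the latest per-host means at a task-relative endpoint (the endpoint name `per_host` and the field name are hypothetical):
+
+```js
+    stream
+        |from()
+            .measurement('cpu')
+        |groupBy('host')
+        |window()
+            .period(10s)
+            .every(10s)
+        |mean('usage_idle')
+        |httpOut('per_host')
+```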
+ + +```js +groupBy|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +groupBy|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +groupBy|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +groupBy|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +groupBy|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +groupBy|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +groupBy|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +groupBy|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +groupBy|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +groupBy|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +groupBy|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +groupBy|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +groupBy|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +groupBy|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +groupBy|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +groupBy|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. 
+ + +```js +groupBy|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +groupBy|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +groupBy|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +groupBy|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +groupBy|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +groupBy|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +groupBy|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +groupBy|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +groupBy|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +groupBy|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +groupBy|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +groupBy|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +groupBy|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/http_out_node.md b/content/kapacitor/v1.5/nodes/http_out_node.md new file mode 100644 index 000000000..e83144aad --- /dev/null +++ b/content/kapacitor/v1.5/nodes/http_out_node.md @@ -0,0 +1,884 @@ +--- +title: HTTPOutNode +description: HTTPOutNode caches the most recent data for each group it has received. The cached data is available at the given endpoint, which is the relative path from the API endpoint of the running task. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: HTTPOutNode + identifier: http_out_node + weight: 130 + parent: nodes +--- + +The `httpOut` node acts as a simple passthrough and caches the most recent data for each group it has received. 
+Because of this, any available chaining method can be used to handle the cached data. + +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + +Example: + + +```js + stream + |window() + .period(10s) + .every(5s) + |top('value', 10) + //Publish the top 10 results over the last 10s updated every 5s. + |httpOut('top10') +``` + +Beware of adding a final slash ‘/’ to the URL. This will result in a 404 error for a +task that does not exist. + +Note that the example script above comes from the +[scores](https://github.com/influxdata/kapacitor/tree/master/examples/scores) example. +See the complete scores example for a concrete demonstration. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **httpOut ( `endpoint` `string`)** | Create an HTTP output node that caches the most recent data it has received. The cached data is available at the given endpoint. The endpoint is the relative path from the API endpoint of the running task. For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is `top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Quiet + +Suppress all error logging events from this node. + +```js +httpOut.quiet() +``` + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +httpOut|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. 
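+
+As a sketch, assuming the `period` property described in the [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) docs, the following emits a barrier every ten seconds so downstream nodes can advance even when a group stops receiving points:
+
+```js
+    stream
+        |from()
+            .measurement('cpu')
+        |barrier()
+            .period(10s)
+```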
+ + +```js +httpOut|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +httpOut|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +httpOut|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +httpOut|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +httpOut|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +httpOut|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +httpOut|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. 
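+
+The defaults themselves are supplied with property methods. A minimal sketch, assuming a `host` tag and a `usage` field that may be missing:
+
+```js
+    stream
+        |from()
+            .measurement('cpu')
+        |default()
+            .tag('host', 'unknown')
+            .field('usage', 0.0)
+```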
+ + +```js +httpOut|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +httpOut|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +httpOut|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +httpOut|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +httpOut|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +httpOut|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +httpOut|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +httpOut|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +httpOut|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +httpOut|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +httpOut|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +httpOut|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +httpOut|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. 
+ + +```js +httpOut|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +httpOut|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +httpOut|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +httpOut|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +httpOut|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +httpOut|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +httpOut|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +httpOut|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +httpOut|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +httpOut|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +httpOut|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +httpOut|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +httpOut|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +httpOut|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +httpOut|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +httpOut|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. 
+ + +```js +httpOut|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +httpOut|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +httpOut|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +httpOut|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +httpOut|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +httpOut|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +httpOut|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +httpOut|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +httpOut|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +httpOut|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +httpOut|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +httpOut|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +httpOut|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/http_post_node.md b/content/kapacitor/v1.5/nodes/http_post_node.md new file mode 100644 index 000000000..c597662c2 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/http_post_node.md @@ -0,0 +1,977 @@ +--- +title: HTTPPostNode +description: HTTPPostNode takes the incoming data stream and will POST it to an HTTP endpoint. That endpoint may be specified as a positional argument, or as an endpoint property method on httpPost. Multiple endpoint property methods may be specified. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: HTTPPostNode + identifier: http_post_node + weight: 140 + parent: nodes +--- + +The `httpPost` node will take the incoming data stream and POST it to an HTTP endpoint. 
+That endpoint may be specified as a positional argument, or as an endpoint property +method on httpPost. Multiple endpoint property methods may be specified. + +Example: + + +```js +stream + |window() + .period(10s) + .every(5s) + |top('value', 10) + //Post the top 10 results over the last 10s updated every 5s. + |httpPost('http://example.com/api/top10') +``` + +Example: + + +```js +stream + |window() + .period(10s) + .every(5s) + |top('value', 10) + //Post the top 10 results over the last 10s updated every 5s. + |httpPost() + .endpoint('example') +``` + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **httpPost ( `url` `...string`)** | Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an endpoint property method. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[captureResponse](#captureresponse) ( )** | CaptureResponse indicates that the HTTP response should be read and logged if the status code was not an 2xx code. | +| **[codeField](#codefield) ( `value` `string`)** | CodeField is the name of the field in which to place the HTTP status code. If the HTTP request fails at a layer below HTTP, (i.e. rejected TCP connection), then the status code is set to 0. | +| **[endpoint](#endpoint) ( `endpoint` `string`)** | Name of the endpoint to be used, as is defined in the configuration file. | +| **[header](#header) ( `k` `string`, `v` `string`)** | Add a header to the POST request | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[timeout](#timeout) ( `value` `time.Duration`)** | Timeout for HTTP Post | + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### CaptureResponse + +CaptureResponse indicates that the HTTP response should be read and logged if +the status code was not an 2xx code. + + +```js +httpPost.captureResponse() +``` + + + + +### CodeField + +CodeField is the name of the field in which to place the HTTP status code. +If the HTTP request fails at a layer below HTTP, (i.e. rejected TCP connection), then the status code is set to 0. 
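+
+For example, a sketch recording each POST's HTTP status in a `status_code` field (the field name is hypothetical; per the note above, a request that fails below the HTTP layer is recorded as 0):
+
+```js
+    stream
+        |httpPost('http://example.com/api')
+            .codeField('status_code')
+```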
+ + +```js +httpPost.codeField(value string) +``` + + + + +### Endpoint + +Name of the endpoint to be used, as is defined in the configuration file. + +Example: + + +```js + stream + |httpPost() + .endpoint('example') +``` + + + +```js +httpPost.endpoint(endpoint string) +``` + + + + +### Header + +Add a header to the POST request + +Example: + + +```js + stream + |httpPost() + .endpoint('example') + .header('my', 'header') +``` + + + +```js +httpPost.header(k string, v string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +httpPost.quiet() +``` + + + + +### Timeout + +Timeout for HTTP Post + + +```js +httpPost.timeout(value time.Duration) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +httpPost|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +httpPost|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +httpPost|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +httpPost|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +httpPost|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +httpPost|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +httpPost|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... 
+``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +httpPost|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +httpPost|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +httpPost|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +httpPost|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +httpPost|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +httpPost|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +httpPost|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +httpPost|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +httpPost|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +httpPost|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +httpPost|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. 
+Example: + + +```js + |groupBy(*) +``` + + + +```js +httpPost|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +httpPost|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +httpPost|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +httpPost|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +httpPost|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +httpPost|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +httpPost|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +httpPost|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +httpPost|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +httpPost|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +httpPost|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +httpPost|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +httpPost|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. 
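+
+Example (an illustrative sketch; the measurement, field, and window settings are placeholders):
+
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |window()
+        .period(10s)
+        .every(10s)
+    // Compute the median of the 'usage_idle' field for each 10s window.
+    |median('usage_idle')
+```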
+ + +```js +httpPost|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +httpPost|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +httpPost|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +httpPost|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +httpPost|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +httpPost|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +httpPost|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +httpPost|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +httpPost|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +httpPost|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +httpPost|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +httpPost|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +httpPost|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +httpPost|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +httpPost|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +httpPost|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. 
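+
+Example (an illustrative sketch; the stream variables and measurement names are placeholders):
+
+
+```js
+var logins = stream
+    |from()
+        .measurement('logins')
+var logouts = stream
+    |from()
+        .measurement('logouts')
+// Merge both streams into a single stream and log every point.
+logins
+    |union(logouts)
+    |log()
+```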
+ + +```js +httpPost|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +httpPost|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +httpPost|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/influx_d_b_out_node.md b/content/kapacitor/v1.5/nodes/influx_d_b_out_node.md new file mode 100644 index 000000000..36e4cf728 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/influx_d_b_out_node.md @@ -0,0 +1,324 @@ +--- +title: InfluxDBOutNode +description: InfluxDBOutNode writes data to an InfluxDB database as it is received. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: InfluxDBOutNode + identifier: influx_d_b_out_node + weight: 150 + parent: nodes +--- + +The `influxDBOut` node writes data to InfluxDB as it is received. + +Example: + + +```js +stream + |from() + .measurement('requests') + |eval(lambda: "errors" / "total") + .as('error_percent') + // Write the transformed data to InfluxDB + |influxDBOut() + .database('mydb') + .retentionPolicy('myrp') + .measurement('errors') + .tag('kapacitor', 'true') + .tag('version', '0.2') +``` + +Available Statistics: + +* points_written: number of points written to InfluxDB +* write_errors: number of errors attempting to write to InfluxDB + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **influxDBOut ( )** | Create an influxdb output node that will store the incoming data into InfluxDB. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[buffer](#buffer) ( `value` `int64`)** | Number of points to buffer when writing to InfluxDB. Default: 1000 | +| **[cluster](#cluster) ( `value` `string`)** | The name of the InfluxDB instance to connect to. If empty the configured default will be used. | +| **[create](#create) ( )** | Create indicates that both the database and retention policy will be created, when the task is started. If the retention policy name is empty then no retention policy will be specified and the default retention policy name will be created. | +| **[database](#database) ( `value` `string`)** | The name of the database. | +| **[flushInterval](#flushinterval) ( `value` `time.Duration`)** | Write points to InfluxDB after interval even if buffer is not full. Default: 10s | +| **[measurement](#measurement) ( `value` `string`)** | The name of the measurement. | +| **[precision](#precision) ( `value` `string`)** | The precision to use when writing the data. | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[retentionPolicy](#retentionpolicy) ( `value` `string`)** | The name of the retention policy. | +| **[tag](#tag) ( `key` `string`, `value` `string`)** | Add a static tag to all data points. Tag can be called more then once. | +| **[writeConsistency](#writeconsistency) ( `value` `string`)** | The write consistency to use when writing the data. | + + + +### Chaining Methods +[Deadman](#deadman), +[Stats](#stats) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. 
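+
+For example, a typical configuration chains several of the property methods documented below (a minimal sketch; the database, retention policy, and measurement names are placeholders):
+
+
+```js
+stream
+    |from()
+        .measurement('requests')
+    |influxDBOut()
+        // Create the database and retention policy when the task starts.
+        .create()
+        .database('mydb')
+        .retentionPolicy('myrp')
+        .measurement('requests_copy')
+        // Buffer up to 1000 points and flush at least every 10s.
+        .buffer(1000)
+        .flushInterval(10s)
+```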
+ + +### Buffer + +Number of points to buffer when writing to InfluxDB. +Default: 1000 + + +```js +influxDBOut.buffer(value int64) +``` + + + + +### Cluster + +The name of the InfluxDB instance to connect to. +If empty the configured default will be used. + + +```js +influxDBOut.cluster(value string) +``` + + + + +### Create + +Create indicates that both the database and retention policy +will be created, when the task is started. +If the retention policy name is empty then no +retention policy will be specified and +the default retention policy name will be created. + +If the database already exists nothing happens. + + + +```js +influxDBOut.create() +``` + + + + +### Database + +The name of the database. + + +```js +influxDBOut.database(value string) +``` + + + + +### FlushInterval + +Write points to InfluxDB after interval even if buffer is not full. +Default: 10s + + +```js +influxDBOut.flushInterval(value time.Duration) +``` + + + + +### Measurement + +The name of the measurement. + + +```js +influxDBOut.measurement(value string) +``` + + + + +### Precision + +The precision to use when writing the data. + + +```js +influxDBOut.precision(value string) +``` + + + +### Quiet + +Suppress all error logging events from this node. + +```js +influxDBOut.quiet() +``` + + + + +### RetentionPolicy + +The name of the retention policy. + + +```js +influxDBOut.retentionPolicy(value string) +``` + + + + +### Tag + +Add a static tag to all data points. +Tag can be called more then once. + + + +```js +influxDBOut.tag(key string, value string) +``` + + + + +### WriteConsistency + +The write consistency to use when writing the data. + + +```js +influxDBOut.writeConsistency(value string) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... 
+``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +influxDBOut|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +influxDBOut|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + diff --git a/content/kapacitor/v1.5/nodes/influx_q_l_node.md b/content/kapacitor/v1.5/nodes/influx_q_l_node.md new file mode 100644 index 000000000..343021513 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/influx_q_l_node.md @@ -0,0 +1,911 @@ +--- +title: InfluxQLNode +description: InfluxQLNode performs the available function from the InfluxQL language. The function can be performed on a stream or batch edge. The resulting edge is dependent on the function. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: InfluxQLNode + identifier: influx_q_l_node + weight: 160 + parent: nodes +--- + +The `influxQL` node performs [InfluxQL functions](/influxdb/v1.5/query_language/functions/). +The function can be performed on a stream or batch edge. +The resulting edge is dependent on the function. +For a stream edge, all points with the same time are accumulated into the function. +For a batch edge, all points in the batch are accumulated into the function. + + +Example: + + +```js +stream + |window() + .period(10s) + .every(10s) + // Sum the values for each 10s window of data. + |sum('value') +``` + + +Note: Derivative has its own implementation as a [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) instead of as part of the +InfluxQL functions. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **influxQL** | Has no constructor signature. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[as](#as) ( `value` `string`)** | The name of the field, defaults to the name of function used (i.e. .mean -> 'mean') | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[usePointTimes](#usepointtimes) ( )** | Use the time of the selected point instead of the time of the batch. 
| + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### As + +The name of the field, defaults to the name of +function used (i.e. .mean -> 'mean') + + +```js +influxQL.as(value string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +influxQL.quiet() +``` + + + + +### UsePointTimes + +Use the time of the selected point instead of the time of the batch. + +Only applies to selector functions like first, last, top, bottom, etc. +Aggregation functions always use the batch time. + + +```js +influxQL.usePointTimes() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +influxQL|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +influxQL|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +influxQL|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +influxQL|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +influxQL|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +influxQL|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. 
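+
+Example (an illustrative sketch; the measurement and field names are placeholders):
+
+
+```js
+stream
+    |from()
+        .measurement('errors')
+    // Emit a running total of the 'value' field, one output point per input point.
+    |cumulativeSum('value')
+        .as('total_errors')
+```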
+ + +```js +influxQL|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below or equal to threshold "<=" in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below or equal to 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below or equal to 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below or equal to 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below or equal to 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +influxQL|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +influxQL|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +influxQL|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +influxQL|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +influxQL|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +influxQL|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. 
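+
+Example (an illustrative sketch only; the property methods shown, such as `.groupNameTag()`, `.min()`, `.max()`, and `.replicas()`, are assumed to mirror the other autoscale nodes — see the [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) reference for the authoritative API):
+
+
+```js
+stream
+    |from()
+        .measurement('requests')
+        .groupBy('group')
+    |ec2Autoscale()
+        // Assumed property methods, mirroring K8sAutoscaleNode:
+        .groupNameTag('group')
+        .min(1)
+        .max(10)
+        .replicas(lambda: int(ceil("value" / 100.0)))
+```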
+ + +```js +influxQL|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +influxQL|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +influxQL|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +influxQL|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +influxQL|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +influxQL|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +influxQL|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +influxQL|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +influxQL|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +influxQL|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +influxQL|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +influxQL|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. 
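+
+Example (a condensed sketch of the pattern shown in the [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) reference; the tag name and scaling target are placeholders):
+
+
+```js
+stream
+    |from()
+        .measurement('requests')
+        .groupBy('deployment')
+    |k8sAutoscale()
+        // Get the deployment name from the 'deployment' tag.
+        .resourceNameTag('deployment')
+        .min(1)
+        .max(100)
+        // Scale the replica count with the request rate.
+        .replicas(lambda: int(ceil("value" / 100.0)))
+```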
+ + +```js +influxQL|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +influxQL|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +influxQL|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +influxQL|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +influxQL|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +influxQL|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +influxQL|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +influxQL|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +influxQL|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +influxQL|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +influxQL|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +influxQL|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +influxQL|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +influxQL|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +influxQL|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +influxQL|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +influxQL|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. 
+The interval represents how often to emit the statistics based on real time.
+This means the interval time is independent of the times of the data points the source node is receiving.
+
+
+```js
+influxQL|stats(interval time.Duration)
+```
+
+Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/)
+
+
+
+### Stddev
+
+Compute the standard deviation.
+
+
+```js
+influxQL|stddev(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Sum
+
+Compute the sum of all values.
+
+
+```js
+influxQL|sum(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### SwarmAutoscale
+
+Create a node that can trigger autoscale events for a Docker swarm cluster.
+
+
+```js
+influxQL|swarmAutoscale()
+```
+
+Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/)
+
+
+
+### Top
+
+Select the top `num` points for `field` and sort by any extra tags or fields.
+
+
+```js
+influxQL|top(num int64, field string, fieldsAndTags ...string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Union
+
+Perform the union of this node and all other given nodes.
+
+
+```js
+influxQL|union(node ...Node)
+```
+
+Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/)
+
+
+
+### Where
+
+Create a new node that filters the data stream by a given expression.
+
+
+```js
+influxQL|where(expression ast.LambdaNode)
+```
+
+Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/)
+
+
+
+### Window
+
+Create a new node that windows the stream by time.
+
+NOTE: Window can only be applied to stream edges.
+
+
+```js
+influxQL|window()
+```
+
+Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/)
+
+
diff --git a/content/kapacitor/v1.5/nodes/join_node.md b/content/kapacitor/v1.5/nodes/join_node.md
new file mode 100644
index 000000000..9e24e8573
--- /dev/null
+++ b/content/kapacitor/v1.5/nodes/join_node.md
@@ -0,0 +1,1159 @@
+---
+title: JoinNode
+description: JoinNode joins the data from any number of nodes. As each data point is received from a parent node, it is paired with the next data points from the other parent nodes with a matching timestamp.
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: JoinNode
+    identifier: join_node
+    weight: 170
+    parent: nodes
+---
+
+The `join` node joins data from any number of nodes.
+As each data point is received from a parent node, it is paired
+with the next data points from the other parent nodes with a
+matching timestamp. Each parent node contributes at most one point
+to each joined point. A tolerance can be supplied to join points
+that do not have perfectly aligned timestamps.
+Any points that fall within the tolerance are joined on the timestamp.
+If multiple points fall within the same tolerance window, then they are joined in the order
+they arrive.
+
+Aliases are used to prefix all fields from the respective nodes.
+
+The join can be an inner or outer join; see the [JoinNode.Fill](/kapacitor/v1.5/nodes/join_node/#fill) property.
+
+#### Example: Joining two measurements
+In the example below, the `errors` and `requests` streams are joined
+and transformed to calculate a combined field.
+
+```js
+var errors = stream
+    |from()
+        .measurement('errors')
+var requests = stream
+    |from()
+        .measurement('requests')
+// Join the errors and requests streams
+errors
+    |join(requests)
+        // Provide prefix names for the fields of the data points.
+        .as('errors', 'requests')
+        // Points that are within 1 second are considered the same time.
+        .tolerance(1s)
+        // Fill missing values with 0, implies an outer join.
+        .fill(0.0)
+        // Name the resulting stream.
+        .streamName('error_rate')
+    // Both the "value" fields from each parent have been prefixed
+    // with the respective names 'errors' and 'requests'.
+    |eval(lambda: "errors.value" / "requests.value")
+        .as('rate')
+    ...
+```
+
+#### Example: Joining three or more measurements
+In the example below, the `errors`, `missing_page_errors`, and `server_errors` streams are
+joined and transformed to calculate two combined fields: `404_rate` and `500_rate`.
+
+```js
+var errors = stream
+    |from()
+        .measurement('errors')
+
+var missing_page_errors = stream
+    |from()
+        .measurement('errors')
+        .where(lambda: "type" == '404')
+
+var server_errors = stream
+    |from()
+        .measurement('errors')
+        .where(lambda: "type" == '500')
+
+// Join the errors, missing_page_errors, and server_errors streams
+errors
+    |join(missing_page_errors, server_errors)
+        // Provide prefix names for the fields of the data points.
+        .as('errors', '404', '500')
+        // Points that are within 1 second are considered the same time.
+        .tolerance(1s)
+        // Fill missing values with 0, implies an outer join.
+        .fill(0.0)
+        // Name the resulting stream.
+        .streamName('error_rates')
+    // The "value" fields from each parent have been prefixed
+    // with the respective names 'errors', '404', and '500'.
+    // Calculate the percentage of 404 errors
+    |eval(lambda: "404.value" / "errors.value")
+        .as('404_rate')
+    // Calculate the percentage of 500 errors
+    |eval(lambda: "500.value" / "errors.value")
+        .as('500_rate')
+    ...
+```
+
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **join ( `others` `...Node`)** | Join this node with other nodes. The data is joined on timestamp. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[as](#as) ( `names` `...string`)** | Prefix names for all fields from the respective nodes. Each field from the parent nodes will be prefixed with the provided name and a `.`. See the example below. |
+| **[delimiter](#delimiter) ( `value` `string`)** | The delimiter for the field name prefixes. Can be the empty string. |
+| **[fill](#fill) ( `value` `interface{}`)** | Fill the data. The fill option implies the type of join: inner or full outer. |
+| **[on](#on) ( `dims` `...string`)** | Join on a subset of the group by dimensions. This is a special case where you want a single point from one parent to join with multiple points from a different parent. |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+| **[streamName](#streamname) ( `value` `string`)** | The name of this new joined data stream. If empty the name of the left parent is used. |
+| **[tolerance](#tolerance) ( `value` `time.Duration`)** | The maximum duration of time that two incoming points can be apart and still be considered to be equal in time. The joined data point's time will be rounded to the nearest multiple of the tolerance duration. |
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Max](#max),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### As
+
+Prefix names for all fields from the respective nodes.
+Each field from the parent nodes will be prefixed with the provided name and a '.'.
+See the example above.
+
+The names cannot have a dot '.' character.
+
+
+
+```js
+join.as(names ...string)
+```
+
+
+
+
+### Delimiter
+
+The delimiter for the field name prefixes.
+Can be the empty string.
+
+
+```js
+join.delimiter(value string)
+```
+
+
+
+
+### Fill
+
+Fill the data.
+The fill option implies the type of join: inner or full outer.
+Options are:
+
+- none - (default) skip rows where a point is missing, inner join.
+- null - fill missing points with null, full outer join.
+- Any numerical value - fill fields with given value, full outer join.
+
+> When using a numerical or null fill, the field names are determined by copying
+> the field names from another point.
+> This doesn't work well when different sources have different field names.
+> Use the [DefaultNode](/kapacitor/v1.5/nodes/default_node/) and [DeleteNode](/kapacitor/v1.5/nodes/delete_node/)
+> to finalize the fill operation if necessary.
+
+```js
+join.fill(value interface{})
+```
+
+Example:
+
+```js
+var maintlock = stream
+    |from()
+        .measurement('maintlock')
+        .groupBy('service')
+var requests = stream
+    |from()
+        .measurement('requests')
+        .groupBy('service')
+// Join the maintlock and requests streams.
+// The intent is to drop any points in maintenance mode.
+maintlock
+    |join(requests)
+        // Provide prefix names for the fields of the data points.
+        .as('maintlock', 'requests')
+        // Points that are within 1 second are considered the same time.
+        .tolerance(1s)
+        // Fill missing fields with null, implies an outer join.
+        // A better default per field will be set later.
+        .fill('null')
+        // Name the resulting stream.
+        .streamName('requests')
+    |default()
+        // Default maintenance mode to false, overwriting the null value if present.
+        .field('maintlock.mode', false)
+        // Default the requests to 0, again overwriting the null value if present.
+        .field('requests.value', 0.0)
+    // Drop any points that are in maintenance mode.
+    |where(lambda: "maintlock.mode")
+    |...
+```
+
+#### Handling null fill values in outer joins
+When using Kapacitor to perform an outer join, it's important to set default values
+for `null` fields resulting from the join and fill operations.
+This is done using the [DefaultNode](/kapacitor/latest/nodes/default_node/),
+which replaces null values for a specific field key with a specified default value.
+Not doing so may result in invalid line protocol (as `null` isn't an appropriate
+value for all field types), causing the join to fail.
+
+```js
+source1
+    |join(source2)
+        .as('source1', 'source2')
+        .fill('null')
+    |default()
+        // .field('field-key', default-value)
+
+        // Define a default for an integer field type
+        .field('source1.rounded', 0)
+        // Define a default for a float field type
+        .field('source1.value', 0.0)
+        // Define a default for a string field type
+        .field('source2.location', '')
+        // Define a default for a boolean field type
+        .field('source2.maintenance', false)
+```
+
+> When using this method, you must know all fields and field types resulting from
+> the join and provide the appropriate default values.
+
+You can also use the [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) to remove
+unnecessary fields or tags resulting from the join.
+
+```js
+source1
+    |join(source2)
+        .as('source1', 'source2')
+        .fill('null')
+    |default()
+        .field('source1.mode', false)
+        .field('source2.value', 0.0)
+    |delete()
+        .field('source1.anon')
+        .tag('host')
+```
+
+
+
+
+### On
+
+Join on a subset of the group by dimensions.
+This is a special case where you want a single point from one parent to join with multiple
+points from a different parent.
+
+For example, given two measurements:
+
+1. building_power (a single value): tagged by building, value is the total power consumed by the building.
+2. floor_power (multiple values): tagged by building and floor, values are the total power consumed by each floor.
+
+You want to calculate the percentage of the total building power consumed by each floor.
+Since you only have one point per building you need it to join multiple times with
+the points from each floor. By defining the `on` dimensions as `building` we are saying
+that we want points that only have the building tag to be joined with more specific points that
+have more tags, in this case the `floor` tag. In other words, while we have points with the tags building and floor,
+we only want to join on the building tag.
+
+Example:
+
+
+```js
+var building = stream
+    |from()
+        .measurement('building_power')
+        .groupBy('building')
+var floor = stream
+    |from()
+        .measurement('floor_power')
+        .groupBy('building', 'floor')
+building
+    |join(floor)
+        .as('building', 'floor')
+        .on('building')
+    |eval(lambda: "floor.value" / "building.value")
+    ... // Values here are grouped by 'building' and 'floor'
+```
+
+
+
+```js
+join.on(dims ...string)
+```
+
+
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+
+```js
+join.quiet()
+```
+
+
+
+
+### StreamName
+
+The name of this new joined data stream.
+If empty, the name of the left parent is used.
+
+
+```js
+join.streamName(value string)
+```
+
+
+
+
+### Tolerance
+
+The maximum duration of time that two incoming points
+can be apart and still be considered to be equal in time.
+The joined data point's time will be rounded to the nearest
+multiple of the tolerance duration.
+
+
+```js
+join.tolerance(value time.Duration)
+```
+
+
+
+
+## Chaining Methods
+
+Chaining methods create a new node in the pipeline as a child of the calling node.
+They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +join|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +join|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +join|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point.. + +```js +join|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +join|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +join|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +join|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. 
+ data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +join|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +join|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +join|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +join|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +join|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +join|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +join|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +join|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +join|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +join|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +join|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +join|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +join|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +join|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. 
+For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +join|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +join|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +join|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +join|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +join|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +join|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +join|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +join|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +join|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +join|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +join|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +join|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +join|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +join|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +join|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +join|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. 
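+
+Example (an illustrative sketch; the measurement name is a placeholder, and the duration may be negative to shift points back in time):
+
+
+```js
+stream
+    |from()
+        .measurement('requests')
+    // Shift each point one hour into the past.
+    |shift(-1h)
+```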
+ + +```js +join|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +join|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +join|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +join|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +join|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +join|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +join|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +join|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +join|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +join|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +join|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +join|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +join|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/k8s_autoscale_node.md b/content/kapacitor/v1.5/nodes/k8s_autoscale_node.md new file mode 100644 index 000000000..8831df1b5 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/k8s_autoscale_node.md @@ -0,0 +1,1101 @@ +--- +title: K8sAutoscaleNode +description: K8sAutoscaleNode triggers autoscale events for a resource on a Kubernetes cluster and outputs points for the triggered events. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: K8sAutoscaleNode + identifier: k8s_autoscale_node + weight: 180 + parent: nodes +--- + +The `k8sAutoscale` node triggers autoscale events for a resource on a Kubernetes cluster. +The node also outputs points for the triggered events. 
+
+Example:
+
+
+```js
+// Target 100 requests per second per host
+var target = 100.0
+var min = 1
+var max = 100
+var period = 5m
+var every = period
+stream
+    |from()
+        .measurement('requests')
+        .groupBy('host', 'deployment')
+        .truncate(1s)
+    |derivative('value')
+        .as('requests_per_second')
+        .unit(1s)
+        .nonNegative()
+    |groupBy('deployment')
+    |sum('requests_per_second')
+        .as('total_requests')
+    |window()
+        .period(period)
+        .every(every)
+    |mean('total_requests')
+        .as('total_requests')
+    |k8sAutoscale()
+        // Get the name of the deployment from the 'deployment' tag.
+        .resourceNameTag('deployment')
+        .min(min)
+        .max(max)
+        // Set the desired number of replicas based on target.
+        .replicas(lambda: int(ceil("total_requests" / target)))
+    |influxDBOut()
+        .database('deployments')
+        .measurement('scale_events')
+        .precision('s')
+```
+
+
+The above example computes the requests per second by deployment and host.
+Then the `total_requests` per second across all hosts is computed per deployment.
+Using the mean of `total_requests` over the last time period, a desired number of replicas is computed
+based on the target number of requests per second per host.
+
+If the desired number of replicas has changed, Kapacitor makes the appropriate API call to Kubernetes
+to update the replicas spec.
+
+Any time the k8sAutoscale node changes a replica count, it emits a point.
+The point is tagged with the namespace, kind, and resource name,
+using the NamespaceTag, KindTag, and ResourceTag properties respectively.
+In addition, the group by tags are preserved on the emitted point.
+The point contains two fields, `old` and `new`, representing the change in the replica count.
+
+Available Statistics:
+
+* `increase_events`: number of times the replica count was increased.
+* `decrease_events`: number of times the replica count was decreased.
+* `cooldown_drops`: number of times an event was dropped because of a cooldown timer.
+* `errors`: number of errors encountered, typically related to communicating with the Kubernetes API.
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **k8sAutoscale ( )** | Create a node that can trigger autoscale events for a Kubernetes cluster. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[cluster](#cluster) ( `value` `string`)** | Cluster is the name of the Kubernetes cluster to use. |
+| **[currentField](#currentfield) ( `value` `string`)** | CurrentField is the name of a field into which the current replica count will be set as an int. If empty, no field will be set. Useful for computing deltas on the current state. |
+| **[decreaseCooldown](#decreasecooldown) ( `value` `time.Duration`)** | Only one decrease event can be triggered per resource every DecreaseCooldown interval. |
+| **[increaseCooldown](#increasecooldown) ( `value` `time.Duration`)** | Only one increase event can be triggered per resource every IncreaseCooldown interval. |
+| **[kind](#kind) ( `value` `string`)** | Kind is the type of resources to autoscale. Currently only "deployments", "replicasets", and "replicationcontrollers" are supported. Default: "deployments" |
+| **[kindTag](#kindtag) ( `value` `string`)** | KindTag is the name of a tag to use when tagging emitted points with the kind. If empty, the point will not be tagged with the kind. Default: kind |
+| **[max](#max) ( `value` `int64`)** | The maximum scale factor to set. If 0, there is no upper limit. Default: 0, a.k.a. no limit. |
+| **[min](#min) ( `value` `int64`)** | The minimum scale factor to set. Default: 1 |
+| **[namespace](#namespace) ( `value` `string`)** | Namespace is the namespace of the resource. If empty, the default namespace will be used. |
+| **[namespaceTag](#namespacetag) ( `value` `string`)** | NamespaceTag is the name of a tag to use when tagging emitted points with the namespace. If empty, the point will not be tagged with the namespace. Default: namespace |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+| **[replicas](#replicas) ( `value` `ast.LambdaNode`)** | Replicas is a lambda expression that should evaluate to the desired number of replicas for the resource. |
+| **[resourceName](#resourcename) ( `value` `string`)** | ResourceName is the name of the resource to autoscale. |
+| **[resourceNameTag](#resourcenametag) ( `value` `string`)** | ResourceNameTag is the name of a tag that names the resource to autoscale. |
+| **[resourceTag](#resourcetag) ( `value` `string`)** | ResourceTag is the name of a tag to use when tagging emitted points with the resource. If empty, the point will not be tagged with the resource. Default: resource |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Mean](#mean),
+[Median](#median),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### Cluster
+
+Cluster is the name of the Kubernetes cluster to use.
+
+
+```js
+k8sAutoscale.cluster(value string)
+```
+
+
+
+
+### CurrentField
+
+CurrentField is the name of a field into which the current replica count will be set as an int.
+If empty, no field will be set.
+Useful for computing deltas on the current state.
+
+Example:
+
+
+```js
+    |k8sAutoscale()
+        .currentField('replicas')
+        // Increase the replicas by 1 if the qps is over the threshold
+        .replicas(lambda: if("qps" > threshold, "replicas" + 1, "replicas"))
+```
+
+
+
+```js
+k8sAutoscale.currentField(value string)
+```
+
+
+
+
+### DecreaseCooldown
+
+Only one decrease event can be triggered per resource every DecreaseCooldown interval.
+
+
+```js
+k8sAutoscale.decreaseCooldown(value time.Duration)
+```
+
+
+
+
+### IncreaseCooldown
+
+Only one increase event can be triggered per resource every IncreaseCooldown interval.
+
+
+```js
+k8sAutoscale.increaseCooldown(value time.Duration)
+```
+
+
+
+
+### Kind
+
+Kind is the type of resources to autoscale.
+Currently only "deployments", "replicasets", and "replicationcontrollers" are supported.
+Default: "deployments"
+
+
+```js
+k8sAutoscale.kind(value string)
+```
+
+
+
+
+### KindTag
+
+KindTag is the name of a tag to use when tagging emitted points with the kind.
+If empty, the point will not be tagged with the kind.
+Default: kind
+
+
+```js
+k8sAutoscale.kindTag(value string)
+```
+
+
+
+
+### Max
+
+The maximum scale factor to set.
+If 0, there is no upper limit.
+Default: 0, a.k.a. no limit.
+
+
+```js
+k8sAutoscale.max(value int64)
+```
+
+
+
+
+### Min
+
+The minimum scale factor to set.
+Default: 1
+
+
+```js
+k8sAutoscale.min(value int64)
+```
+
+
+
+
+### Namespace
+
+Namespace is the namespace of the resource. If empty, the default namespace will be used.
+
+
+```js
+k8sAutoscale.namespace(value string)
+```
+
+
+
+
+### NamespaceTag
+
+NamespaceTag is the name of a tag to use when tagging emitted points with the namespace.
+If empty, the point will not be tagged with the namespace.
+Default: namespace
+
+
+```js
+k8sAutoscale.namespaceTag(value string)
+```
+
+
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+
+```js
+k8sAutoscale.quiet()
+```
+
+
+
+
+### Replicas
+
+Replicas is a lambda expression that should evaluate to the desired number of replicas for the resource.
+
+
+```js
+k8sAutoscale.replicas(value ast.LambdaNode)
+```
+
+
+
+
+### ResourceName
+
+ResourceName is the name of the resource to autoscale.
+
+
+```js
+k8sAutoscale.resourceName(value string)
+```
+
+
+
+
+### ResourceNameTag
+
+ResourceNameTag is the name of a tag that names the resource to autoscale.
+
+
+```js
+k8sAutoscale.resourceNameTag(value string)
+```
+
+
+
+
+### ResourceTag
+
+ResourceTag is the name of a tag to use when tagging emitted points with the resource.
+If empty, the point will not be tagged with the resource.
+Default: resource
+
+
+```js
+k8sAutoscale.resourceTag(value string)
+```
+
+
+
+
+## Chaining Methods
+
+Chaining methods create a new node in the pipeline as a child of the calling node.
+They do not modify the calling node.
+Chaining methods are marked using the `|` operator.
+
+
+### Alert
+
+Create an alert node, which can trigger alerts.
+
+
+```js
+k8sAutoscale|alert()
+```
+
+Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/)
+
+
+
+### Barrier
+
+Create a new Barrier node that emits a BarrierMessage periodically.
+
+One BarrierMessage will be emitted every period duration.
+
+
+```js
+k8sAutoscale|barrier()
+```
+
+Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/)
+
+
+
+### Bottom
+
+Select the bottom `num` points for `field` and sort by any extra tags or fields.
+
+
+```js
+k8sAutoscale|bottom(num int64, field string, fieldsAndTags ...string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### ChangeDetect
+
+Create a new node that only emits new points if different from the previous point.
+
+```js
+k8sAutoscale|changeDetect(field string)
+```
+
+Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/)
+
+
+
+### Combine
+
+Combine this node with itself. The data is combined on timestamp.
+
+
+```js
+k8sAutoscale|combine(expressions ...ast.LambdaNode)
+```
+
+Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/)
+
+
+
+### Count
+
+Count the number of points.
+ + +```js +k8sAutoscale|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +k8sAutoscale|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +k8sAutoscale|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +k8sAutoscale|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +k8sAutoscale|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +k8sAutoscale|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +k8sAutoscale|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. 
+ + +```js +k8sAutoscale|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +k8sAutoscale|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +k8sAutoscale|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +k8sAutoscale|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +k8sAutoscale|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +k8sAutoscale|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +k8sAutoscale|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +k8sAutoscale|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +k8sAutoscale|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +k8sAutoscale|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +k8sAutoscale|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +k8sAutoscale|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. 
+ + +```js +k8sAutoscale|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +k8sAutoscale|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +k8sAutoscale|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +k8sAutoscale|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +k8sAutoscale|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +k8sAutoscale|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +k8sAutoscale|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +k8sAutoscale|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +k8sAutoscale|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +k8sAutoscale|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +k8sAutoscale|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +k8sAutoscale|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +k8sAutoscale|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +k8sAutoscale|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +k8sAutoscale|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +k8sAutoscale|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. 
+This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +k8sAutoscale|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +k8sAutoscale|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +k8sAutoscale|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +k8sAutoscale|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +k8sAutoscale|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +k8sAutoscale|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +k8sAutoscale|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +k8sAutoscale|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/kapacitor_loopback_node.md b/content/kapacitor/v1.5/nodes/kapacitor_loopback_node.md new file mode 100644 index 000000000..7ca9a4ff7 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/kapacitor_loopback_node.md @@ -0,0 +1,255 @@ +--- +title: KapacitorLoopbackNode +description: KapacitorLoopbackNode writes data back into the Kapacitor stream. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: KapacitorLoopback + identifier: kapacitor_loopback_node + weight: 190 + parent: nodes +--- + +The `kapacitorLoopback` node writes data back into the Kapacitor stream. +To write data to a remote Kapacitor instance use the [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/). + +Example: + + +```js +|kapacitorLoopback() + .database('mydb') + .retentionPolicy('myrp') + .measurement('errors') + .tag('kapacitor', 'true') + .tag('version', '0.2') +``` + +{{% note %}} +#### Beware of infinite loops +It is possible to create infinite loops using the KapacitorLoopback node. +Take care to ensure you do not chain tasks together creating a loop. +{{% /note %}} + +{{% warn %}} +#### Avoid name collisions with multiple subscriptions +When using the KapacitorLoopback node, don't subscribe to identically named +databases and retention policies in multiple InfluxDB instances or clusters. +If Kapacitor is subscribed to multiple instances of InfluxDB, make each database +and retention policy combination unique. For example: + +``` +influxdb_1 + └─ db1/rp1 + +influxdb_2 + └─ db2/rp2 +``` +{{% /warn %}} + + + +Available Statistics: + +* `points_written`: number of points written back to Kapacitor + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **kapacitorLoopback ( )** | Create an kapacitor loopback node that will send data back into Kapacitor as a stream. 
| + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[database](#database) ( `value` `string`)** | The name of the database. | +| **[measurement](#measurement) ( `value` `string`)** | The name of the measurement. | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[retentionPolicy](#retentionpolicy) ( `value` `string`)** | The name of the retention policy. | +| **[tag](#tag) ( `key` `string`, `value` `string`)** | Add a static tag to all data points. Tag can be called more than once. | + + + +### Chaining Methods +[Deadman](#deadman), +[Stats](#stats) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Database + +The name of the database. + + +```js +kapacitorLoopback.database(value string) +``` + + + + +### Measurement + +The name of the measurement. + + +```js +kapacitorLoopback.measurement(value string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +kapacitorLoopback.quiet() +``` + + + + +### RetentionPolicy + +The name of the retention policy. + + +```js +kapacitorLoopback.retentionPolicy(value string) +``` + + + + +### Tag + +Add a static tag to all data points. +Tag can be called more than once. + + + +```js +kapacitorLoopback.tag(key string, value string) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... 
+ // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +kapacitorLoopback|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +kapacitorLoopback|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + diff --git a/content/kapacitor/v1.5/nodes/log_node.md b/content/kapacitor/v1.5/nodes/log_node.md new file mode 100644 index 000000000..c6434ba14 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/log_node.md @@ -0,0 +1,900 @@ +--- +title: LogNode +description: LogNode logs all data that passes through the node. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: LogNode + identifier: log_node + weight: 200 + parent: nodes +--- + +The `log` node logs all data that passes through it. + +Example: + + +```js +stream.from()... + |window() + .period(10s) + .every(10s) + |log() + |count('value') +``` + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **log ( )** | Create a node that logs all data it receives. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[level](#level) ( `value` `string`)** | The level at which to log the data. One of: DEBUG, INFO, WARN, ERROR Default: INFO | +| **[prefix](#prefix) ( `value` `string`)** | Optional prefix to add to all log messages | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Level + +The level at which to log the data. 
+One of: DEBUG, INFO, WARN, ERROR +Default: INFO + + +```js +log.level(value string) +``` + + + + +### Prefix + +Optional prefix to add to all log messages + + +```js +log.prefix(value string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +log.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +log|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +log|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +log|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +log|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +log|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +log|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +log|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. 
+ data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +log|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +log|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +log|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +log|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +log|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +log|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +log|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +log|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +log|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +log|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +log|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +log|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +log|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. 
+This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +log|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +log|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +log|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +log|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +log|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +log|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +log|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +log|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +log|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +log|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +log|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +log|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +log|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +log|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +log|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. 
+ + +```js +log|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +log|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +log|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +log|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +log|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +log|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +log|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +log|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +log|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +log|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +log|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +log|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +log|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +log|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. 
+
+
+```js
+log|window()
+```
+
+Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/)
+
+
diff --git a/content/kapacitor/v1.5/nodes/no_op_node.md b/content/kapacitor/v1.5/nodes/no_op_node.md
new file mode 100644
index 000000000..f941de6a7
--- /dev/null
+++ b/content/kapacitor/v1.5/nodes/no_op_node.md
@@ -0,0 +1,867 @@
+---
+title: NoOpNode
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: NoOpNode
+    identifier: no_op_node
+    weight: 210
+    parent: nodes
+---
+
+The `noOp` node does not perform any operation.
+
+> Do not use this node in a TICKscript. There should be no need for it.
+
+If a node does not have any children, then its emitted count remains zero.
+Using a [NoOpNode](/kapacitor/v1.5/nodes/no_op_node/) is a workaround so that statistics are accurately reported
+for nodes with no real children.
+A [NoOpNode](/kapacitor/v1.5/nodes/no_op_node/) is automatically appended to any node that is a source for a [StatsNode](/kapacitor/v1.5/nodes/stats_node/)
+and does not have any children.
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **noOp** | Has no constructor signature. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Max](#max),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+
+```js
+noOp.quiet()
+```
+
+
+
+
+## Chaining Methods
+
+Chaining methods create a new node in the pipeline as a child of the calling node.
+They do not modify the calling node.
+Chaining methods are marked using the `|` operator.
+
+
+### Alert
+
+Create an alert node, which can trigger alerts.
+
+
+```js
+noOp|alert()
+```
+
+Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/)
+
+
+
+### Barrier
+
+Create a new Barrier node that emits a BarrierMessage periodically.
+
+One BarrierMessage will be emitted every period duration.
+
+
+```js
+noOp|barrier()
+```
+
+Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/)
+
+
+
+### Bottom
+
+Select the bottom `num` points for `field` and sort by any extra tags or fields.
+ + +```js +noOp|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +noOp|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +noOp|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +noOp|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +noOp|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +noOp|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +noOp|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. 
+ + +```js +noOp|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +noOp|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +noOp|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +noOp|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +noOp|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +noOp|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +noOp|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +noOp|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +noOp|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +noOp|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +noOp|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +noOp|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +noOp|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. 
+ + +```js +noOp|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +noOp|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +noOp|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +noOp|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +noOp|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +noOp|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +noOp|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +noOp|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +noOp|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +noOp|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +noOp|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +noOp|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +noOp|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +noOp|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +noOp|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +noOp|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +noOp|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. 
+ + +```js +noOp|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +noOp|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +noOp|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +noOp|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +noOp|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +noOp|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +noOp|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +noOp|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +noOp|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +noOp|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +noOp|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/query_node.md b/content/kapacitor/v1.5/nodes/query_node.md new file mode 100644 index 000000000..36135743d --- /dev/null +++ b/content/kapacitor/v1.5/nodes/query_node.md @@ -0,0 +1,1112 @@ +--- +title: QueryNode +description: QueryNode defines a source and a schedule for processing batch data. The data is queried from an InfluxDB database and then passed into the data pipeline. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: QueryNode + identifier: query_node + weight: 220 + parent: nodes +--- + +The `query` node defines a source and a schedule for processing batch data. Data is queried from InfluxDB, computed by the `query` node, and then passed into the data pipeline. + +Example: + +```js +batch + |query(''' + SELECT mean("value") + FROM "telegraf"."default".cpu_usage_idle + WHERE "host" = 'serverA' + ''') + .period(1m) + .every(20s) + .groupBy(time(10s), 'cpu') + ... +``` + +In the example above, InfluxDB is queried every 20 seconds; the window of time returned spans 1 minute and is grouped into 10 second buckets. 
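+
+Kapacitor adds the time conditions and the `GROUP BY` clause to the query dynamically on each run.
+As a rough sketch (the exact time bounds depend on when the schedule fires), each query issued for the task above would look something like:
+
+```js
+SELECT mean("value")
+FROM "telegraf"."default".cpu_usage_idle
+-- the time bounds below are illustrative placeholders filled in by Kapacitor at run time
+WHERE "host" = 'serverA' AND time >= '2020-07-30T10:00:00Z' AND time < '2020-07-30T10:01:00Z'
+GROUP BY time(10s), "cpu"
+```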
+
+To use InfluxQL advanced syntax for functions in the `query` node, you must express part of the InfluxQL query in TICKscript. Advanced syntax computes the nested aggregation over time buckets, and then applies the outer aggregate to the results.
+
+For example, the following InfluxQL query, which calculates the non-negative difference of the 10-second mean of cpu usage for every cpu, isn't valid for the `query` node:
+
+```js
+SELECT non_negative_difference(mean("value"))
+  FROM "telegraf"."default"."cpu_usage_idle"
+  WHERE "host" = 'serverA'
+  GROUP BY time(10s), "cpu"
+  ...
+```
+
+To calculate the result above for the `query` node, you must specify the grouping and the outer aggregate using TICKscript:
+
+```js
+batch
+  |query('''
+    SELECT mean("value")
+    FROM "telegraf"."default".cpu_usage_idle
+    WHERE "host" = 'serverA'
+  ''')
+    .period(1m)
+    .every(1m)
+    .groupBy(time(10s), 'cpu')
+  |difference('mean')
+  |where(lambda: "difference" >= 0)
+  ...
+```
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **query ( `q` `string`)** | The query to execute. Must not contain a time condition in the `WHERE` clause or contain a `GROUP BY` clause. The time conditions are added dynamically according to the period, offset, and schedule. The `GROUP BY` clause is added dynamically according to the dimensions passed to the `groupBy` method. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[align](#align) ( )** | Align start and stop times for queries with even boundaries of the QueryNode.Every property. Does not apply if using the QueryNode.Cron property. |
+| **[alignGroup](#aligngroup) ( )** | Align the group by time intervals with the start time of the query. |
+| **[cluster](#cluster) ( `value` `string`)** | The name of a configured InfluxDB cluster. If empty the default cluster will be used. |
+| **[cron](#cron) ( `value` `string`)** | Define a schedule using a cron syntax. |
+| **[every](#every) ( `value` `time.Duration`)** | How often to query InfluxDB. |
+| **[fill](#fill) ( `value` `interface{}`)** | Fill the data. Options are: |
+| **[groupBy](#groupby) ( `d` `...interface{}`)** | Group the data by a set of dimensions. Can specify one time dimension. |
+| **[groupByMeasurement](#groupbymeasurement) ( )** | If set, include the measurement name in the group ID, along with any other group by dimensions. |
+| **[offset](#offset) ( `value` `time.Duration`)** | How far back in time to query from the current time. |
+| **[period](#period) ( `value` `time.Duration`)** | The period or length of time that will be queried from InfluxDB. |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Max](#max),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### Align
+
+Align start and stop times for queries with even boundaries of the [QueryNode.Every](/kapacitor/v1.5/nodes/query_node/#every) property.
+Does not apply if using the [QueryNode.Cron](/kapacitor/v1.5/nodes/query_node/#cron) property.
+
+
+```js
+query.align()
+```
+
+
+
+
+### AlignGroup
+
+Align the group by time intervals with the start time of the query.
+
+
+```js
+query.alignGroup()
+```
+
+
+
+
+### Cluster
+
+The name of a configured InfluxDB cluster.
+If empty the default cluster will be used.
+
+
+```js
+query.cluster(value string)
+```
+
+
+
+
+### Cron
+
+Define a schedule using a cron syntax.
+
+The specific cron implementation is documented here:
+https://github.com/gorhill/cronexpr#implementation
+
+The Cron property is mutually exclusive with the Every property.
+
+
+```js
+query.cron(value string)
+```
+
+
+
+
+### Every
+
+How often to query InfluxDB.
+
+The Every property is mutually exclusive with the Cron property.
+
+
+```js
+query.every(value time.Duration)
+```
+
+
+
+
+### Fill
+
+Fill the data.
+Options are:
+
+- Any numerical value
+- null - exhibits the same behavior as the default
+- previous - reports the value of the previous window
+- none - suppresses timestamps and values where the value is null
+- linear - reports the results of linear interpolation
+
+
+```js
+query.fill(value interface{})
+```
+
+
+
+
+### GroupBy
+
+Group the data by a set of dimensions.
+Can specify one time dimension.
+
+This property adds a `GROUP BY` clause to the query
+so all the normal behaviors when querying InfluxDB with a `GROUP BY` apply.
+
+Use group by time when your period is longer than your group by time interval.
+
+Example:
+
+
+```js
+    batch
+        |query(...)
+            .period(1m)
+            .every(1m)
+            .groupBy(time(10s), 'tag1', 'tag2')
+            .align()
+```
+
+A group by time offset is also possible.
+
+Example:
+
+
+```js
+    batch
+        |query(...)
+            .period(1m)
+            .every(1m)
+            .groupBy(time(10s, -5s), 'tag1', 'tag2')
+            .align()
+            .offset(5s)
+```
+
+It is recommended to use [QueryNode.Align](/kapacitor/v1.5/nodes/query_node/#align) and [QueryNode.Offset](/kapacitor/v1.5/nodes/query_node/#offset) in conjunction with
+group by time dimensions so that the time bounds match up with the group by intervals.
+To automatically align the group by intervals to the start of the query time,
+use [QueryNode.AlignGroup](/kapacitor/v1.5/nodes/query_node/#aligngroup). This is useful in more complex situations, such as when
+the groupBy time period is longer than the query frequency.
+
+Example:
+
+
+```js
+    batch
+        |query(...)
+            .period(5m)
+            .every(30s)
+            .groupBy(time(1m), 'tag1', 'tag2')
+            .align()
+            .alignGroup()
+```
+
+For the above example, without [QueryNode.AlignGroup](/kapacitor/v1.5/nodes/query_node/#aligngroup), every other query issued by Kapacitor
+(at :30 past the minute) will align to :00 seconds instead of the desired :30 seconds,
+which would create 6 group by intervals instead of 5, the first and last of which
+would only have 30 seconds of data instead of a full minute.
+If the group by time offset (i.e. time(t, offset)) is used in conjunction with
+[QueryNode.AlignGroup](/kapacitor/v1.5/nodes/query_node/#aligngroup), the alignment occurs first and is then offset by
+the specified amount.
+
+NOTE: Since [QueryNode.Offset](/kapacitor/v1.5/nodes/query_node/#offset) is inherently a negative property, the second "offset" argument to the "time" function is negative to match.
+
+
+
+```js
+query.groupBy(d ...interface{})
+```
+
+
+
+
+### GroupByMeasurement
+
+If set, include the measurement name in the group ID,
+along with any other group by dimensions.
+
+Example:
+
+
+```js
+    batch
+        |query('SELECT sum("value") FROM "telegraf"."autogen"./process_.*/')
+            .groupByMeasurement()
+            .groupBy('host')
+```
+
+The above example selects data from several measurements matching `/process_.*/`,
+then groups each point by the `host` tag and the measurement name,
+keeping measurements in their own groups.
+
+
+```js
+query.groupByMeasurement()
+```
+
+
+
+
+### Offset
+
+How far back in time to query from the current time.
+
+For example, with an Offset of 2 hours and an Every of 5m,
+Kapacitor queries InfluxDB every 5 minutes for the window of data from 2 hours ago.
+
+This applies to Cron schedules as well. If the cron specifies a run every Sunday at
+1 AM and the Offset is 1 hour, then at 1 AM on Sunday the data from 12 AM is queried.
+
+
+```js
+query.offset(value time.Duration)
+```
+
+
+
+
+### Period
+
+The period or length of time that will be queried from InfluxDB.
+
+
+```js
+query.period(value time.Duration)
+```
+
+
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+
+```js
+query.quiet()
+```
+
+
+
+
+## Chaining Methods
+
+Chaining methods create a new node in the pipeline as a child of the calling node.
+They do not modify the calling node.
+Chaining methods are marked using the `|` operator.
+
+
+### Alert
+
+Create an alert node, which can trigger alerts.
+
+
+```js
+query|alert()
+```
+
+Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/)
+
+
+
+### Barrier
+
+Create a new Barrier node that emits a BarrierMessage periodically.
+
+One BarrierMessage will be emitted every period duration.
+
+
+```js
+query|barrier()
+```
+
+Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/)
+
+
+
+### Bottom
+
+Select the bottom `num` points for `field` and sort by any extra tags or fields.
+ + +```js +query|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +query|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +query|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +query|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +query|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = batch + |query()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = batch + |query()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = batch + |query()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = batch + |query()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +query|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +query|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. 
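+
+For example, a sketch that drops a raw field and a tag before the data continues down the pipeline (the field and tag names here are examples only):
+
+```js
+batch
+    |query()...
+    |delete()
+        // 'raw_value' and 'datacenter' are placeholder names
+        .field('raw_value')
+        .tag('datacenter')
+```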
+ + +```js +query|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +query|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +query|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +query|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +query|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +query|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +query|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +query|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +query|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +query|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +query|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +query|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +query|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. 
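+
+For example, a sketch that writes the query results back to InfluxDB (the database, retention policy, and measurement names are placeholders):
+
+```js
+batch
+    |query()...
+    |influxDBOut()
+        // destination names below are placeholders
+        .database('mydb')
+        .retentionPolicy('autogen')
+        .measurement('downsampled_cpu')
+```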
+ + +```js +query|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +query|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +query|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +query|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +query|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +query|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +query|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +query|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +query|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +query|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +query|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +query|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +query|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +query|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +query|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +query|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +query|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. 
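+
+For example, a sketch that counts how many consecutive points report low idle CPU (the field name and threshold are assumptions):
+
+```js
+batch
+    |query()...
+    // 'usage_idle' and the 10.0 threshold are example values
+    |stateCount(lambda: "usage_idle" < 10.0)
+        .as('busy_count')
+```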
+ + +```js +query|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +query|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +query|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +query|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +query|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +query|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +query|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +query|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +query|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +query|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/sample_node.md b/content/kapacitor/v1.5/nodes/sample_node.md new file mode 100644 index 000000000..b8b3bbc11 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/sample_node.md @@ -0,0 +1,881 @@ +--- +title: SampleNode +description: SampleNode samples points or batches. One point will be emitted every count or duration specified. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: SampleNode + identifier: sample_node + weight: 230 + parent: nodes +--- + +The `sample` node samples points or batches. +One point will be emitted every count or duration specified. + +Example: + + +```js +stream + |sample(3) +``` + +Keep every third data point or batch. + +Example: + +```js +stream + |sample(10s) +``` + +Keep only samples that land on the 10s boundary. +See [FromNode.Truncate,](/kapacitor/v1.5/nodes/from_node/#truncate) [QueryNode.GroupBy](/kapacitor/v1.5/nodes/query_node/#groupby) time or [WindowNode.Align](/kapacitor/v1.5/nodes/window_node/#align) +for ensuring data is aligned with a boundary. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **sample ( `rate` `interface{}`)** | Create a new node that samples the incoming points or batches. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. 
| + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Quiet + +Suppress all error logging events from this node. + +```js +sample.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +sample|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +sample|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +sample|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +sample|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +sample|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +sample|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +sample|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. 
Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +sample|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +sample|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +sample|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +sample|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +sample|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +sample|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +sample|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +sample|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. 
+ + +```js +sample|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +sample|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +sample|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +sample|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +sample|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +sample|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +sample|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +sample|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +sample|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +sample|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +sample|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +sample|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +sample|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +sample|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. 
+ + +```js +sample|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +sample|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +sample|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +sample|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +sample|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +sample|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +sample|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +sample|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +sample|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +sample|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +sample|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +sample|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +sample|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +sample|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +sample|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +sample|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. 
+ + +```js +sample|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +sample|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +sample|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +sample|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +sample|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/shift_node.md b/content/kapacitor/v1.5/nodes/shift_node.md new file mode 100644 index 000000000..a4238fe83 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/shift_node.md @@ -0,0 +1,880 @@ +--- +title: ShiftNode +description: ShiftNode shifts points and batches in time. This is useful for comparing batches or points from different times. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: ShiftNode + identifier: shift_node + weight: 240 + parent: nodes +--- + +The `shift` node shifts points and batches in time. +This is useful for comparing batches or points from different times. + +Example: + + +```js +stream + |shift(5m) +``` + +Shift all data points 5m forward in time. + +Example: + + +```js +stream + |shift(-10s) +``` + +Shift all data points 10s backward in time. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **shift ( `shift` `time.Duration`)** | Create a new node that shifts the incoming points or batches in time. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. 
+Property methods are marked using the `.` operator. + + +### Quiet + +Suppress all error logging events from this node. + +```js +shift.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +shift|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +shift|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +shift|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +shift|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +shift|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +shift|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +shift|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... 
+``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +shift|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +shift|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +shift|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +shift|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +shift|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +shift|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +shift|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +shift|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +shift|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +shift|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +shift|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +shift|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +shift|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. 
+ + +```js +shift|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +shift|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +shift|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +shift|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +shift|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +shift|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +shift|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +shift|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +shift|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +shift|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +shift|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +shift|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +shift|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +shift|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +shift|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. 
+ + +```js +shift|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +shift|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +shift|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +shift|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +shift|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +shift|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +shift|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +shift|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +shift|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +shift|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +shift|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +shift|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +shift|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +shift|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. 
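+
+For example, a sketch of a downstream window that emits the most recent 10 minutes of data every 5 minutes:
+
+```js
+stream
+    |from()...
+    |window()
+        // emit the last 10m of data every 5m
+        .period(10m)
+        .every(5m)
+```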
+
+
+```js
+shift|window()
+```
+
+Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/)
+
+
diff --git a/content/kapacitor/v1.5/nodes/sideload_node.md b/content/kapacitor/v1.5/nodes/sideload_node.md
new file mode 100644
index 000000000..d7bcdce41
--- /dev/null
+++ b/content/kapacitor/v1.5/nodes/sideload_node.md
@@ -0,0 +1,933 @@
+---
+title: SideloadNode
+description: SideloadNode adds fields and tags to points based on hierarchical data from various sources.
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: SideloadNode
+    identifier: sideload_node
+    weight: 250
+    parent: nodes
+---
+
+The `sideload` node adds fields and tags to points based on hierarchical data from various sources.
+
+Example:
+
+
+```js
+|sideload()
+    .source('file:///path/to/dir')
+    .order('host/{{.host}}.yml', 'hostgroup/{{.hostgroup}}.yml')
+    .field('cpu_threshold', 0.0)
+    .tag('foo', 'unknown')
+```
+
+This example adds a field `cpu_threshold` and a tag `foo` to each point, with values loaded from the hierarchical source.
+The list of templates in the `.order()` property is evaluated using the point's tags.
+The resulting file paths are then checked, in order, for the specified keys, and the first value found is used.
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **sideload ( )** | Create a node that can load data from external sources. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[field](#field) ( `f` `string`, `v` `interface{}`)** | Field is the name of a field to load from the source and its default value. The type loaded must match the type of the default value. Otherwise an error is recorded and the default value is used. |
+| **[order](#order) ( `order` `...string`)** | Order is a list of paths that indicate the hierarchical order. The paths are relative to the source and can have template markers like `{{.tagname}}` that will be replaced with the tag value of the point. The paths are then searched in order for the keys, and the first value found is used. This allows values to be overridden based on a hierarchy of tags. |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+| **[source](#source) ( `value` `string`)** | Source for the data. Currently only `file://`-based sources are supported. |
+| **[tag](#tag) ( `t` `string`, `v` `string`)** | Tag is the name of a tag to load from the source and its default value. The loaded values must be strings, otherwise an error is recorded and the default value is used.
| + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Field + +Field is the name of a field to load from the source and its default value. +The type loaded must match the type of the default value. +Otherwise an error is recorded and the default value is used. + + +```js +sideload.field(f string, v interface{}) +``` + + + + +### Order + +Order is a list of paths that indicate the hierarchical order. +The paths are relative to the source and can have template markers like `{{.tagname}}` that will be replaced with the tag value of the point. +The paths are then searched in order for the keys and the first value that is found is used. +This allows for values to be overridden based on a hierarchy of tags. + + +```js +sideload.order(order ...string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +sideload.quiet() +``` + + + + +### Source + +Source for the data, currently only `file://` based sources are supported + + +```js +sideload.source(value string) +``` + + + + +### Tag + +Tag is the name of a tag to load from the source and its default value. +The loaded values must be strings, otherwise an error is recorded and the default value is used. + + +```js +sideload.tag(t string, v string) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +sideload|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +sideload|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. 
+ + +```js +sideload|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +sideload|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +sideload|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +sideload|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +sideload|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +sideload|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +sideload|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. 
+ + +```js +sideload|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +sideload|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +sideload|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +sideload|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +sideload|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +sideload|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +sideload|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +sideload|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +sideload|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +sideload|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +sideload|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +sideload|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +sideload|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. 
+ + +```js +sideload|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +sideload|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +sideload|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +sideload|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +sideload|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +sideload|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +sideload|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +sideload|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +sideload|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +sideload|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +sideload|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +sideload|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +sideload|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +sideload|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +sideload|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +sideload|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +sideload|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. 
+
+
+```js
+sideload|spread(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### StateCount
+
+Create a node that tracks the number of consecutive points in a given state.
+
+
+```js
+sideload|stateCount(expression ast.LambdaNode)
+```
+
+Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/)
+
+
+
+### StateDuration
+
+Create a node that tracks duration in a given state.
+
+
+```js
+sideload|stateDuration(expression ast.LambdaNode)
+```
+
+Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/)
+
+
+
+### Stats
+
+Create a new stream of data that contains the internal statistics of the node.
+The interval represents how often to emit the statistics based on real time.
+This means the interval time is independent of the times of the data points the source node is receiving.
+
+
+```js
+sideload|stats(interval time.Duration)
+```
+
+Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/)
+
+
+
+### Stddev
+
+Compute the standard deviation.
+
+
+```js
+sideload|stddev(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Sum
+
+Compute the sum of all values.
+
+
+```js
+sideload|sum(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### SwarmAutoscale
+
+Create a node that can trigger autoscale events for a Docker swarm cluster.
+
+
+```js
+sideload|swarmAutoscale()
+```
+
+Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/)
+
+
+
+### Top
+
+Select the top `num` points for `field` and sort by any extra tags or fields.
+
+
+```js
+sideload|top(num int64, field string, fieldsAndTags ...string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Union
+
+Perform the union of this node and all other given nodes.
+
+
+```js
+sideload|union(node ...Node)
+```
+
+Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/)
+
+
+
+### Where
+
+Create a new node that filters the data stream by a given expression.
+
+
+```js
+sideload|where(expression ast.LambdaNode)
+```
+
+Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/)
+
+
+
+### Window
+
+Create a new node that windows the stream by time.
+
+NOTE: Window can only be applied to stream edges.
+
+
+```js
+sideload|window()
+```
+
+Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/)
+
+
diff --git a/content/kapacitor/v1.5/nodes/state_count_node.md b/content/kapacitor/v1.5/nodes/state_count_node.md
new file mode 100644
index 000000000..8df173b0d
--- /dev/null
+++ b/content/kapacitor/v1.5/nodes/state_count_node.md
@@ -0,0 +1,898 @@
+---
+title: StateCountNode
+description: StateCountNode computes the number of consecutive points in a given state (defined using a lambda expression).
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: StateCountNode
+    identifier: state_count_node
+    weight: 260
+    parent: nodes
+---
+
+The `stateCount` node computes the number of consecutive points in a given state.
+The state is defined via a lambda expression. For each consecutive point for
+which the expression evaluates as `true`, the state count will be incremented.
+When a point evaluates as `false`, the state count is reset.
+
+The state count will be added as an additional `int64` field to each point.
+If the expression evaluates as `false`, the value will be `-1`.
+If the expression generates an error during evaluation, the point is discarded, and does not affect the state count.
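+
+For instance, given the expression `lambda: "cpu" > 80.0`, a hypothetical
+series of points would be assigned the following `state_count` values:
+
+
+```js
+// "cpu" values:  85, 92, 70, 86, 99, 99   (hypothetical input)
+// state_count:    1,  2, -1,  1,  2,  3
+```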
+
+Example:
+
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |where(lambda: "cpu" == 'cpu-total')
+    |groupBy('host')
+    |stateCount(lambda: "usage_idle" <= 10)
+    |alert()
+        // Warn after 1 point
+        .warn(lambda: "state_count" >= 1)
+        // Critical after 5 points
+        .crit(lambda: "state_count" >= 5)
+```
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **stateCount ( `expression` `ast.LambdaNode`)** | Create a node that tracks the number of consecutive points in a given state. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[as](#as) ( `value` `string`)** | The new name of the resulting count field. Default: 'state_count' |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Max](#max),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### As
+
+The new name of the resulting count field.
+Default: 'state_count'
+
+
+```js
+stateCount.as(value string)
+```
+
+
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+
+```js
+stateCount.quiet()
+```
+
+
+
+
+## Chaining Methods
+
+Chaining methods create a new node in the pipeline as a child of the calling node.
+They do not modify the calling node.
+Chaining methods are marked using the `|` operator.
+
+
+### Alert
+
+Create an alert node, which can trigger alerts.
+
+
+```js
+stateCount|alert()
+```
+
+Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/)
+
+
+
+### Barrier
+
+Create a new Barrier node that emits a BarrierMessage periodically.
+
+One BarrierMessage will be emitted every period duration.
+
+
+```js
+stateCount|barrier()
+```
+
+Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/)
+
+
+
+### Bottom
+
+Select the bottom `num` points for `field` and sort by any extra tags or fields.
+
+
+```js
+stateCount|bottom(num int64, field string, fieldsAndTags ...string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### ChangeDetect
+
+Create a new node that only emits new points if different from the previous point.
+ +```js +stateCount|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +stateCount|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +stateCount|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +stateCount|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +stateCount|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +stateCount|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +stateCount|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. 
+ + +```js +stateCount|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +stateCount|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +stateCount|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +stateCount|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +stateCount|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +stateCount|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +stateCount|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +stateCount|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +stateCount|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +stateCount|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +stateCount|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +stateCount|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. 
+ + +```js +stateCount|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +stateCount|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +stateCount|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +stateCount|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +stateCount|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +stateCount|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +stateCount|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +stateCount|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +stateCount|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +stateCount|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +stateCount|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +stateCount|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +stateCount|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +stateCount|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +stateCount|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +stateCount|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +stateCount|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. 
+ + +```js +stateCount|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +stateCount|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +stateCount|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +stateCount|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +stateCount|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +stateCount|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +stateCount|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +stateCount|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +stateCount|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +stateCount|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +stateCount|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/state_duration_node.md b/content/kapacitor/v1.5/nodes/state_duration_node.md new file mode 100644 index 000000000..75aee01cb --- /dev/null +++ b/content/kapacitor/v1.5/nodes/state_duration_node.md @@ -0,0 +1,922 @@ +--- +title: StateDurationNode +description: StateDurationNode computes the duration of a given state (defined using a lambda expression). +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: StateDurationNode + identifier: state_duration_node + weight: 270 + parent: nodes +--- + +The `stateDuration` node computes the duration of a given state. +The state is defined via a lambda expression. For each consecutive point for +which the expression evaluates as `true`, the state duration will be +incremented by the duration between points. When a point evaluates as `false`, +the state duration is reset. + +The state duration will be added as an additional `float64` field to each point. +If the expression evaluates as false, the value will be `-1`. +If the expression generates an error during evaluation, the point is discarded, and does not affect the state duration. 
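+
+For instance, given the expression `lambda: "cpu" > 80.0` and points arriving
+ten seconds apart, a hypothetical series of points would be assigned the
+following `state_duration` values (with the default unit of 1s):
+
+
+```js
+// "cpu" values:       85, 92, 70, 86, 99   (hypothetical input, 10s apart)
+// state_duration:      0, 10, -1,  0, 10
+```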
+ +Example: + +```js +stream + |from() + .measurement('cpu') + |where(lambda: "cpu" == 'cpu-total') + |groupBy('host') + |stateDuration(lambda: "usage_idle" <= 10) + .unit(1m) + |alert() + // Warn after 1 minute + .warn(lambda: "state_duration" >= 1) + // Critical after 5 minutes + .crit(lambda: "state_duration" >= 5) +``` + +Note that as the first point in the given state has no previous point, its +state duration will be 0. + +> Currently, the StateDurationNode only emits a point when it receives data. +It does not assume the previous evaluation if no data is received at the "expected" +interval or data resolution. +If no data is sent, the StateDurationNode cannot evaluate the state and cannot calculate a duration. + +> More information about this is available in this [comment thread](https://github.com/influxdata/kapacitor/issues/1757) on Github. + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **stateDuration ( `expression` `ast.LambdaNode`)** | Create a node that tracks duration in a given state. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[as](#as) ( `value` `string`)** | The new name of the resulting duration field. Default: 'state_duration' | +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[unit](#unit) ( `value` `time.Duration`)** | The time unit of the resulting duration value. Default: 1s. | + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### As + +The new name of the resulting duration field. +Default: 'state_duration' + + +```js +stateDuration.as(value string) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +stateDuration.quiet() +``` + + + + +### Unit + +The time unit of the resulting duration value. +Default: 1s. + + +```js +stateDuration.unit(value time.Duration) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. 
+ + +```js +stateDuration|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +stateDuration|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +stateDuration|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +stateDuration|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +stateDuration|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +stateDuration|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +stateDuration|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. 
+ data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +stateDuration|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +stateDuration|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +stateDuration|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +stateDuration|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +stateDuration|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +stateDuration|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +stateDuration|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +stateDuration|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +stateDuration|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +stateDuration|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +stateDuration|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +stateDuration|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +stateDuration|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +stateDuration|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. 
+The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +stateDuration|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +stateDuration|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +stateDuration|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +stateDuration|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +stateDuration|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +stateDuration|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +stateDuration|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +stateDuration|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +stateDuration|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +stateDuration|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +stateDuration|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +stateDuration|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +stateDuration|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +stateDuration|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +stateDuration|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. 
+
+One point will be emitted every count or duration specified.
+
+
+```js
+stateDuration|sample(rate interface{})
+```
+
+Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/)
+
+
+
+### Shift
+
+Create a new node that shifts the incoming points or batches in time.
+
+
+```js
+stateDuration|shift(shift time.Duration)
+```
+
+Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/)
+
+
+
+### Sideload
+
+Create a node that can load data from external sources.
+
+
+```js
+stateDuration|sideload()
+```
+
+Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/)
+
+
+
+### Spread
+
+Compute the difference between `min` and `max` points.
+
+
+```js
+stateDuration|spread(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### StateCount
+
+Create a node that tracks the number of consecutive points in a given state.
+
+
+```js
+stateDuration|stateCount(expression ast.LambdaNode)
+```
+
+Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/)
+
+
+
+### StateDuration
+
+Create a node that tracks duration in a given state.
+
+
+```js
+stateDuration|stateDuration(expression ast.LambdaNode)
+```
+
+Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/)
+
+
+
+### Stats
+
+Create a new stream of data that contains the internal statistics of the node.
+The interval represents how often to emit the statistics based on real time.
+This means the interval time is independent of the times of the data points the source node is receiving.
+
+
+```js
+stateDuration|stats(interval time.Duration)
+```
+
+Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/)
+
+
+
+### Stddev
+
+Compute the standard deviation.
+
+
+```js
+stateDuration|stddev(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Sum
+
+Compute the sum of all values.
+
+
+```js
+stateDuration|sum(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### SwarmAutoscale
+
+Create a node that can trigger autoscale events for a Docker swarm cluster.
+
+
+```js
+stateDuration|swarmAutoscale()
+```
+
+Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/)
+
+
+
+### Top
+
+Select the top `num` points for `field` and sort by any extra tags or fields.
+
+
+```js
+stateDuration|top(num int64, field string, fieldsAndTags ...string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Union
+
+Perform the union of this node and all other given nodes.
+
+
+```js
+stateDuration|union(node ...Node)
+```
+
+Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/)
+
+
+
+### Where
+
+Create a new node that filters the data stream by a given expression.
+
+
+```js
+stateDuration|where(expression ast.LambdaNode)
+```
+
+Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/)
+
+
+
+### Window
+
+Create a new node that windows the stream by time.
+
+NOTE: Window can only be applied to stream edges.
+
+
+```js
+stateDuration|window()
+```
+
+Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/)
+
+
diff --git a/content/kapacitor/v1.5/nodes/stats_node.md b/content/kapacitor/v1.5/nodes/stats_node.md
new file mode 100644
index 000000000..28ad4571b
--- /dev/null
+++ b/content/kapacitor/v1.5/nodes/stats_node.md
@@ -0,0 +1,909 @@
+---
+title: StatsNode
+description: StatsNode emits internal statistics about another node at a given interval.
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: StatsNode
+    identifier: stats_node
+    weight: 280
+    parent: nodes
+---
+
+The `stats` node emits internal statistics about another node at a given interval.
+
+The interval represents how often to emit the statistics based on real time.
+This means the interval time is independent of the times of the data points the other node is receiving.
+As a result, the [StatsNode](/kapacitor/v1.5/nodes/stats_node/) is a root node in the task pipeline.
+
+
+The currently available internal statistics:
+
+* emitted: the number of points or batches this node has sent to its children.
+
+Each stat is available as a field in the data stream.
+
+The stats are in groups according to the original data.
+For example, if the source node is grouped by the tag 'host',
+then the counts are output per host with the appropriate 'host' tag.
+Since it's possible for groups to change when crossing a node, only the emitted groups
+are considered.
+
+Example:
+
+
+```js
+var data = stream
+    |from()...
+// Emit statistics every 1 minute and cache them via the HTTP API.
+data
+    |stats(1m)
+    |httpOut('stats')
+// Continue normal processing of the data stream
+data...
+```
+
+{{% warn %}}
+WARNING: It is not recommended to join the stats stream with the original data stream.
+Since they operate on different clocks, you could potentially create a deadlock.
+This is a limitation of the current implementation and may be removed in the future.
+{{% /warn %}}
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **stats ( `interval` `time.Duration`)** | Create a new stream of data that contains the internal statistics of the node. The interval represents how often to emit the statistics based on real time. This means the interval time is independent of the times of the data points the source node is receiving. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[align](#align) ( )** | Round times to the StatsNode.Interval value. |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Max](#max),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
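+
+For example, in this minimal sketch the `.align()` property is set on the
+`stats` node with the `.` operator, while `|httpOut(...)` chains a new node
+onto it with the `|` operator:
+
+
+```js
+data
+    |stats(1m)
+        .align()
+    |httpOut('stats')
+```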
+ + +### Align + +Round times to the StatsNode.Interval value. + + +```js +stats.align() +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +stats.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +stats|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +stats|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +stats|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +stats|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +stats|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +stats|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +stats|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... 
+``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +stats|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +stats|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +stats|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +stats|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +stats|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +stats|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +stats|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +stats|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +stats|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +stats|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +stats|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +stats|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +stats|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. 
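+
+A hypothetical sketch (the `data` variable and the field name 'mean_cpu' are illustrative): forecast the next 10 points at 1m intervals with no seasonal pattern, emitting the fitted historical points as well:
+
+```js
+data
+    |holtWintersWithFit('mean_cpu', 10, 0, 1m)
+```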
+ + +```js +stats|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +stats|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +stats|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +stats|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +stats|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +stats|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +stats|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +stats|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +stats|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +stats|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +stats|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +stats|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +stats|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +stats|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +stats|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. 
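+
+For example, a minimal sketch (assuming a field named 'value') that emits the point at the 90th percentile:
+
+```js
+data
+    |percentile('value', 90.0)
+```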
+ + +```js +stats|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +stats|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +stats|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +stats|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +stats|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +stats|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +stats|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +stats|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +stats|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +stats|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +stats|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +stats|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +stats|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +stats|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. 
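+
+A minimal sketch (the `data` variable is hypothetical): collect 10 minutes of data and emit the window every 5 minutes:
+
+```js
+data
+    |window()
+        .period(10m)
+        .every(5m)
+```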
+ + +```js +stats|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/stream_node.md b/content/kapacitor/v1.5/nodes/stream_node.md new file mode 100644 index 000000000..4c835a567 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/stream_node.md @@ -0,0 +1,202 @@ +--- +title: StreamNode +description: StreamNode represents the source of data being streamed to Kapacitor through any of its inputs. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: StreamNode + identifier: stream_node + weight: 5 + parent: nodes +--- + +The `stream` node represents the source of data being +streamed to Kapacitor via any of its inputs. +The `stream` variable in stream tasks is an instance of +a [StreamNode.](/kapacitor/v1.5/nodes/stream_node/) +[StreamNode.From](/kapacitor/v1.5/nodes/stream_node/#from) is the method/property of this node. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **stream** | Has no constructor signature. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | + + +### Chaining Methods +[Deadman](#deadman), +[From](#from), +[Stats](#stats) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + +### Quiet + +Suppress all error logging events from this node. + +```js +stream.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. 
+Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +stream|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### From + +Creates a new [FromNode](/kapacitor/v1.5/nodes/from_node/) that can be further +filtered using the Database, RetentionPolicy, Measurement and Where properties. +From can be called multiple times to create multiple +independent forks of the data stream. + +Example: + + +```js + // Select the 'cpu' measurement from just the database 'mydb' + // and retention policy 'myrp'. + var cpu = stream + |from() + .database('mydb') + .retentionPolicy('myrp') + .measurement('cpu') + // Select the 'load' measurement from any database and retention policy. + var load = stream + |from() + .measurement('load') + // Join cpu and load streams and do further processing. + cpu + |join(load) + .as('cpu', 'load') + ... +``` + + + +```js +stream|from() +``` + +Returns: [FromNode](/kapacitor/v1.5/nodes/from_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +stream|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + diff --git a/content/kapacitor/v1.5/nodes/swarm_autoscale_node.md b/content/kapacitor/v1.5/nodes/swarm_autoscale_node.md new file mode 100644 index 000000000..7ab6b632f --- /dev/null +++ b/content/kapacitor/v1.5/nodes/swarm_autoscale_node.md @@ -0,0 +1,1039 @@ +--- +title: SwarmAutoscaleNode +description: SwarmAutoscaleNode triggers autoscale events for a service on a Docker Swarm mode cluster and outputs points for the triggered events. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: SwarmAutoscaleNode + identifier: swarm_autoscale_node + weight: 300 + parent: nodes +--- + +[SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) triggers autoscale events for a service on a Docker Swarm mode cluster. +The node also outputs points for the triggered events. + +Example: + + +```js +// Target 80% cpu per container +var target = 80.0 +var min = 1 +var max = 10 +var period = 5m +var every = period +stream + |from() + .measurement('docker_container_cpu') + .groupBy('container_name','com.docker.swarm.service.name') + .where(lambda: "cpu" == 'cpu-total') + |window() + .period(period) + .every(every) + |mean('usage_percent') + .as('mean_cpu') + |groupBy('com.docker.swarm.service.name') + |sum('mean_cpu') + .as('total_cpu') + |swarmAutoscale() + // Get the name of the service from "com.docker.swarm.service.name" tag. + .serviceNameTag('com.docker.swarm.service.name') + .min(min) + .max(max) + // Set the desired number of replicas based on target. + .replicas(lambda: int(ceil("total_cpu" / target))) + |influxDBOut() + .database('deployments') + .measurement('scale_events') + .precision('s') +``` + + +The above example computes the mean of cpu usage_percent by container name and service name. +Then sum of mean cpu_usage is calculated as total_cpu. 
+Using the total_cpu over the last time period, a desired number of replicas is computed
+based on the target percentage usage of CPU.
+
+If the desired number of replicas has changed, Kapacitor makes the appropriate API call to Docker Swarm
+to update the replicas spec.
+
+Any time the SwarmAutoscale node changes a replica count, it emits a point.
+The point is tagged with the service name,
+using the tag name specified by `outputServiceNameTag`.
+In addition, the group-by tags are preserved on the emitted point.
+The point contains two fields, `old` and `new`, representing the change in the replica count.
+
+Available Statistics:
+
+* increase_events: number of times the replica count was increased.
+* decrease_events: number of times the replica count was decreased.
+* cooldown_drops: number of times an event was dropped because of a cooldown timer.
+* errors: number of errors encountered, typically related to communicating with the Swarm manager API.
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **swarmAutoscale ( )** | Create a node that can trigger autoscale events for a Docker swarm cluster. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[cluster](#cluster) ( `value` `string`)** | Cluster is the ID of the Docker Swarm cluster to use. The ID of the cluster is specified in the Kapacitor configuration. |
+| **[currentField](#currentfield) ( `value` `string`)** | CurrentField is the name of a field into which the current replica count will be set as an int. If empty, no field will be set. Useful for computing deltas on the current state. |
+| **[decreaseCooldown](#decreasecooldown) ( `value` `time.Duration`)** | Only one decrease event can be triggered per resource every DecreaseCooldown interval. |
+| **[increaseCooldown](#increasecooldown) ( `value` `time.Duration`)** | Only one increase event can be triggered per resource every IncreaseCooldown interval. |
+| **[max](#max) ( `value` `int64`)** | The maximum scale factor to set. If 0 then there is no upper limit. Default: 0, a.k.a. no limit. |
+| **[min](#min) ( `value` `int64`)** | The minimum scale factor to set. Default: 1 |
+| **[outputServiceNameTag](#outputservicenametag) ( `value` `string`)** | OutputServiceNameTag is the name of a tag into which the service name will be written for output autoscale events. Defaults to the value of ServiceNameTag if it's not empty. |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+| **[replicas](#replicas) ( `value` `ast.LambdaNode`)** | Replicas is a lambda expression that should evaluate to the desired number of replicas for the resource. |
+| **[serviceName](#servicename) ( `value` `string`)** | ServiceName is the name of the Docker Swarm service to autoscale. |
+| **[serviceNameTag](#servicenametag) ( `value` `string`)** | ServiceNameTag is the name of a tag that contains the name of the Docker Swarm service to autoscale. |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Mean](#mean),
+[Median](#median),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### Cluster
+
+Cluster is the ID of the Docker Swarm cluster to use.
+The ID of the cluster is specified in the Kapacitor configuration.
+
+
+```js
+swarmAutoscale.cluster(value string)
+```
+
+
+
+
+### CurrentField
+
+CurrentField is the name of a field into which the current replica count will be set as an int.
+If empty, no field will be set.
+Useful for computing deltas on the current state.
+
+Example:
+
+
+```js
+    |swarmAutoscale()
+        .currentField('replicas')
+        // Increase the replicas by 1 if the qps is over the threshold
+        .replicas(lambda: if("qps" > threshold, "replicas" + 1, "replicas"))
+```
+
+
+
+```js
+swarmAutoscale.currentField(value string)
+```
+
+
+
+
+### DecreaseCooldown
+
+Only one decrease event can be triggered per resource every DecreaseCooldown interval.
+
+
+```js
+swarmAutoscale.decreaseCooldown(value time.Duration)
+```
+
+
+
+
+### IncreaseCooldown
+
+Only one increase event can be triggered per resource every IncreaseCooldown interval.
+
+
+```js
+swarmAutoscale.increaseCooldown(value time.Duration)
+```
+
+
+
+
+### Max
+
+The maximum scale factor to set.
+If 0 then there is no upper limit.
+Default: 0, a.k.a. no limit.
+
+
+```js
+swarmAutoscale.max(value int64)
+```
+
+
+
+
+### Min
+
+The minimum scale factor to set.
+Default: 1
+
+
+```js
+swarmAutoscale.min(value int64)
+```
+
+
+
+
+### OutputServiceNameTag
+
+OutputServiceNameTag is the name of a tag into which the service name will be written for output autoscale events.
+Defaults to the value of ServiceNameTag if it's not empty.
+
+
+```js
+swarmAutoscale.outputServiceNameTag(value string)
+```
+
+
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+
+```js
+swarmAutoscale.quiet()
+```
+
+
+
+
+### Replicas
+
+Replicas is a lambda expression that should evaluate to the desired number of replicas for the resource.
+
+
+```js
+swarmAutoscale.replicas(value ast.LambdaNode)
+```
+
+
+
+
+### ServiceName
+
+ServiceName is the name of the Docker Swarm service to autoscale.
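+
+For example, a hypothetical sketch (the service name 'web', the limits, and the 80.0 CPU target are illustrative) that scales a fixed service:
+
+```js
+    |swarmAutoscale()
+        .serviceName('web')
+        .min(1)
+        .max(10)
+        .replicas(lambda: int(ceil("total_cpu" / 80.0)))
+```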
+ + +```js +swarmAutoscale.serviceName(value string) +``` + + + + +### ServiceNameTag + +ServiceName is the name of a tag which contains the name of the Docker swarm service to autoscale. + + +```js +swarmAutoscale.serviceNameTag(value string) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +swarmAutoscale|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +swarmAutoscale|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +swarmAutoscale|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +swarmAutoscale|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +swarmAutoscale|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +swarmAutoscale|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +swarmAutoscale|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. 
+ data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +swarmAutoscale|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +swarmAutoscale|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +swarmAutoscale|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +swarmAutoscale|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +swarmAutoscale|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +swarmAutoscale|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +swarmAutoscale|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +swarmAutoscale|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +swarmAutoscale|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +swarmAutoscale|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +swarmAutoscale|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +swarmAutoscale|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. 
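+
+A hypothetical sketch (the field name 'requests' is illustrative): forecast the next 5 points using a seasonal pattern of 6 intervals (one hour at 10m intervals):
+
+```js
+data
+    |holtWinters('requests', 5, 6, 10m)
+```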
+ + +```js +swarmAutoscale|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. + + +```js +swarmAutoscale|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +swarmAutoscale|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +swarmAutoscale|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +swarmAutoscale|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +swarmAutoscale|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +swarmAutoscale|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +swarmAutoscale|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +swarmAutoscale|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +swarmAutoscale|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +swarmAutoscale|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +swarmAutoscale|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +swarmAutoscale|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. 
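+
+For example, a minimal sketch (reusing the hypothetical 'usage_percent' field from the example above) computing a 5-point moving average:
+
+```js
+data
+    |movingAverage('usage_percent', 5)
+```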
+ + +```js +swarmAutoscale|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +swarmAutoscale|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +swarmAutoscale|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +swarmAutoscale|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +swarmAutoscale|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +swarmAutoscale|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +swarmAutoscale|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +swarmAutoscale|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +swarmAutoscale|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +swarmAutoscale|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +swarmAutoscale|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +swarmAutoscale|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +swarmAutoscale|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +swarmAutoscale|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +swarmAutoscale|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. 
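+
+A minimal sketch (the `data` variable is hypothetical): 1m tumbling windows, aligned to interval boundaries:
+
+```js
+data
+    |window()
+        .period(1m)
+        .every(1m)
+        .align()
+```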
+ + +```js +swarmAutoscale|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/u_d_f_node.md b/content/kapacitor/v1.5/nodes/u_d_f_node.md new file mode 100644 index 000000000..3b61017fc --- /dev/null +++ b/content/kapacitor/v1.5/nodes/u_d_f_node.md @@ -0,0 +1,913 @@ +--- +title: UDFNode +description: UDFNode runs a user defined function (UDF) in a separate process. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: UDFNode + identifier: u_d_f_node + weight: 310 + parent: nodes +--- + +The `udf` node can run a User Defined Function (UDF) in a separate process. + +A UDF is a custom script or binary that can communicate via Kapacitor's UDF RPC protocol. +The path and arguments to the UDF program are specified in Kapacitor's configuration. +Using TICKscripts you can invoke and configure your UDF for each task. + +See the [README.md](https://github.com/influxdata/kapacitor/tree/master/udf/agent/) +for details on how to write your own UDF. + +UDFs are configured via Kapacitor's main configuration file. + +Example: + + +```js +[udf] +[udf.functions] + # Example moving average UDF. + [udf.functions.movingAverage] + prog = "/path/to/executable/moving_avg" + args = [] + timeout = "10s" +``` + +UDFs are first class objects in TICKscripts and are referenced via their configuration name. + +Example: + + +```js +// Given you have a UDF that computes a moving average +// The UDF can define what its options are and then can be +// invoked via a TICKscript like so: +stream + |from()... + @movingAverage() + .field('value') + .size(100) + .as('mavg') + |httpOut('movingaverage') +``` + +> **NOTE:** The UDF process runs as the same user as the Kapacitor daemon. +As a result, make sure the user is properly secured, as well as the configuration file. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **uDF** | Has no constructor signature. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | +| **[uDFName](#udfname) ( `value` `string`)** | | + + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. 
+Property methods are marked using the `.` operator. + + +### Quiet + +Suppress all error logging events from this node. + +```js +uDF.quiet() +``` + + + + +### UDFName + +```js +uDF.uDFName(value string) +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +uDF|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +uDF|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +uDF|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +uDF|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +uDF|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +uDF|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +uDF|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... 
+``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +uDF|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. + + +```js +uDF|default() +``` + +Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/) + + + +### Delete + +Create a node that can delete tags or fields. + + +```js +uDF|delete() +``` + +Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/) + + + +### Derivative + +Create a new node that computes the derivative of adjacent points. + + +```js +uDF|derivative(field string) +``` + +Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/) + + + +### Difference + +Compute the difference between points independent of elapsed time. + + +```js +uDF|difference(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Distinct + +Produce batch of only the distinct points. + + +```js +uDF|distinct(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Ec2Autoscale + +Create a node that can trigger autoscale events for a ec2 autoscalegroup. + + +```js +uDF|ec2Autoscale() +``` + +Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/) + + + +### Elapsed + +Compute the elapsed time between points. + + +```js +uDF|elapsed(field string, unit time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Eval + +Create an eval node that will evaluate the given transformation function to each data point. +A list of expressions may be provided and will be evaluated in the order they are given. +The results are available to later expressions. + + +```js +uDF|eval(expressions ...ast.LambdaNode) +``` + +Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/) + + + +### First + +Select the first point. + + +```js +uDF|first(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Flatten + +Flatten points with similar times into a single point. + + +```js +uDF|flatten() +``` + +Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/) + + + +### GroupBy + +Group the data by a set of tags. + +Can pass literal * to group by all dimensions. +Example: + + +```js + |groupBy(*) +``` + + + +```js +uDF|groupBy(tag ...interface{}) +``` + +Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/) + + + +### HoltWinters + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. + + +```js +uDF|holtWinters(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HoltWintersWithFit + +Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set. +This method also outputs all the points used to fit the data in addition to the forecasted data. 
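+
+A hypothetical sketch (the field name 'load' is illustrative): emit a 3-point forecast at 30s intervals along with the fitted historical points:
+
+```js
+data
+    |holtWintersWithFit('load', 3, 0, 30s)
+```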
+ + +```js +uDF|holtWintersWithFit(field string, h int64, m int64, interval time.Duration) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### HttpOut + +Create an HTTP output node that caches the most recent data it has received. +The cached data is available at the given endpoint. +The endpoint is the relative path from the API endpoint of the running task. +For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is +`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`. + + +```js +uDF|httpOut(endpoint string) +``` + +Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/) + + + +### HttpPost + +Creates an HTTP Post node that POSTS received data to the provided HTTP endpoint. +HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an +endpoint property method. + + +```js +uDF|httpPost(url ...string) +``` + +Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/) + + + +### InfluxDBOut + +Create an influxdb output node that will store the incoming data into InfluxDB. + + +```js +uDF|influxDBOut() +``` + +Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/) + + + +### Join + +Join this node with other nodes. The data is joined on timestamp. + + +```js +uDF|join(others ...Node) +``` + +Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/) + + + +### K8sAutoscale + +Create a node that can trigger autoscale events for a kubernetes cluster. + + +```js +uDF|k8sAutoscale() +``` + +Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/) + + + +### KapacitorLoopback + +Create an kapacitor loopback node that will send data back into Kapacitor as a stream. + + +```js +uDF|kapacitorLoopback() +``` + +Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) + + + +### Last + +Select the last point. + + +```js +uDF|last(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Log + +Create a node that logs all data it receives. + + +```js +uDF|log() +``` + +Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/) + + + +### Max + +Select the maximum point. + + +```js +uDF|max(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mean + +Compute the mean of the data. + + +```js +uDF|mean(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Median + +Compute the median of the data. + +> **Note:** This method is not a selector. +If you want the median point, use `.percentile(field, 50.0)`. + + +```js +uDF|median(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Min + +Select the minimum point. + + +```js +uDF|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +uDF|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +uDF|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. 
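+
+For example, a minimal sketch (assuming a field named 'latency') selecting the 99th-percentile point:
+
+```js
+data
+    |percentile('latency', 99.0)
+```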
+ + +```js +uDF|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +uDF|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +uDF|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +uDF|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +uDF|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +uDF|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +uDF|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +uDF|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +uDF|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +uDF|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +uDF|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +uDF|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +uDF|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +uDF|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +uDF|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/union_node.md b/content/kapacitor/v1.5/nodes/union_node.md new file mode 100644 index 000000000..b6ff615b2 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/union_node.md @@ -0,0 +1,896 @@ +--- +title: UnionNode +description: UnionNode takes the union of all of its parents as a simple pass-through. 
Data points received from each parent are passed on to child nodes without modification.
+note: Auto generated by tickdoc
+
+menu:
+  kapacitor_1_5_ref:
+    name: UnionNode
+    identifier: union_node
+    weight: 320
+    parent: nodes
+---
+
+The `union` node takes the union of all of its parents as a simple pass-through.
+Data points received from each parent are passed on to child nodes without modification.
+
+Example:
+
+
+```js
+var logins = stream
+    |from()
+        .measurement('logins')
+var logouts = stream
+    |from()
+        .measurement('logouts')
+var frontpage = stream
+    |from()
+        .measurement('frontpage')
+// Union all user actions into a single stream
+logins
+    |union(logouts, frontpage)
+        .rename('user_actions')
+    ...
+```
+
+
+### Constructor
+
+| Chaining Method | Description |
+|:---------|:---------|
+| **union ( `node` `...Node`)** | Perform the union of this node and all other given nodes. |
+
+### Property Methods
+
+| Setters | Description |
+|:---|:---|
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+| **[rename](#rename) ( `value` `string`)** | The new name of the stream. If empty, the name of the left node (i.e. `leftNode.union(otherNode1, otherNode2)`) is used. |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Max](#max),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+
+### Quiet
+
+Suppress all error logging events from this node.
+
+```js
+union.quiet()
+```
+
+
+
+
+### Rename
+
+The new name of the stream.
+If empty, the name of the left node
+(i.e. `leftNode.union(otherNode1, otherNode2)`) is used.
+
+
+```js
+union.rename(value string)
+```
+
+
+
+
+## Chaining Methods
+
+Chaining methods create a new node in the pipeline as a child of the calling node.
+They do not modify the calling node.
+Chaining methods are marked using the `|` operator.
+
+
+### Alert
+
+Create an alert node, which can trigger alerts.
+
+
+```js
+union|alert()
+```
+
+Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/)
+
+
+
+### Barrier
+
+Create a new Barrier node that emits a BarrierMessage periodically.
+
+One BarrierMessage will be emitted every period duration.
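+For example, this sketch (assuming a 10-second period is appropriate) emits a
+barrier message every 10 seconds of real time, even if no new points arrive:
+
+
+```js
+union
+    |barrier()
+        .period(10s)
+```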
+ + +```js +union|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +union|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +union|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +union|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +union|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +union|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... +``` + +The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section. + +Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + .slack() + .channel('#dead_tasks') + //Do normal processing of data + data... +``` + +You can specify additional lambda expressions to further constrain when the deadman's switch is triggered. +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + // Only trigger the alert if the time of day is between 8am-5pm. + data + |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17) + //Do normal processing of data + data... +``` + + + +```js +union|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode) +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Default + +Create a node that can set defaults for missing tags or fields. 
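+For example, this sketch supplies defaults for a missing tag and field (the
+names `host` and `value` are illustrative only):
+
+
+```js
+union
+    |default()
+        // Used only when the tag or field is absent on a point.
+        .tag('host', 'unknown')
+        .field('value', 0.0)
+```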
+
+
+```js
+union|default()
+```
+
+Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/)
+
+
+
+### Delete
+
+Create a node that can delete tags or fields.
+
+
+```js
+union|delete()
+```
+
+Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/)
+
+
+
+### Derivative
+
+Create a new node that computes the derivative of adjacent points.
+
+
+```js
+union|derivative(field string)
+```
+
+Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/)
+
+
+
+### Difference
+
+Compute the difference between points independent of elapsed time.
+
+
+```js
+union|difference(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Distinct
+
+Produce a batch of only the distinct points.
+
+
+```js
+union|distinct(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Ec2Autoscale
+
+Create a node that can trigger autoscale events for an EC2 autoscale group.
+
+
+```js
+union|ec2Autoscale()
+```
+
+Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/)
+
+
+
+### Elapsed
+
+Compute the elapsed time between points.
+
+
+```js
+union|elapsed(field string, unit time.Duration)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Eval
+
+Create an eval node that will evaluate the given transformation functions for each data point.
+A list of expressions may be provided and will be evaluated in the order they are given.
+The results are available to later expressions.
+
+
+```js
+union|eval(expressions ...ast.LambdaNode)
+```
+
+Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/)
+
+
+
+### First
+
+Select the first point.
+
+
+```js
+union|first(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Flatten
+
+Flatten points with similar times into a single point.
+
+
+```js
+union|flatten()
+```
+
+Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/)
+
+
+
+### GroupBy
+
+Group the data by a set of tags.
+
+You can pass the literal `*` to group by all dimensions.
+Example:
+
+
+```js
+    |groupBy(*)
+```
+
+
+
+```js
+union|groupBy(tag ...interface{})
+```
+
+Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/)
+
+
+
+### HoltWinters
+
+Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set.
+
+
+```js
+union|holtWinters(field string, h int64, m int64, interval time.Duration)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### HoltWintersWithFit
+
+Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set.
+This method also outputs all the points used to fit the data in addition to the forecasted data.
+
+
+```js
+union|holtWintersWithFit(field string, h int64, m int64, interval time.Duration)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### HttpOut
+
+Create an HTTP output node that caches the most recent data it has received.
+The cached data is available at the given endpoint.
+The endpoint is the relative path from the API endpoint of the running task.
+For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is
+`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`.
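+For example, to cache the most recent data at the `top10` endpoint described
+above:
+
+
+```js
+union
+    |httpOut('top10')
+```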
+
+
+```js
+union|httpOut(endpoint string)
+```
+
+Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/)
+
+
+
+### HttpPost
+
+Creates an HTTP Post node that POSTs received data to the provided HTTP endpoint.
+HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an
+endpoint property method.
+
+
+```js
+union|httpPost(url ...string)
+```
+
+Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/)
+
+
+
+### InfluxDBOut
+
+Create an InfluxDB output node that will store the incoming data in InfluxDB.
+
+
+```js
+union|influxDBOut()
+```
+
+Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/)
+
+
+
+### Join
+
+Join this node with other nodes. The data is joined on timestamp.
+
+
+```js
+union|join(others ...Node)
+```
+
+Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/)
+
+
+
+### K8sAutoscale
+
+Create a node that can trigger autoscale events for a Kubernetes cluster.
+
+
+```js
+union|k8sAutoscale()
+```
+
+Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/)
+
+
+
+### KapacitorLoopback
+
+Create a Kapacitor loopback node that will send data back into Kapacitor as a stream.
+
+
+```js
+union|kapacitorLoopback()
+```
+
+Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/)
+
+
+
+### Last
+
+Select the last point.
+
+
+```js
+union|last(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Log
+
+Create a node that logs all data it receives.
+
+
+```js
+union|log()
+```
+
+Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/)
+
+
+
+### Max
+
+Select the maximum point.
+
+
+```js
+union|max(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Mean
+
+Compute the mean of the data.
+
+
+```js
+union|mean(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Median
+
+Compute the median of the data.
+
+> **Note:** This method is not a selector.
+If you want the median point, use `.percentile(field, 50.0)`.
+
+
+```js
+union|median(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Min
+
+Select the minimum point.
+
+
+```js
+union|min(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Mode
+
+Compute the mode of the data.
+
+
+```js
+union|mode(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### MovingAverage
+
+Compute a moving average of the last window points.
+No points are emitted until the window is full.
+
+
+```js
+union|movingAverage(field string, window int64)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Percentile
+
+Select a point at the given percentile. This is a selector function; no interpolation between points is performed.
+
+
+```js
+union|percentile(field string, percentile float64)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Sample
+
+Create a new node that samples the incoming points or batches.
+
+One point will be emitted every count or duration specified.
+
+
+```js
+union|sample(rate interface{})
+```
+
+Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/)
+
+
+
+### Shift
+
+Create a new node that shifts the incoming points or batches in time.
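+For example, this sketch shifts incoming points five minutes forward in time
+(a negative duration such as `-5m` would shift them back instead):
+
+
+```js
+union
+    |shift(5m)
+```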
+ + +```js +union|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +union|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +union|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +union|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +union|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +union|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +union|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +union|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +union|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +union|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +union|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +union|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +union|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/where_node.md b/content/kapacitor/v1.5/nodes/where_node.md new file mode 100644 index 000000000..2f602c577 --- /dev/null +++ b/content/kapacitor/v1.5/nodes/where_node.md @@ -0,0 +1,876 @@ +--- +title: WhereNode +description: WhereNode filters a data stream by a given expression. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: WhereNode + identifier: where_node + weight: 330 + parent: nodes +--- + +The `where` node filters a data stream by a given expression. + +Example: + + +```js +var sums = stream + |from() + .groupBy('service', 'host') + |sum('value') +//Watch particular host for issues. 
+sums + |where(lambda: "host" == 'h001.example.com') + |alert() + .crit(lambda: TRUE) + .email().to('user@example.com') +``` + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **where ( `expression` `ast.LambdaNode`)** | Create a new node that filters the data stream by a given expression. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. | + + +### Chaining Methods +[Alert](#alert), +[Barrier](#barrier), +[Bottom](#bottom), +[ChangeDetect](#changedetect), +[Combine](#combine), +[Count](#count), +[CumulativeSum](#cumulativesum), +[Deadman](#deadman), +[Default](#default), +[Delete](#delete), +[Derivative](#derivative), +[Difference](#difference), +[Distinct](#distinct), +[Ec2Autoscale](#ec2autoscale), +[Elapsed](#elapsed), +[Eval](#eval), +[First](#first), +[Flatten](#flatten), +[GroupBy](#groupby), +[HoltWinters](#holtwinters), +[HoltWintersWithFit](#holtwinterswithfit), +[HttpOut](#httpout), +[HttpPost](#httppost), +[InfluxDBOut](#influxdbout), +[Join](#join), +[K8sAutoscale](#k8sautoscale), +[KapacitorLoopback](#kapacitorloopback), +[Last](#last), +[Log](#log), +[Max](#max), +[Mean](#mean), +[Median](#median), +[Min](#min), +[Mode](#mode), +[MovingAverage](#movingaverage), +[Percentile](#percentile), +[Sample](#sample), +[Shift](#shift), +[Sideload](#sideload), +[Spread](#spread), +[StateCount](#statecount), +[StateDuration](#stateduration), +[Stats](#stats), +[Stddev](#stddev), +[Sum](#sum), +[SwarmAutoscale](#swarmautoscale), +[Top](#top), +[Union](#union), +[Where](#where), +[Window](#window) + +--- + + +## Properties + +Property methods modify state on the calling node. +They do not add another node to the pipeline, and always return a reference to the calling node. +Property methods are marked using the `.` operator. + + +### Quiet + +Suppress all error logging events from this node. + +```js +where.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +where|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +where|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +where|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +where|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +where|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +where|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. 
+A point is emitted for every point collected.
+
+
+```js
+where|cumulativeSum(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Deadman
+
+Helper function for creating an alert on low throughput, a.k.a. deadman's switch.
+
+- Threshold: trigger alert if throughput drops below threshold in points/interval.
+- Interval: how often to check the throughput.
+- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting.
+
+Example:
+
+
+```js
+    var data = stream
+        |from()...
+    // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s.
+    data
+        |deadman(100.0, 10s)
+    //Do normal processing of data
+    data...
+```
+
+The above is equivalent to this example:
+
+
+```js
+    var data = stream
+        |from()...
+    // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s.
+    data
+        |stats(10s)
+            .align()
+        |derivative('emitted')
+            .unit(10s)
+            .nonNegative()
+        |alert()
+            .id('node \'stream0\' in task \'{{ .TaskName }}\'')
+            .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.')
+            .crit(lambda: "emitted" <= 100.0)
+    //Do normal processing of data
+    data...
+```
+
+The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section.
+
+Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual.
+Example:
+
+
+```js
+    var data = stream
+        |from()...
+    // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s.
+    data
+        |deadman(100.0, 10s)
+            .slack()
+            .channel('#dead_tasks')
+    //Do normal processing of data
+    data...
+```
+
+You can specify additional lambda expressions to further constrain when the deadman's switch is triggered.
+Example:
+
+
+```js
+    var data = stream
+        |from()...
+    // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s.
+    // Only trigger the alert if the time of day is between 8am-5pm.
+    data
+        |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17)
+    //Do normal processing of data
+    data...
+```
+
+
+
+```js
+where|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode)
+```
+
+Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/)
+
+
+
+### Default
+
+Create a node that can set defaults for missing tags or fields.
+
+
+```js
+where|default()
+```
+
+Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/)
+
+
+
+### Delete
+
+Create a node that can delete tags or fields.
+
+
+```js
+where|delete()
+```
+
+Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/)
+
+
+
+### Derivative
+
+Create a new node that computes the derivative of adjacent points.
+
+
+```js
+where|derivative(field string)
+```
+
+Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/)
+
+
+
+### Difference
+
+Compute the difference between points independent of elapsed time.
+
+
+```js
+where|difference(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Distinct
+
+Produce a batch of only the distinct points.
+
+
+```js
+where|distinct(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Ec2Autoscale
+
+Create a node that can trigger autoscale events for an EC2 autoscale group.
+
+
+```js
+where|ec2Autoscale()
+```
+
+Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/)
+
+
+
+### Elapsed
+
+Compute the elapsed time between points.
+
+
+```js
+where|elapsed(field string, unit time.Duration)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Eval
+
+Create an eval node that will evaluate the given transformation functions for each data point.
+A list of expressions may be provided and will be evaluated in the order they are given.
+The results are available to later expressions.
+
+
+```js
+where|eval(expressions ...ast.LambdaNode)
+```
+
+Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/)
+
+
+
+### First
+
+Select the first point.
+
+
+```js
+where|first(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Flatten
+
+Flatten points with similar times into a single point.
+
+
+```js
+where|flatten()
+```
+
+Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/)
+
+
+
+### GroupBy
+
+Group the data by a set of tags.
+
+You can pass the literal `*` to group by all dimensions.
+Example:
+
+
+```js
+    |groupBy(*)
+```
+
+
+
+```js
+where|groupBy(tag ...interface{})
+```
+
+Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/)
+
+
+
+### HoltWinters
+
+Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set.
+
+
+```js
+where|holtWinters(field string, h int64, m int64, interval time.Duration)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### HoltWintersWithFit
+
+Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set.
+This method also outputs all the points used to fit the data in addition to the forecasted data.
+
+
+```js
+where|holtWintersWithFit(field string, h int64, m int64, interval time.Duration)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### HttpOut
+
+Create an HTTP output node that caches the most recent data it has received.
+The cached data is available at the given endpoint.
+The endpoint is the relative path from the API endpoint of the running task.
+For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is
+`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`.
+
+
+```js
+where|httpOut(endpoint string)
+```
+
+Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/)
+
+
+
+### HttpPost
+
+Creates an HTTP Post node that POSTs received data to the provided HTTP endpoint.
+HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an
+endpoint property method.
+
+
+```js
+where|httpPost(url ...string)
+```
+
+Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/)
+
+
+
+### InfluxDBOut
+
+Create an InfluxDB output node that will store the incoming data in InfluxDB.
+
+
+```js
+where|influxDBOut()
+```
+
+Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/)
+
+
+
+### Join
+
+Join this node with other nodes. The data is joined on timestamp.
+
+
+```js
+where|join(others ...Node)
+```
+
+Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/)
+
+
+
+### K8sAutoscale
+
+Create a node that can trigger autoscale events for a Kubernetes cluster.
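+For example, the following sketch scales a Kubernetes deployment between 3 and
+30 replicas based on a `replicas` value computed earlier in the pipeline (the
+resource and field names here are illustrative only):
+
+
+```js
+where
+    |k8sAutoscale()
+        // Which Kubernetes resource to scale, and within what bounds.
+        .resourceName('app')
+        .kind('deployments')
+        .min(3)
+        .max(30)
+        // Lambda expression that computes the desired replica count.
+        .replicas(lambda: "replicas")
+```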
+
+
+```js
+where|k8sAutoscale()
+```
+
+Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/)
+
+
+
+### KapacitorLoopback
+
+Create a Kapacitor loopback node that will send data back into Kapacitor as a stream.
+
+
+```js
+where|kapacitorLoopback()
+```
+
+Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/)
+
+
+
+### Last
+
+Select the last point.
+
+
+```js
+where|last(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Log
+
+Create a node that logs all data it receives.
+
+
+```js
+where|log()
+```
+
+Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/)
+
+
+
+### Max
+
+Select the maximum point.
+
+
+```js
+where|max(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Mean
+
+Compute the mean of the data.
+
+
+```js
+where|mean(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Median
+
+Compute the median of the data.
+
+> **Note:** This method is not a selector.
+If you want the median point, use `.percentile(field, 50.0)`.
+
+
+```js
+where|median(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Min
+
+Select the minimum point.
+
+
+```js
+where|min(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Mode
+
+Compute the mode of the data.
+
+
+```js
+where|mode(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### MovingAverage
+
+Compute a moving average of the last window points.
+No points are emitted until the window is full.
+
+
+```js
+where|movingAverage(field string, window int64)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Percentile
+
+Select a point at the given percentile. This is a selector function; no interpolation between points is performed.
+
+
+```js
+where|percentile(field string, percentile float64)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Sample
+
+Create a new node that samples the incoming points or batches.
+
+One point will be emitted every count or duration specified.
+
+
+```js
+where|sample(rate interface{})
+```
+
+Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/)
+
+
+
+### Shift
+
+Create a new node that shifts the incoming points or batches in time.
+
+
+```js
+where|shift(shift time.Duration)
+```
+
+Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/)
+
+
+
+### Sideload
+
+Create a node that can load data from external sources.
+
+
+```js
+where|sideload()
+```
+
+Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/)
+
+
+
+### Spread
+
+Compute the difference between `min` and `max` points.
+
+
+```js
+where|spread(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### StateCount
+
+Create a node that tracks the number of consecutive points in a given state.
+
+
+```js
+where|stateCount(expression ast.LambdaNode)
+```
+
+Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/)
+
+
+
+### StateDuration
+
+Create a node that tracks the duration in a given state.
+
+
+```js
+where|stateDuration(expression ast.LambdaNode)
+```
+
+Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/)
+
+
+
+### Stats
+
+Create a new stream of data that contains the internal statistics of the node.
+The interval represents how often to emit the statistics based on real time.
+This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +where|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +where|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +where|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +where|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +where|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +where|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. + + +```js +where|where(expression ast.LambdaNode) +``` + +Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/) + + + +### Window + +Create a new node that windows the stream by time. + +NOTE: Window can only be applied to stream edges. + + +```js +where|window() +``` + +Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/) + + diff --git a/content/kapacitor/v1.5/nodes/window_node.md b/content/kapacitor/v1.5/nodes/window_node.md new file mode 100644 index 000000000..a2593d44c --- /dev/null +++ b/content/kapacitor/v1.5/nodes/window_node.md @@ -0,0 +1,967 @@ +--- +title: WindowNode +description: WindowNode caches data within a moving time range. +note: Auto generated by tickdoc + +menu: + kapacitor_1_5_ref: + name: WindowNode + identifier: window_node + weight: 340 + parent: nodes +--- + +The `window` node caches data within a moving time range. +The `period` property of `window` defines the time range covered by `window`. + +The `every` property of `window` defines the frequency at which the window +is emitted to the next node in the pipeline. + +The `align` property of `window` defines how to align the window edges. +(By default, the edges are defined relative to the first data point the `window` +node receives.) + +Example: + + +```js +stream + |window() + .period(10m) + .every(5m) + |httpOut('recent') +``` + +This example emits the last `10 minute` period every `5 minutes` to the pipeline's `httpOut` node. +Because `every` is less than `period`, each time the window is emitted it contains `5 minutes` of +new data and `5 minutes` of the previous period's data. + +> **NOTE:** Because no `align` property is defined, the `window` edge is defined relative to the first data point. + + +### Constructor + +| Chaining Method | Description | +|:---------|:---------| +| **window ( )** | Create a new node that windows the stream by time. | + +### Property Methods + +| Setters | Description | +|:---|:---| +| **[align](#align) ( )** | If the `align` property is not used to modify the `window` node, then the window alignment is assumed to start at the time of the first data point it receives. 
If the `align` property is set, the window time edges will be truncated to the `every` property (for example, if a data point's time is 12:06 and the `every` property is `5m`, then the data point's window will range from 12:05 to 12:10). |
+| **[every](#every) ( `value` `time.Duration`)** | How often the current window is emitted into the pipeline. If equal to zero, then every new point will emit the current window. |
+| **[everyCount](#everycount) ( `value` `int64`)** | EveryCount determines how often the window is emitted based on the count of points. A value of 1 means that every new point will emit the window. |
+| **[fillPeriod](#fillperiod) ( )** | FillPeriod instructs the WindowNode to wait till the period has elapsed before emitting the first batch. This only applies if the period is greater than the every value. |
+| **[period](#period) ( `value` `time.Duration`)** | The period, or length in time, of the window. |
+| **[periodCount](#periodcount) ( `value` `int64`)** | PeriodCount is the number of points per window. |
+| **[quiet](#quiet) ( )** | Suppress all error logging events from this node. |
+
+
+
+### Chaining Methods
+[Alert](#alert),
+[Barrier](#barrier),
+[Bottom](#bottom),
+[ChangeDetect](#changedetect),
+[Combine](#combine),
+[Count](#count),
+[CumulativeSum](#cumulativesum),
+[Deadman](#deadman),
+[Default](#default),
+[Delete](#delete),
+[Derivative](#derivative),
+[Difference](#difference),
+[Distinct](#distinct),
+[Ec2Autoscale](#ec2autoscale),
+[Elapsed](#elapsed),
+[Eval](#eval),
+[First](#first),
+[Flatten](#flatten),
+[GroupBy](#groupby),
+[HoltWinters](#holtwinters),
+[HoltWintersWithFit](#holtwinterswithfit),
+[HttpOut](#httpout),
+[HttpPost](#httppost),
+[InfluxDBOut](#influxdbout),
+[Join](#join),
+[K8sAutoscale](#k8sautoscale),
+[KapacitorLoopback](#kapacitorloopback),
+[Last](#last),
+[Log](#log),
+[Max](#max),
+[Mean](#mean),
+[Median](#median),
+[Min](#min),
+[Mode](#mode),
+[MovingAverage](#movingaverage),
+[Percentile](#percentile),
+[Sample](#sample),
+[Shift](#shift),
+[Sideload](#sideload),
+[Spread](#spread),
+[StateCount](#statecount),
+[StateDuration](#stateduration),
+[Stats](#stats),
+[Stddev](#stddev),
+[Sum](#sum),
+[SwarmAutoscale](#swarmautoscale),
+[Top](#top),
+[Union](#union),
+[Where](#where),
+[Window](#window)
+
+---
+
+## Properties
+
+Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+
+### Align
+
+Set the `align` property to truncate the window time edges to the `every` property. For example, if a data point's time is 12:06 and the `every` property is `5m`, then the data point's window ranges from 12:05 to 12:10.
+
+If the `align` property isn't used to modify the `window` node, the window alignment starts at the time the first data point is received.
+
+```js
+window.align()
+```
+> Note: When ingesting data at irregular intervals, we recommend using `window.align()` to group data.
+
+
+
+
+### Every
+
+How often the current window is emitted into the pipeline.
+If equal to zero, then every new point will emit the current window.
+
+
+```js
+window.every(value time.Duration)
+```
+
+
+
+
+### EveryCount
+
+EveryCount determines how often the window is emitted based on the count of points.
+A value of 1 means that every new point will emit the window.
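+For example, assuming the window should be emitted once for every ten points
+received:
+
+
+```js
+window.everyCount(10)
+```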
+ + +```js +window.everyCount(value int64) +``` + + + + +### FillPeriod + +FillPeriod instructs the [WindowNode](/kapacitor/v1.5/nodes/window_node/) to wait till the period has elapsed before emitting the first batch. +This only applies if the period is greater than the every value. + + +```js +window.fillPeriod() +``` + + + + +### Period + +The period, or length in time, of the window. + + +```js +window.period(value time.Duration) +``` + + + + +### PeriodCount + +PeriodCount is the number of points per window. + + +```js +window.periodCount(value int64) +``` + + + + +### Quiet + +Suppress all error logging events from this node. + +```js +window.quiet() +``` + + + + +## Chaining Methods + +Chaining methods create a new node in the pipeline as a child of the calling node. +They do not modify the calling node. +Chaining methods are marked using the `|` operator. + + +### Alert + +Create an alert node, which can trigger alerts. + + +```js +window|alert() +``` + +Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/) + + + +### Barrier + +Create a new Barrier node that emits a BarrierMessage periodically. + +One BarrierMessage will be emitted every period duration. + + +```js +window|barrier() +``` + +Returns: [BarrierNode](/kapacitor/v1.5/nodes/barrier_node/) + + + +### Bottom + +Select the bottom `num` points for `field` and sort by any extra tags or fields. + + +```js +window|bottom(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### ChangeDetect + +Create a new node that only emits new points if different from the previous point. + +```js +window|changeDetect(field string) +``` + +Returns: [ChangeDetectNode](/kapacitor/v1.5/nodes/change_detect_node/) + + + +### Combine + +Combine this node with itself. The data is combined on timestamp. + + +```js +window|combine(expressions ...ast.LambdaNode) +``` + +Returns: [CombineNode](/kapacitor/v1.5/nodes/combine_node/) + + + +### Count + +Count the number of points. + + +```js +window|count(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### CumulativeSum + +Compute a cumulative sum of each point that is received. +A point is emitted for every point collected. + + +```js +window|cumulativeSum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Deadman + +Helper function for creating an alert on low throughput, a.k.a. deadman's switch. + +- Threshold: trigger alert if throughput drops below threshold in points/interval. +- Interval: how often to check the throughput. +- Expressions: optional list of expressions to also evaluate. Useful for time of day alerting. + +Example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |deadman(100.0, 10s) + //Do normal processing of data + data... +``` + +The above is equivalent to this example: + + +```js + var data = stream + |from()... + // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s. + data + |stats(10s) + .align() + |derivative('emitted') + .unit(10s) + .nonNegative() + |alert() + .id('node \'stream0\' in task \'{{ .TaskName }}\'') + .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.') + .crit(lambda: "emitted" <= 100.0) + //Do normal processing of data + data... 
+```
+
+The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section.
+
+Since the [AlertNode](/kapacitor/v1.5/nodes/alert_node/) is the last piece it can be further modified as usual.
+Example:
+
+
+```js
+    var data = stream
+        |from()...
+    // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s.
+    data
+        |deadman(100.0, 10s)
+            .slack()
+            .channel('#dead_tasks')
+    //Do normal processing of data
+    data...
+```
+
+You can specify additional lambda expressions to further constrain when the deadman's switch is triggered.
+Example:
+
+
+```js
+    var data = stream
+        |from()...
+    // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s.
+    // Only trigger the alert if the time of day is between 8am-5pm.
+    data
+        |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17)
+    //Do normal processing of data
+    data...
+```
+
+
+
+```js
+window|deadman(threshold float64, interval time.Duration, expr ...ast.LambdaNode)
+```
+
+Returns: [AlertNode](/kapacitor/v1.5/nodes/alert_node/)
+
+
+
+### Default
+
+Create a node that can set defaults for missing tags or fields.
+
+
+```js
+window|default()
+```
+
+Returns: [DefaultNode](/kapacitor/v1.5/nodes/default_node/)
+
+
+
+### Delete
+
+Create a node that can delete tags or fields.
+
+
+```js
+window|delete()
+```
+
+Returns: [DeleteNode](/kapacitor/v1.5/nodes/delete_node/)
+
+
+
+### Derivative
+
+Create a new node that computes the derivative of adjacent points.
+
+
+```js
+window|derivative(field string)
+```
+
+Returns: [DerivativeNode](/kapacitor/v1.5/nodes/derivative_node/)
+
+
+
+### Difference
+
+Compute the difference between points independent of elapsed time.
+
+
+```js
+window|difference(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Distinct
+
+Produce a batch of only the distinct points.
+
+
+```js
+window|distinct(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Ec2Autoscale
+
+Create a node that can trigger autoscale events for an EC2 autoscale group.
+
+
+```js
+window|ec2Autoscale()
+```
+
+Returns: [Ec2AutoscaleNode](/kapacitor/v1.5/nodes/ec2_autoscale_node/)
+
+
+
+### Elapsed
+
+Compute the elapsed time between points.
+
+
+```js
+window|elapsed(field string, unit time.Duration)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Eval
+
+Create an eval node that will evaluate the given transformation functions for each data point.
+A list of expressions may be provided and will be evaluated in the order they are given.
+The results are available to later expressions.
+
+
+```js
+window|eval(expressions ...ast.LambdaNode)
+```
+
+Returns: [EvalNode](/kapacitor/v1.5/nodes/eval_node/)
+
+
+
+### First
+
+Select the first point.
+
+
+```js
+window|first(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Flatten
+
+Flatten points with similar times into a single point.
+
+
+```js
+window|flatten()
+```
+
+Returns: [FlattenNode](/kapacitor/v1.5/nodes/flatten_node/)
+
+
+
+### GroupBy
+
+Group the data by a set of tags.
+
+You can pass the literal `*` to group by all dimensions.
+Example:
+
+
+```js
+    |groupBy(*)
+```
+
+
+
+```js
+window|groupBy(tag ...interface{})
+```
+
+Returns: [GroupByNode](/kapacitor/v1.5/nodes/group_by_node/)
+
+
+
+### HoltWinters
+
+Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set.
+
+
+```js
+window|holtWinters(field string, h int64, m int64, interval time.Duration)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### HoltWintersWithFit
+
+Compute the Holt-Winters (https://docs.influxdata.com/influxdb/latest/query_language/functions/#holt-winters) forecast of a data set.
+This method also outputs all the points used to fit the data in addition to the forecasted data.
+
+
+```js
+window|holtWintersWithFit(field string, h int64, m int64, interval time.Duration)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### HttpOut
+
+Create an HTTP output node that caches the most recent data it has received.
+The cached data is available at the given endpoint.
+The endpoint is the relative path from the API endpoint of the running task.
+For example, if the task endpoint is at `/kapacitor/v1/tasks/` and endpoint is
+`top10`, then the data can be requested from `/kapacitor/v1/tasks//top10`.
+
+
+```js
+window|httpOut(endpoint string)
+```
+
+Returns: [HTTPOutNode](/kapacitor/v1.5/nodes/http_out_node/)
+
+
+
+### HttpPost
+
+Creates an HTTP Post node that POSTs received data to the provided HTTP endpoint.
+HttpPost expects 0 or 1 arguments. If 0 arguments are provided, you must specify an
+endpoint property method.
+
+
+```js
+window|httpPost(url ...string)
+```
+
+Returns: [HTTPPostNode](/kapacitor/v1.5/nodes/http_post_node/)
+
+
+
+### InfluxDBOut
+
+Create an InfluxDB output node that will store the incoming data in InfluxDB.
+
+
+```js
+window|influxDBOut()
+```
+
+Returns: [InfluxDBOutNode](/kapacitor/v1.5/nodes/influx_d_b_out_node/)
+
+
+
+### Join
+
+Join this node with other nodes. The data is joined on timestamp.
+
+
+```js
+window|join(others ...Node)
+```
+
+Returns: [JoinNode](/kapacitor/v1.5/nodes/join_node/)
+
+
+
+### K8sAutoscale
+
+Create a node that can trigger autoscale events for a Kubernetes cluster.
+
+
+```js
+window|k8sAutoscale()
+```
+
+Returns: [K8sAutoscaleNode](/kapacitor/v1.5/nodes/k8s_autoscale_node/)
+
+
+
+### KapacitorLoopback
+
+Create a Kapacitor loopback node that will send data back into Kapacitor as a stream.
+
+
+```js
+window|kapacitorLoopback()
+```
+
+Returns: [KapacitorLoopbackNode](/kapacitor/v1.5/nodes/kapacitor_loopback_node/)
+
+
+
+### Last
+
+Select the last point.
+
+
+```js
+window|last(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Log
+
+Create a node that logs all data it receives.
+
+
+```js
+window|log()
+```
+
+Returns: [LogNode](/kapacitor/v1.5/nodes/log_node/)
+
+
+
+### Max
+
+Select the maximum point.
+
+
+```js
+window|max(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Mean
+
+Compute the mean of the data.
+
+
+```js
+window|mean(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Median
+
+Compute the median of the data.
+
+> **Note:** This method is not a selector.
+If you want the median point, use `.percentile(field, 50.0)`.
+
+
+```js
+window|median(field string)
+```
+
+Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/)
+
+
+
+### Min
+
+Select the minimum point.
+ + +```js +window|min(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Mode + +Compute the mode of the data. + + +```js +window|mode(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### MovingAverage + +Compute a moving average of the last window points. +No points are emitted until the window is full. + + +```js +window|movingAverage(field string, window int64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Percentile + +Select a point at the given percentile. This is a selector function, no interpolation between points is performed. + + +```js +window|percentile(field string, percentile float64) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sample + +Create a new node that samples the incoming points or batches. + +One point will be emitted every count or duration specified. + + +```js +window|sample(rate interface{}) +``` + +Returns: [SampleNode](/kapacitor/v1.5/nodes/sample_node/) + + + +### Shift + +Create a new node that shifts the incoming points or batches in time. + + +```js +window|shift(shift time.Duration) +``` + +Returns: [ShiftNode](/kapacitor/v1.5/nodes/shift_node/) + + + +### Sideload + +Create a node that can load data from external sources. + + +```js +window|sideload() +``` + +Returns: [SideloadNode](/kapacitor/v1.5/nodes/sideload_node/) + + + +### Spread + +Compute the difference between `min` and `max` points. + + +```js +window|spread(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### StateCount + +Create a node that tracks number of consecutive points in a given state. + + +```js +window|stateCount(expression ast.LambdaNode) +``` + +Returns: [StateCountNode](/kapacitor/v1.5/nodes/state_count_node/) + + + +### StateDuration + +Create a node that tracks duration in a given state. + + +```js +window|stateDuration(expression ast.LambdaNode) +``` + +Returns: [StateDurationNode](/kapacitor/v1.5/nodes/state_duration_node/) + + + +### Stats + +Create a new stream of data that contains the internal statistics of the node. +The interval represents how often to emit the statistics based on real time. +This means the interval time is independent of the times of the data points the source node is receiving. + + +```js +window|stats(interval time.Duration) +``` + +Returns: [StatsNode](/kapacitor/v1.5/nodes/stats_node/) + + + +### Stddev + +Compute the standard deviation. + + +```js +window|stddev(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Sum + +Compute the sum of all values. + + +```js +window|sum(field string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### SwarmAutoscale + +Create a node that can trigger autoscale events for a Docker swarm cluster. + + +```js +window|swarmAutoscale() +``` + +Returns: [SwarmAutoscaleNode](/kapacitor/v1.5/nodes/swarm_autoscale_node/) + + + +### Top + +Select the top `num` points for `field` and sort by any extra tags or fields. + + +```js +window|top(num int64, field string, fieldsAndTags ...string) +``` + +Returns: [InfluxQLNode](/kapacitor/v1.5/nodes/influx_q_l_node/) + + + +### Union + +Perform the union of this node and all other given nodes. + + +```js +window|union(node ...Node) +``` + +Returns: [UnionNode](/kapacitor/v1.5/nodes/union_node/) + + + +### Where + +Create a new node that filters the data stream by a given expression. 
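+For example, to pass through only points from a particular host (mirroring the
+`where` example shown earlier on the WhereNode page):
+
+
+```js
+window
+    |where(lambda: "host" == 'h001.example.com')
+```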
+
+
+```js
+window|where(expression ast.LambdaNode)
+```
+
+Returns: [WhereNode](/kapacitor/v1.5/nodes/where_node/)
+
+
+
+### Window
+
+Create a new node that windows the stream by time.
+
+NOTE: Window can only be applied to stream edges.
+
+
+```js
+window|window()
+```
+
+Returns: [WindowNode](/kapacitor/v1.5/nodes/window_node/)
+
+
diff --git a/content/kapacitor/v1.5/reference/spec.md b/content/kapacitor/v1.5/reference/spec.md
new file mode 100644
index 000000000..9739f4274
--- /dev/null
+++ b/content/kapacitor/v1.5/reference/spec.md
@@ -0,0 +1,84 @@
+---
+title: TICKscript specification
+menu:
+  kapacitor_1_5_ref:
+    name: TICKscript specification
+    identifier: specification
+    weight: 10
+---
+
+Introduction
+------------
+
+The TICKscript language is an invocation chaining language used to define data processing pipelines.
+
+
+Notation
+-------
+
+The syntax is specified using Extended Backus-Naur Form ("EBNF").
+EBNF is the same notation used in the [Go](http://golang.org/) programming language specification, which can be found [here](https://golang.org/ref/spec).
+
+```
+Production = production_name "=" [ Expression ] "." .
+Expression = Alternative { "|" Alternative } .
+Alternative = Term { Term } .
+Term = production_name | token [ "…" token ] | Group | Option | Repetition .
+Group = "(" Expression ")" .
+Option = "[" Expression "]" .
+Repetition = "{" Expression "}" .
+```
+
+Notation operators in order of increasing precedence:
+
+```
+| alternation
+() grouping
+[] option (0 or 1 times)
+{} repetition (0 to n times)
+```
+
+Grammar
+-------
+
+The following is the EBNF grammar definition of TICKscript.
+
+```
+
+unicode_char = (* an arbitrary Unicode code point except newline *) .
+digit = "0" … "9" .
+ascii_letter = "A" … "Z" | "a" … "z" .
+letter = ascii_letter | "_" .
+identifier = ( letter ) { letter | digit } .
+boolean_lit = "TRUE" | "FALSE" .
+int_lit = "1" … "9" { digit } .
+number_lit = digit { digit } { "." {digit} } .
+duration_lit = int_lit duration_unit .
+duration_unit = "u" | "µ" | "ms" | "s" | "m" | "h" | "d" | "w" .
+string_lit = `'` { unicode_char } `'` .
+star_lit = "*" .
+regex_lit = `/` { unicode_char } `/` .
+
+operator_lit = "+" | "-" | "*" | "/" | "==" | "!=" |
+               "<" | "<=" | ">" | ">=" | "=~" | "!~" |
+               "AND" | "OR" .
+
+Program = Statement { Statement } .
+Statement = Declaration | Expression .
+Declaration = "var" identifier "=" Expression .
+Expression = identifier { Chain } | Function { Chain } | Primary .
+Chain = "@" Function | "|" Function { Chain } | "." Function { Chain } | "." identifier { Chain } .
+Function = identifier "(" Parameters ")" .
+Parameters = { Parameter "," } [ Parameter ] .
+Parameter = Expression | "lambda:" LambdaExpr | Primary .
+Primary = "(" LambdaExpr ")" | number_lit | string_lit |
+          boolean_lit | duration_lit | regex_lit | star_lit |
+          LFunc | identifier | Reference | "-" Primary | "!" Primary .
+Reference = `"` { unicode_char } `"` .
+LambdaExpr = Primary operator_lit Primary .
+LFunc = identifier "(" LParameters ")" .
+LParameters = { LParameter "," } [ LParameter ] .
+LParameter = LambdaExpr | Primary .
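+
+(* Example: the statement `stream |from() .measurement('cpu')` derives from
+   Expression as the identifier "stream", followed by the Chain "|" Function
+   `from()`, followed by the nested Chain "." Function `measurement('cpu')`. *)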
+ +``` diff --git a/content/kapacitor/v1.5/tick/_index.md b/content/kapacitor/v1.5/tick/_index.md new file mode 100644 index 000000000..8e3526a5b --- /dev/null +++ b/content/kapacitor/v1.5/tick/_index.md @@ -0,0 +1,26 @@ +--- +title: TICKscript language reference + +menu: + kapacitor_1_5_ref: + name: TICKscript language reference + identifier: tick + weight: 40 +--- + +## What is in this section? + +This section provides introductory information on working with TICKscript. + + * [Introduction](/kapacitor/v1.5/tick/introduction/) - this document presents the fundamental concepts of working with TICKscript in Kapacitor and Chronograf. + * [TICKscript Syntax](/kapacitor/v1.5/tick/syntax/) - this covers the essentials of how TICKscript statements and structures are organized. + * [Lambda expressions](/kapacitor/v1.5/tick/expr/) - this section provides essential information about working with these argument types, which are commonly provided to TICKscript nodes. + * [TICKscript specification](/kapacitor/v1.5/reference/spec/) - (in the reference section) this introduces the specification defining TICKscript. + +Outside of this section the following articles may also be of interest. + + * [Getting started with Kapacitor](/kapacitor/v1.5/introduction/getting-started/) - an introduction to Kapacitor, which presents TICKscript basics. + * [Node overview](/kapacitor/v1.5/nodes/) - a catalog of the types of nodes available in TICKscript. + * [Guides](/kapacitor/v1.5/guides/) - a collection of intermediate to advanced solutions using the TICKscript language. + +
diff --git a/content/kapacitor/v1.5/tick/expr.md b/content/kapacitor/v1.5/tick/expr.md
new file mode 100644
index 000000000..ef3437a7c
--- /dev/null
+++ b/content/kapacitor/v1.5/tick/expr.md
@@ -0,0 +1,361 @@
+---
+title: Lambda expressions
+
+menu:
+  kapacitor_1_5_ref:
+    identifier: expr
+    weight: 5
+    parent: tick
+---
+
+# Overview
+
+TICKscript uses lambda expressions to define transformations on data points, as
+well as Boolean conditions that act as filters. Lambda expressions wrap
+mathematical operations, Boolean operations, internal function calls, or a
+combination of all three. TICKscript tries to be similar to InfluxQL in that
+most expressions you would use in an InfluxQL `WHERE` clause will also work as
+expressions in TICKscript, with the following differences in syntax:
+
+* All field or tag identifiers must be double quoted.
+* The comparison operator for equality is `==` not `=`.
+
+All lambda expressions in TICKscript begin with the `lambda:` keyword.
+
+```js
+.where(lambda: "host" == 'server001.example.com')
+```
+
+In some nodes the results of a lambda expression can be captured into a new
+field as a named result using the property setter `.as()`.
+In this way they can be used in other nodes further down the pipeline.
+
+
+The internal functions of lambda expressions can be either stateless or
+stateful. Stateful means that each time the function is evaluated the internal
+state can change and will persist until the next evaluation.
+
+For example, the built-in function `sigma` calculates a running mean and standard
+deviation and returns the number of standard deviations the current data point
+is away from the mean.
+
+**Example 1 – the sigma function**
+
+```js
+sigma("value") > 3.0
+```
+
+Each time the expression is evaluated, it updates the running statistics and
+then returns the deviation. The simple expression in Example 1 evaluates to
+`false` while the stream of data points it has received remains within `3.0`
+standard deviations of the running mean. As soon as a value is processed that
+is more than `3.0` standard deviations from the mean, it evaluates to `true`.
+Such an expression can be used inside a TICKscript to define powerful
+alerts, as illustrated in Example 2 below.
+
+**Example 2 – TICKscript with lambda expression**
+
+```js
+stream
+    |from()
+    ...
+    |alert()
+        // use an expression to define when an alert should go critical.
+        .crit(lambda: sigma("value") > 3.0)
+```
+
+**Note on inadvertent type casting**
+
+Beware that numerical values declared in the TICKscript follow the parsing rules
+for literals introduced in the
+[Syntax](/kapacitor/v1.5/tick/syntax/#literal-values) document. They may not be
+of a suitable type for the function or operation in which they will be used.
+Numerical values that include a decimal will be interpreted as floats.
+Numerical values without a decimal will be interpreted as integers. When
+integers and floats are used within the same expression, the integer values need
+to use the `float()` type conversion function if a float result is desired.
+Failure to observe this rule can yield unexpected results. For example, suppose
+a lambda expression should calculate a float ratio between 0 and 1 that will
+then be used to generate a percentage, but the fields involved are of type
+integer. It might be assumed that the subset field can simply be divided by the
+total field to get the ratio (e.g. `subset/total * 100`). Such an
+integer-by-integer division will result in an integer value of 0.
Furthermore, multiplication of the result of
+such an operation by the literal `100` (an integer) will also result in 0.
+Casting the integer values to float will result in a valid ratio in the range
+between 0 and 1, and then multiplication by the literal `100.0` (a float) will
+result in a valid percentage value. Correctly written, such an operation should
+look like this:
+
+`eval(lambda: float("total_error_responses")/float("total_responses") * 100.0)`.
+
+If an error of the type `E! mismatched type to binary operator...` appears in
+the logs, check to ensure that the fields on both sides of the operator are of
+the same type, and that it is the desired type.
+
+In short, to ensure that the type of a field value is correct, use the built-in
+type conversion functions (see [Type conversion functions](#type-conversion-functions) below).
+
+# Built-in functions
+
+### Stateful functions
+
+##### Count
+
+Count takes no arguments and returns the number of times the expression has been
+evaluated.
+
+```js
+count() int64
+```
+
+##### Sigma
+
+Computes the number of standard deviations a given value is away from the
+running mean. Each time the expression is evaluated, the running mean and
+standard deviation are updated.
+
+```js
+sigma(value float64) float64
+```
+
+##### Spread
+
+Computes the running range of all values passed into it. The range is the
+difference between the maximum and minimum values received.
+
+```js
+spread(value float64) float64
+```
+
+### Stateless functions
+
+#### Type conversion functions
+
+##### Bool
+
+Converts a string into a Boolean via Golang's
+[strconv.ParseBool](https://golang.org/pkg/strconv/#ParseBool) function. Numeric
+types can also be converted to a bool, where 0 -> false and 1 -> true.
+
+```js
+bool(value) bool
+```
+
+##### Int
+
+Converts a string or float64 into an int64 via Golang's
+[strconv.ParseInt](https://golang.org/pkg/strconv/#ParseInt) or simple
+`int64()` coercion. Strings are assumed to be decimal numbers. Durations are
+converted into an int64 with nanosecond units. A Boolean is converted to an
+int64 where false -> 0 and true -> 1.
+
+```js
+int(value) int64
+```
+
+##### Float
+
+Converts a string or int64 into a float64 via Golang's
+[strconv.ParseFloat](https://golang.org/pkg/strconv/#ParseFloat) or simple
+`float64()` coercion.
+A Boolean is converted to a float64 where false -> 0.0 and true -> 1.0.
+
+```js
+float(value) float64
+```
+
+##### String
+
+Converts a bool, int64 or float64 into a string via Golang's
+[strconv.Format*](https://golang.org/pkg/strconv/#FormatBool) functions.
+Durations are converted to a string representation of the duration.
+
+```js
+string(value) string
+```
+
+##### Duration
+
+Converts an int64 or a float64 into a duration, using the unit specified as the
+second argument. Strings are converted to durations using the same form as
+TICKscript duration literals.
+
+```js
+duration(value int64|float64, unit duration) duration
+duration(value string) duration
+```
+
+#### Existence
+
+##### IsPresent
+
+Returns a Boolean value based on whether the specified field or tag key is present.
+Useful for filtering out data that is missing the specified field or tag.
+
+```js
+|where(lambda: isPresent("myfield"))
+```
+
+This returns `TRUE` if `myfield` is present in the data point and `FALSE` otherwise.
+
+
+#### Time functions
+
+##### The `time` field
+
+Within each expression, the `time` field contains the time of the current data point.
+The following functions can be used on the `time` field.
+Each function returns an int64.
+ +| Function | Description | +| ---------- | ------------- | +| `unixNano(t time) int64` | the number of nanoseconds elapsed since January 1, 1970 UTC (Unix time) | +| `minute(t time) int64` | the minute within the hour: range [0,59] | +| `hour(t time) int64` | the hour within the day: range [0,23] | +| `weekday(t time) int64` | the weekday within the week: range [0,6], 0 is Sunday | +| `day(t time) int64` | the day within the month: range [1,31] | +| `month(t time) int64` | the month within the year: range [1,12] | +| `year(t time) int64` | the year | + +Example usage: + +```js +lambda: hour("time") >= 9 AND hour("time") < 19 +``` + +The above expression evaluates to `true` if the hour of the day for the data +point falls between 0900 hours and 1900 hours. + +##### Now + +Returns the current time. + +```js +now() time +``` + +Example usage: + +```js +lambda: "expiration" < unixNano(now()) +``` + + +#### Math functions + +The following mathematical functions are available. +Each function is implemented via the equivalent Go function. + +| Function | Description | +| ---------- | ------------- | +| [abs(x float64) float64](https://golang.org/pkg/math/#Abs) | Abs returns the absolute value of x. | +| [acos(x float64) float64](https://golang.org/pkg/math/#Acos) | Acos returns the arccosine, in radians, of x. | +| [acosh(x float64) float64](https://golang.org/pkg/math/#Acosh) | Acosh returns the inverse hyperbolic cosine of x. | +| [asin(x float64) float64](https://golang.org/pkg/math/#Asin) | Asin returns the arcsine, in radians, of x. | +| [asinh(x float64) float64](https://golang.org/pkg/math/#Asinh) | Asinh returns the inverse hyperbolic sine of x. | +| [atan(x float64) float64](https://golang.org/pkg/math/#Atan) | Atan returns the arctangent, in radians, of x. | +| [atan2(y, x float64) float64](https://golang.org/pkg/math/#Atan2) | Atan2 returns the arc tangent of y/x, using the signs of the two to determine the quadrant of the return value. | +| [atanh(x float64) float64](https://golang.org/pkg/math/#Atanh) | Atanh returns the inverse hyperbolic tangent of x. | +| [cbrt(x float64) float64](https://golang.org/pkg/math/#Cbrt) | Cbrt returns the cube root of x. | +| [ceil(x float64) float64](https://golang.org/pkg/math/#Ceil) | Ceil returns the least integer value greater than or equal to x. | +| [cos(x float64) float64](https://golang.org/pkg/math/#Cos) | Cos returns the cosine of the radian argument x. | +| [cosh(x float64) float64](https://golang.org/pkg/math/#Cosh) | Cosh returns the hyperbolic cosine of x. | +| [erf(x float64) float64](https://golang.org/pkg/math/#Erf) | Erf returns the error function of x. | +| [erfc(x float64) float64](https://golang.org/pkg/math/#Erfc) | Erfc returns the complementary error function of x. | +| [exp(x float64) float64](https://golang.org/pkg/math/#Exp) | Exp returns e**x, the base-e exponential of x. | +| [exp2(x float64) float64](https://golang.org/pkg/math/#Exp2) | Exp2 returns 2**x, the base-2 exponential of x. | +| [expm1(x float64) float64](https://golang.org/pkg/math/#Expm1) | Expm1 returns e**x - 1, the base-e exponential of x minus 1. It is more accurate than Exp(x) - 1 when x is near zero. | +| [floor(x float64) float64](https://golang.org/pkg/math/#Floor) | Floor returns the greatest integer value less than or equal to x. | +| [gamma(x float64) float64](https://golang.org/pkg/math/#Gamma) | Gamma returns the Gamma function of x. 
| +| [hypot(p, q float64) float64](https://golang.org/pkg/math/#Hypot) | Hypot returns Sqrt(p*p + q*q), taking care to avoid unnecessary overflow and underflow. | +| [j0(x float64) float64](https://golang.org/pkg/math/#J0) | J0 returns the order-zero Bessel function of the first kind. | +| [j1(x float64) float64](https://golang.org/pkg/math/#J1) | J1 returns the order-one Bessel function of the first kind. | +| [jn(n int64, x float64) float64](https://golang.org/pkg/math/#Jn) | Jn returns the order-n Bessel function of the first kind. | +| [log(x float64) float64](https://golang.org/pkg/math/#Log) | Log returns the natural logarithm of x. | +| [log10(x float64) float64](https://golang.org/pkg/math/#Log10) | Log10 returns the decimal logarithm of x. | +| [log1p(x float64) float64](https://golang.org/pkg/math/#Log1p) | Log1p returns the natural logarithm of 1 plus its argument x. It is more accurate than Log(1 + x) when x is near zero. | +| [log2(x float64) float64](https://golang.org/pkg/math/#Log2) | Log2 returns the binary logarithm of x. | +| [logb(x float64) float64](https://golang.org/pkg/math/#Logb) | Logb returns the binary exponent of x. | +| [max(x, y float64) float64](https://golang.org/pkg/math/#Max) | Max returns the larger of x or y. | +| [min(x, y float64) float64](https://golang.org/pkg/math/#Min) | Min returns the smaller of x or y. | +| [mod(x, y float64) float64](https://golang.org/pkg/math/#Mod) | Mod returns the floating-point remainder of x/y. The magnitude of the result is less than y and its sign agrees with that of x. | +| [pow(x, y float64) float64](https://golang.org/pkg/math/#Pow) | Pow returns x**y, the base-x exponential of y. | +| [pow10(x int64) float64](https://golang.org/pkg/math/#Pow10) | Pow10 returns 10**e, the base-10 exponential of e. | +| [sin(x float64) float64](https://golang.org/pkg/math/#Sin) | Sin returns the sine of the radian argument x. | +| [sinh(x float64) float64](https://golang.org/pkg/math/#Sinh) | Sinh returns the hyperbolic sine of x. | +| [sqrt(x float64) float64](https://golang.org/pkg/math/#Sqrt) | Sqrt returns the square root of x. | +| [tan(x float64) float64](https://golang.org/pkg/math/#Tan) | Tan returns the tangent of the radian argument x. | +| [tanh(x float64) float64](https://golang.org/pkg/math/#Tanh) | Tanh returns the hyperbolic tangent of x. | +| [trunc(x float64) float64](https://golang.org/pkg/math/#Trunc) | Trunc returns the integer value of x. | +| [y0(x float64) float64](https://golang.org/pkg/math/#Y0) | Y0 returns the order-zero Bessel function of the second kind. | +| [y1(x float64) float64](https://golang.org/pkg/math/#Y1) | Y1 returns the order-one Bessel function of the second kind. | +| [yn(n int64, x float64) float64](https://golang.org/pkg/math/#Yn) | Yn returns the order-n Bessel function of the second kind. | + +#### String functions + +The following string manipulation functions are available. +Each function is implemented via the equivalent Go function. + +| Function | Description | +| ---------- | ------------- | +| [strContains(s, substr string) bool](https://golang.org/pkg/strings/#Contains) | StrContains reports whether substr is within s. | +| [strContainsAny(s, chars string) bool](https://golang.org/pkg/strings/#ContainsAny) | StrContainsAny reports whether any Unicode code points in chars are within s. | +| [strCount(s, sep string) int64](https://golang.org/pkg/strings/#Count) | StrCount counts the number of non-overlapping instances of sep in s. 
If sep is an empty string, Count returns 1 + the number of Unicode code points in s. |
+| [strHasPrefix(s, prefix string) bool](https://golang.org/pkg/strings/#HasPrefix) | StrHasPrefix tests whether the string s begins with prefix. |
+| [strHasSuffix(s, suffix string) bool](https://golang.org/pkg/strings/#HasSuffix) | StrHasSuffix tests whether the string s ends with suffix. |
+| [strIndex(s, sep string) int64](https://golang.org/pkg/strings/#Index) | StrIndex returns the index of the first instance of sep in s, or -1 if sep is not present in s. |
+| [strIndexAny(s, chars string) int64](https://golang.org/pkg/strings/#IndexAny) | StrIndexAny returns the index of the first instance of any Unicode code point from chars in s, or -1 if no Unicode code point from chars is present in s. |
+| [strLastIndex(s, sep string) int64](https://golang.org/pkg/strings/#LastIndex) | StrLastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s. |
+| [strLastIndexAny(s, chars string) int64](https://golang.org/pkg/strings/#LastIndexAny) | StrLastIndexAny returns the index of the last instance of any Unicode code point from chars in s, or -1 if no Unicode code point from chars is present in s. |
+| [strLength(s string) int64](https://golang.org/ref/spec#Length_and_capacity) | StrLength returns the length of the string. |
+| [strReplace(s, old, new string, n int64) string](https://golang.org/pkg/strings/#Replace) | StrReplace returns a copy of the string s with the first n non-overlapping instances of old replaced by new. |
+| [strSubstring(s string, start, stop int64) string](https://golang.org/ref/spec#Index_expressions) | StrSubstring returns a substring based on the given indexes; strSubstring(str, start, stop) is equivalent to str[start:stop] in Go. |
+| [strToLower(s string) string](https://golang.org/pkg/strings/#ToLower) | StrToLower returns a copy of the string s with all Unicode letters mapped to their lower case. |
+| [strToUpper(s string) string](https://golang.org/pkg/strings/#ToUpper) | StrToUpper returns a copy of the string s with all Unicode letters mapped to their upper case. |
+| [strTrim(s, cutset string) string](https://golang.org/pkg/strings/#Trim) | StrTrim returns a slice of the string s with all leading and trailing Unicode code points contained in cutset removed. |
+| [strTrimLeft(s, cutset string) string](https://golang.org/pkg/strings/#TrimLeft) | StrTrimLeft returns a slice of the string s with all leading Unicode code points contained in cutset removed. |
+| [strTrimPrefix(s, prefix string) string](https://golang.org/pkg/strings/#TrimPrefix) | StrTrimPrefix returns s without the provided leading prefix string. If s doesn't start with prefix, s is returned unchanged. |
+| [strTrimRight(s, cutset string) string](https://golang.org/pkg/strings/#TrimRight) | StrTrimRight returns a slice of the string s, with all trailing Unicode code points contained in cutset removed. |
+| [strTrimSpace(s string) string](https://golang.org/pkg/strings/#TrimSpace) | StrTrimSpace returns a slice of the string s, with all leading and trailing white space removed, as defined by Unicode. |
+| [strTrimSuffix(s, suffix string) string](https://golang.org/pkg/strings/#TrimSuffix) | StrTrimSuffix returns s without the provided trailing suffix string. If s doesn't end with suffix, s is returned unchanged. |
+| [regexReplace(r regex, s, pattern string) string](https://golang.org/pkg/regexp/#Regexp.ReplaceAllString) | RegexReplace replaces matches of the regular expression in the input string with the output string. For example, regexReplace(/a(b*)c/, 'abbbc', 'group is $1') -> 'group is bbb'. The original string is returned if no matches are found. |
+
+
+#### Human string functions
+
+##### HumanBytes
+
+Converts an int64 or a float64, with units of bytes, into a human-readable string representing the number of bytes.
+
+```js
+humanBytes(value) string
+```
+
+
+#### Conditional functions
+
+##### If
+
+Returns the result of one of its operands, depending on the value of the first argument.
+The second and third arguments must be of the same type.
+
+```js
+if(condition, true expression, false expression)
+```
+
+Example:
+
+```js
+|eval(lambda: if("field" > threshold AND "field" != 0, 'true', 'false'))
+    .as('value')
+```
+
+The value of the field `value` in the above example will be the string `true` or `false`, depending on the condition passed as the first argument.
+
+The `if` function's return type is the same type as its second and third arguments.
diff --git a/content/kapacitor/v1.5/tick/introduction.md b/content/kapacitor/v1.5/tick/introduction.md
new file mode 100644
index 000000000..e0451691d
--- /dev/null
+++ b/content/kapacitor/v1.5/tick/introduction.md
@@ -0,0 +1,152 @@
+---
+title: Introducing the TICKscript language
+
+menu:
+  kapacitor_1_5_ref:
+    name: Introduction
+    identifier: tick_intro
+    parent: tick
+    weight: 1
+---
+# Contents
+* [Overview](#overview)
+* [Nodes](#nodes)
+* [Pipelines](#pipelines)
+* [Basic examples](#basic-examples)
+* [Where to next](#where-to-next)
+
+# Overview
+
+Kapacitor uses a Domain Specific Language (DSL) named **TICKscript** to define **tasks** involving the extraction, transformation and loading of data, as well as the tracking of arbitrary changes and the detection of events within data. One common task is defining alerts. TICKscript is used in `.tick` files to define **pipelines** for processing data. The TICKscript language is designed to chain together the invocation of data processing operations defined in **nodes**. The Kapacitor [Getting Started](/kapacitor/v1.5/introduction/getting-started/) guide introduces TICKscript basics; for a better understanding of what follows, it is recommended that the reader review that document first.
+
+Each script has a flat scope, and each variable in the scope can reference a literal value, such as a string, an integer or a float value, or a node instance with methods that can then be called.
+
+These methods come in two forms.
+
+* **Property methods** – A property method modifies the internal properties of a node and returns a reference to the same node. Property methods are called using dot ('.') notation.
+* **Chaining methods** – A chaining method creates a new child node and returns a reference to it. Chaining methods are called using pipe ('|') notation.
+
+# Nodes
+
+In TICKscript the fundamental type is the **node**. A node has **properties** and, as mentioned, chaining methods. A new node can be created from a parent or sibling node using a chaining method of that parent or sibling node. For each **node type** the signature of this method will be the same, regardless of the parent or sibling node type. The chaining method can accept zero or more arguments used to initialize internal properties of the new node instance.
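+
+For example, in the following minimal sketch, where the measurement and filter values are purely illustrative, the chaining method `from()` creates a new child node of `stream`, while the property methods `measurement()` and `where()` configure that same node:
+
+```js
+stream
+    // Chaining method: creates a new from() node and returns a reference to it.
+    |from()
+        // Property methods: modify the internal properties of the from() node
+        // and return a reference to the same node.
+        .measurement('cpu')
+        .where(lambda: "host" == 'server001.example.com')
+```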
+Common node types are `batch`, `query`, `stream`, `from`, `eval` and `alert`, though there are dozens of others.
+
+The top-level nodes `stream` and `batch`, which establish the processing type of the task to be defined, are simply declared and take no arguments. Nodes with more complex sets of properties rely on **property methods** for their internal configuration.
+
+Each node type **wants** data in either batch or stream mode. Some can handle both. Each node type also **provides** data in batch or stream mode. Some can provide both. This _wants/provides_ pattern is key to understanding how nodes work together. Taking into consideration the _wants/provides_ pattern, four general node use cases can be defined:
+
+ * _want_ a batch and _provide_ a stream - for example, when computing an average or a minimum or a maximum.
+ * _want_ a batch and _provide_ a batch - for example, when identifying outliers in a batch of data.
+ * _want_ a stream and _provide_ a batch - for example, when grouping together similar data points.
+ * _want_ a stream and _provide_ a stream - for example, when applying a mathematical function like a logarithm to a value in a point.
+
+The [node reference documentation](/kapacitor/v1.5/nodes/) lists the property and chaining methods of each node along with examples and descriptions.
+
+# Pipelines
+
+Every TICKscript is broken into one or more **pipelines**. Pipelines are chains of nodes logically organized along edges that cannot cycle back to earlier nodes in the chain. The nodes within a pipeline can be assigned to variables. This allows the results of different pipelines to be combined using, for example, a `join` or a `union` node. It also allows sections of the pipeline to be broken into self-descriptive functional units. In a simple TICKscript there may be no need to assign pipeline nodes to variables. The initial node in the pipeline sets the processing type for the Kapacitor task it defines. This can be either `stream` or `batch`. These two types of pipelines cannot be combined.
+
+### Stream or batch?
+
+With `stream` processing, datapoints are read, as in a classic data stream, point by point as they arrive. With `stream`, Kapacitor subscribes to all writes of interest in InfluxDB. With `batch` processing, a frame of 'historic' data is read from the database and then processed. With `stream` processing, data can be transformed before being written to InfluxDB. With `batch` processing, the data should already be stored in InfluxDB; after processing, it can also be written back to it.
+
+Which to use depends upon system resources and the kind of computation being undertaken. When working with a large set of data over a long time frame, `batch` is preferred. It leaves data stored on the disk until it is required, though the query, when triggered, will result in a sudden high load on the database. Processing a large set of data over a long time frame with `stream` means needlessly holding potentially billions of data points in memory. When working with smaller time frames, `stream` is preferred, as it lowers the query load on InfluxDB.
+
+### Pipelines as graphs
+
+Pipelines in Kapacitor are directed acyclic graphs ([DAGs](https://en.wikipedia.org/wiki/Directed_acyclic_graph)). This means that each edge has a direction down which data flows, and that there cannot be any cycles in the pipeline. An edge can also be thought of as the data-flow relationship that exists between a parent node and its child.
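+
+To make the graph structure concrete, the following minimal sketch, in which the measurement and field names are purely illustrative, branches a single parent node into two children. Each edge carries the same data stream down a different branch of the graph:
+
+```js
+var cpu = stream
+    |from()
+        .measurement('cpu')
+
+// First child of the from() node: an alerting branch.
+cpu
+    |alert()
+        .crit(lambda: "usage_idle" < 10.0)
+
+// Second child of the same parent: an HTTP output branch.
+cpu
+    |httpOut('cpu_stream')
+```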
+
+One of two fundamental edges is declared at the start of any pipeline. This first edge establishes the type of processing for the task; however, each ensuing node establishes the edge type between itself and its children.
+
+* `stream`→`from()` – an edge that transfers data a single data point at a time.
+* `batch`→`query()` – an edge that transfers data in chunks instead of one point at a time.
+
+### Pipeline validity
+
+When connecting nodes and then creating a new Kapacitor task, Kapacitor checks whether the TICKscript syntax is well formed and whether the new edges are applicable to the most recent node. However, the full functionality of the pipeline will not be validated until runtime, when error messages can appear in the Kapacitor log.
+
+**Example 1 – a runtime error**
+```bash
+...
+[cpu_alert:alert4] 2017/10/24 14:42:59 E! error evaluating expression for level CRITICAL: left reference value "usage_idle" is missing value
+[cpu_alert:alert4] 2017/10/24 14:42:59 E! error evaluating expression for level CRITICAL: left reference value "usage_idle" is missing value
+...
+```
+Example 1 shows a runtime error that is thrown because a field value has gone missing from the pipeline. This can often happen following an `eval` node when the property `keep()` of the `eval` node has not been set. In general, Kapacitor cannot anticipate all the variations in the data that the task will encounter at runtime. Some tasks may not be written to handle all deviations or exceptions from the norm, such as when fields or tags go missing. In these cases, Kapacitor will log an error.
+
+# Basic examples
+
+**Example 2 – An elementary stream → from() pipeline**
+```js
+dbrp "telegraf"."autogen"
+
+stream
+    |from()
+        .measurement('cpu')
+    |httpOut('dump')
+```
+
+The simple script in Example 2 can be used to create a task with the default Telegraf database.
+
+```
+$ kapacitor define sf_task -tick sf.tick
+```
+
+The task, `sf_task`, will simply cache the latest cpu datapoint as JSON to the HTTP REST endpoint (e.g. http://localhost:9092/kapacitor/v1/tasks/sf_task/dump).
+
+This example contains a database and retention policy statement: `dbrp`.
+
+This example also contains three nodes:
+
+ * The base `stream` node.
+ * The requisite `from()` node, which defines the stream of data points.
+ * The processing node `httpOut()`, which caches the data it receives to the REST service of Kapacitor.
+
+It contains two edges.
+
+ * `stream`→`from()` – sets the processing type of the task and the data stream.
+ * `from()`→`httpOut()` – passes the data stream to the HTTP output processing node.
+
+It contains one property method, the call on the `from()` node to `.measurement('cpu')`, defining the measurement to be used for further processing.
+
+**Example 3 – An elementary batch → query() pipeline**
+
+```js
+batch
+    |query('SELECT * FROM "telegraf"."autogen".cpu WHERE time > now() - 10s')
+        .period(10s)
+        .every(10s)
+    |httpOut('dump')
+```
+
+The script in Example 3 can be used to define a task with the default Telegraf database.
+
+```
+$ kapacitor define bq_task -tick bq.tick -dbrp "telegraf"."autogen"
+```
+
+When used to create the `bq_task` with the default Telegraf database, the TICKscript in Example 3 will simply cache the last cpu datapoint of the batch of measurements representing the last 10 seconds of activity to the HTTP REST endpoint (e.g. http://localhost:9092/kapacitor/v1/tasks/bq_task/dump).
+
+This example contains three nodes:
+
+ * The base `batch` node.
+ * The requisite `query()` node, which defines the data set.
+ * The processing node `httpOut()`, which defines the single step in processing the data set: in this case, publishing it to the REST service of Kapacitor.
+
+It contains two edges.
+
+ * `batch`→`query()` – sets the processing style and data set.
+ * `query()`→`httpOut()` – passes the data set to the HTTP output processing node.
+
+It contains two property methods, which are called from the `query()` node.
+
+ * `period()` – sets the period of time that the batch of data will cover.
+ * `every()` – sets the frequency at which the batch of data will be processed.
+
+### Where to next?
+
+For basic examples of working with TICKscript, see the latest examples in the code base on [GitHub](https://github.com/influxdata/kapacitor/tree/master/examples).
+
+For TICKscript solutions for intermediate to advanced use cases, see the [Guides](/kapacitor/v1.5/guides/) documentation.
+
+The next section covers [TICKscript syntax](/kapacitor/v1.5/tick/syntax/) in more detail. [Continue...](/kapacitor/v1.5/tick/syntax/)
diff --git a/content/kapacitor/v1.5/tick/syntax.md b/content/kapacitor/v1.5/tick/syntax.md
new file mode 100644
index 000000000..454700f78
--- /dev/null
+++ b/content/kapacitor/v1.5/tick/syntax.md
@@ -0,0 +1,1111 @@
+---
+title: TICKscript syntax
+aliases:
+  - /kapacitor/v1.5/tick/spec/
+menu:
+  kapacitor_1_5_ref:
+    name: Syntax
+    identifier: syntax
+    weight: 3
+    parent: tick
+---
+
+# Table of Contents
+
+ * [Concepts](#concepts)
+ * [TICKscript syntax](#tickscript-syntax)
+   * [Code representation](#code-representation)
+   * [Variables and literals](#variables-and-literals)
+   * [Statements](#statements)
+ * [Taxonomy of node types](#taxonomy-of-node-types)
+ * [InfluxQL in TICKscript](#influxql-in-tickscript)
+ * [Lambda expressions](#lambda-expressions)
+ * [Summary of variable use between syntax sub-spaces](#summary-of-variable-use-between-syntax-sub-spaces)
+ * [Gotchas](#gotchas)
+
+# Concepts
+
+The sections [Introduction](/kapacitor/v1.5/tick/introduction/) and [Getting Started](/kapacitor/v1.5/introduction/getting-started/) present the key concepts of **nodes** and **pipelines**. Nodes represent process invocation units that either take data as a batch or in a point-by-point stream, and then alter that data, store that data, or, based on changes in that data, trigger some other activity such as an alert. Pipelines are simply logically organized chains of nodes.
+
+In Kapacitor, TICKscript is used to define tasks directly and to define template tasks, which can be reused to generate new tasks.
+
+**Go**
+
+TICKscript syntax was inspired by many different languages. Among the most influential is Go. This can be seen, for example, in the variable declaration idiom, in string templates, in types such as `duration` and in functions used in lambda expressions; its influence is also apparent elsewhere in the documentation.
+
+**Syntax sub-spaces**
+
+When working with TICKscript, a couple of syntax subspaces will be encountered that have caused confusion for some users. Overarching is the syntax of the TICKscript file itself, primarily composed of variable declarations and of nodes chained together in pipelines. On creation, the `query` node requires a string of InfluxQL statements, so InfluxQL represents the first syntax subspace that may be used. Other nodes and methods use lambda expressions, which represent a second syntax subspace that will be met.
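+
+Both subspaces can appear together, as in the following illustrative sketch, where the query string and the alert threshold are placeholders. InfluxQL appears inside the string passed to `query()`, and a lambda expression is passed to the `crit()` property method:
+
+```js
+batch
+    // InfluxQL syntax subspace: a query string handed to the query() node.
+    |query('SELECT mean(usage_idle) AS stat FROM "telegraf"."autogen"."cpu"')
+        .period(5m)
+        .every(5m)
+    |alert()
+        // Lambda expression syntax subspace.
+        .crit(lambda: "stat" < 30.0)
+```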
The syntax between these subspaces, such as when accessing variable, tag, and field values, can differ, and this can sometimes be a source of confusion.
+
+To summarize, the two syntax subspaces to be aware of in TICKscript are:
+
+ * [InfluxQL](#influxql-in-tickscript)
+ * [Lambda expressions](#lambda-expressions)
+
+**Directed acyclic graphs (DAGs)**
+
+As mentioned in Getting Started, a pipeline is a Directed Acyclic Graph (DAG). (For more information see [Wolfram](http://mathworld.wolfram.com/AcyclicDigraph.html) or [Wikipedia](https://en.wikipedia.org/wiki/Directed_acyclic_graph)). It contains a finite number of nodes (a.k.a. vertices) and edges. Each edge is directed from one node to another. No edge path can lead back to an earlier node in the path, which would result in a cycle or loop. TICKscript paths (a.k.a. pipelines and chains) typically begin with a data source definition node with an edge to a data set definition node, and then pass their results down to data manipulation and processing nodes.
+
+# TICKscript syntax
+
+TICKscript is case sensitive and uses Unicode. The TICKscript parser scans TICKscript code from top to bottom and left to right, instantiating variables and nodes, and then chaining or linking them together into pipelines as they are encountered. When loading a TICKscript, the parser checks that a chaining method called on a node is valid. If an invalid chaining method is encountered, the parser will throw an error with the message "no method or property <identifier> on <node type>".
+
+## Code representation
+
+Source files should be encoded using **UTF-8**. A script is broken into **declarations** and **expressions**. Declarations result in the creation of a variable and occur on one line. Expressions can cover more than one line and result in the creation of an entire pipeline, a pipeline **chain** or a pipeline **branch**.
+
+**Whitespace** is used in declarations to separate variable names from operators and literal values. It is also used within expressions to create indentations, which indicate the hierarchy of method calls and help to make the script more readable. Otherwise, whitespace is ignored.
+
+**Comments** can be created on a single line by using a pair of forward slashes "//" before the text. Comment forward slashes can be preceded by whitespace and need not be the first characters of a new line.
+
+### Keywords
+
+Keywords are tokens that have special meaning within a language and therefore cannot be used as identifiers for functions or variables. TICKscript is compact and contains only a small set of keywords.
+
+**Table 1 – Keywords**
+
+| **Word** | **Usage** |
+| :----|:------|
+| **TRUE** | The literal Boolean value "true". |
+| **FALSE** | The literal Boolean value "false". |
+| **AND** | Standard Boolean conjunction operator. |
+| **OR** | Standard Boolean disjunction operator. |
+| **lambda:** | Flags that what follows is to be interpreted as a lambda expression. |
+| **var** | Starts a variable declaration. |
+| **dbrp** | Starts a database declaration. |
+
+
+Since the set of native node types available in TICKscript is limited, each node type, such as `batch` or `stream`, could also be considered a keyword. Node types and their taxonomy are discussed in detail in the section [Taxonomy of node types](#taxonomy-of-node-types) below.
+
+### Operators
+
+TICKscript has support for traditional mathematical operators as well as a few that make sense in its data processing domain.
+ +**Table 2 – Standard operators** + +| **Operator** | **Usage** | **Examples** | +|:-------------|:----------|:-------------| +| **+** | Addition and string concatenation | `3 + 6`, `total + count` and `'foo' + 'bar'` | +| **-** | Subtraction | `10 - 1`, `total - errs` | +| **\*** | Multiplication | `3 * 6`, `ratio * 100.0` | +| **/** | Division | `36 / 4`, `errs / total` | +| **==** | Comparison of equality | `1 == 1`, `date == today` | +| **!=** | Comparison of inequality | `result != 0`, `id != "testbed"` | +| **<** | Comparison less than | `4 < 5`, `timestamp < today` | +| **<=** | Comparison less than or equal to | `3 <= 6`, `flow <= mean` | +| **>** | Comparison greater than | `6 > 3.0`, `delta > sigma` | +| **>=** | Comparison greater than or equal to | `9.0 >= 8.1`, `quantity >= threshold` | +| **=~** | Regular expression match. Right value must be a regular expression
or a variable holding such an expression. | `tag =~ /^cz\d+/` | +| **!~** | Regular expression not match. Right value must be a regular expression
or a variable holding such an expression. | `tag !~ /^sn\d+/` | +| **!** | Logical not | `!TRUE`, `!(cpu_idle > 70)` | +| **AND** | Logical conjunction | `rate < 20.0 AND rate >= 10` | +| **OR** | Logical disjunction | `status > warn OR delta > sigma` | + +Standard operators are used in TICKscript and in Lambda expressions. + +**Table 3 – Chaining operators** + +| **Operator** | **Usage** | **Examples** | +|:-------------|:----------|:------------| +| **\|** | Declares a chaining method call which creates an instance of a new node and chains it to the node above it. | `stream`
   \|`from()` | +| **.** | Declares a property method call, setting or changing an internal property in the node to which it belongs. | `from()`
   `.database(mydb)` | +| **@** | Declares a user defined function (UDF) call. Essentially a chaining method that adds a new UDF node to the pipeline. | `from()`
`...`
`@MyFunc()` |
+
+Chaining operators are used within expressions to define pipelines or pipeline segments.
+
+## Variables and literals
+
+Variables in TICKscript are useful for storing and reusing values and for providing a friendly mnemonic for quickly understanding what a variable represents. They are typically declared along with the assignment of a literal value. In a TICKscript intended to be used as a [template task](/kapacitor/v1.5/guides/template_tasks/) they can also be declared with just a type identifier.
+
+### Variables
+
+Variables are declared using the keyword `var` at the start of a declaration.
+Variables are immutable and cannot be reassigned new values later on in the script, though they can be used in other declarations and can be passed into methods.
+Variables are also used in template tasks as placeholders to be filled when the template is used to create a new task.
+
+For a detailed presentation on working with **template tasks**, see the guide [Template tasks](/kapacitor/v1.5/guides/template_tasks/).
+If a TICKscript proves useful, it may be desirable to reuse it as a template task in order to quickly create other similar tasks. For this reason, it is recommended to use variables as much as possible.
+
+#### Naming variables
+
+Variable identifiers must begin with a standard ASCII letter and can be followed by any number of letters, digits and underscores. Both upper and lower case can be used. In a TICKscript to be used to define a task directly, the type a variable holds depends on the literal value it is assigned. In a TICKscript written for a task template, the type can also be set using the keyword for the type the variable will hold. In a TICKscript to be used to define a task directly, declaring a variable with only a type identifier will result in the compile-time error `invalid TICKscript: missing value for var "".`.
+
+**Example 1 – variable declarations for a task**
+```js
+var my_var = 'foo'
+var MY_VAR = 'BAR'
+var my_float = 2.71
+var my_int = 1
+var my_node = stream
+```
+Variable declarations in templates do not require a literal assignment, as is shown in Example 2 below.
+
+**Example 2 – variable declarations in a task template**
+```js
+var measurement string
+var frame duration
+var warn float
+var period = 12h
+var critical = 3.0
+```
+
+### Literal values
+
+Literal values are parsed into instances of the types available in TICKscript. They can be declared directly in method arguments or can be assigned to variables. The parser interprets types based on context and creates instances of the following primitives: Boolean, string, float, integer. Regular expressions, lists, lambda expressions, duration structures and nodes are also recognized. The rules the parser uses to recognize a type are discussed in the following Types section.
+
+#### Types
+
+TICKscript recognizes five type identifiers. These identifiers can be used directly in TICKscripts intended for template tasks. Otherwise, the type of the literal will be interpreted from its declaration.
+
+**Table 4 – Type identifiers**
+
+| **Identifier** | **Usage** | **Example** |
+|:---------------|:----------|:------------|
+| **string** | In a template task, declare a variable as type `string`. | `var my_string string` |
+| **duration** | In a template task, declare a variable as type `duration`. | `var my_period duration` |
+| **int** | In a template task, declare a variable as type `int64`. | `var my_count int` |
+| **float** | In a template task, declare a variable as type `float64`. | `var my_ratio float` |
+| **lambda** | In a template task, declare a variable as a Lambda expression type. | `var crit lambda` |
+
+##### Booleans
+Boolean values are generated using the Boolean keywords: `TRUE` and `FALSE`. Note that these keywords use all upper case letters. The parser will throw an error if any other casing is used, e.g. `True` or `true`.
+
+**Example 3 – Boolean literals**
+```js
+var true_bool = TRUE
+...
+    |flatten()
+        .on('host','port')
+        .dropOriginalFieldName(FALSE)
+...
+```
+
+In Example 3 above, the first line shows a simple assignment using a Boolean literal. The second example shows using the Boolean literal `FALSE` in a method call.
+
+##### Numerical types
+
+Any literal token containing only digits and optionally a decimal will lead to the generation of an instance of a numerical type. TICKscript understands two numerical types based on Go: `int64` and `float64`. Any numerical token containing a decimal point will result in the creation of a `float64` value. Any numerical token that ends without containing a decimal point will result in the creation of an `int64` value. If an integer is prefixed with the zero character, `0`, it is interpreted as an octal.
+
+**Example 4 – Numerical literals**
+```js
+var my_int = 6
+var my_float = 2.71828
+var my_octal = 0400
+...
+```
+In Example 4 above, `my_int` is of type `int64`, `my_float` is of type `float64` and `my_octal` is of type `int64` octal.
+
+##### Duration literals
+
+Duration literals define a span of time. Their syntax follows that used in [InfluxQL](/influxdb/v1.4/query_language/spec/#literals). A duration literal is composed of two parts: an integer and a duration unit. It is essentially an integer terminated by one or a pair of reserved characters, which represent a unit of time.
+
+The following table presents the time units used in declaring duration types.
+
+**Table 5 – Duration literal units**
+
+**Unit** | **Meaning**
+-------|-----------------------------------------
+u or µ | microseconds (1 millionth of a second)
+ms | milliseconds (1 thousandth of a second)
+s | second
+m | minute
+h | hour
+d | day
+w | week
+
+**Example 5 – Duration expressions**
+```js
+var span = 10s
+var frequency = 10s
+...
+var views = batch
+    |query('SELECT sum(value) FROM "pages"."default".views')
+        .period(1h)
+        .every(1h)
+        .groupBy(time(1m), *)
+        .fill(0)
+```
+
+In Example 5 above, the first two lines show the declaration of duration types. The first represents a time span of 10 seconds and the second a time frame of 10 seconds. The final example shows declaring duration literals directly in method calls.
+
+
+##### Strings
+Strings begin with either one or three single quotation marks: `'` or `'''`. Strings can be concatenated using the addition `+` operator. To escape quotation marks within a string delimited by a single quotation mark, use the backslash character. If many single quotation marks are expected inside the string, delimit it using triple single quotation marks instead. A string delimited by triple quotation marks requires no escape sequences. In both string demarcation cases, the double quotation mark, which is used to access field and tag values, can be used without an escape.
+
+**Example 6 – Basic strings**
+
+```js
+var region1 = 'EMEA'
+var old_standby = 'foo' + 'bar'
+var query1 = 'SELECT 100 - mean(usage_idle) AS stat FROM "telegraf"."autogen"."cpu" WHERE cpu = \'cpu-total\' '
+var query2 = '''SELECT 100 - mean(usage_idle) AS stat FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu-total' '''
+...
+batch
+    |query('''SELECT 100 - mean(usage_idle) AS stat FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu-total' ''')
+...
+```
+In Example 6 above, the first line shows a simple string assignment using a string literal. The second line uses the concatenation operator. Lines three and four show two different approaches to declaring complex string literals, with and without internally escaped single quotation marks. The final example shows using a string literal directly in a method call.
+
+To make long, complex strings more readable, newlines are permitted within the string.
+
+**Example 7 – Multiline string**
+```js
+batch
+    |query('SELECT 100 - mean(usage_idle)
+            AS stat
+            FROM "telegraf"."autogen"."cpu"
+            WHERE cpu = \'cpu-total\'
+            ')
+```
+In Example 7 above, the string is broken up to make the query more easily understood.
+
+##### String templates
+
+String templates allow node properties, tags and fields to be added to a string. The format is the same as that provided by the Go [text.template](https://golang.org/pkg/text/template/) package. This is useful when writing alert messages. To add a property, tag or field value to a string template, it needs to be wrapped inside of double curly braces: "{{}}".
+
+**Example 8 – Variables inside of string templates**
+```js
+|alert()
+    .id('{{ index .Tags "host"}}/mem_used')
+    .message('{{ .ID }}:{{ index .Fields "stat" }}')
+```
+In Example 8, three values are added to two string templates. In the call to the setter `id()` the value of the tag `"host"` is added to the start of the string. The call to the setter `message()` then adds the `id`, followed by the value of the field `"stat"`.
+
+String templates are currently applicable with the [Alert](/kapacitor/v1.5/nodes/alert_node/) node and are discussed further in the section [Accessing values in string templates](#accessing-values-in-string-templates) below.
+
+String templates can also include flow statements such as `if...else` as well as calls to internal formatting methods.
+
+```
+.message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "emitted" | printf "%0.3f" }} points/10s.')
+```
+##### String lists
+
+A string list is a collection of strings declared between two brackets. Lists can be declared with literals, identifiers for other variables, or with the asterisk wild card, "\*". They can be passed into methods that take multiple string parameters and are especially useful in template tasks. Note that when a list is used in a function call, its contents are exploded and the elements are used as all the arguments to the function.
+
+**Example 9 – String lists in a standard task**
+```js
+var foo = 'foo'
+var bar = 'bar'
+var foobar_list = [foo, bar]
+var cpu_groups = [ 'host', 'cpu' ]
+...
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy(cpu_groups)
+...
+```
+Example 9 declares two string lists. The first contains identifiers for other variables. The second contains string literals. The list `cpu_groups` is used in the method `from.groupBy()`.
+
+**Example 10 – String list in a template task**
+```js
+dbrp "telegraf"."not_autogen"
+
+var measurement string
+var where_filter = lambda: TRUE
+var groups = [*]
+var field string
+var warn lambda
+var crit lambda
+var window = 5m
+var slack_channel = '#alerts'
+
+stream
+    |from()
+        .measurement(measurement)
+        .where(where_filter)
+        .groupBy(groups)
+    |window()
+        .period(window)
+        .every(window)
+    |mean(field)
+    |alert()
+        .warn(warn)
+        .crit(crit)
+        .slack()
+        .channel(slack_channel)
+```
+
+Example 10, taken from the examples in the [code repository](https://github.com/influxdata/kapacitor/blob/1de435db363fa8ece4b50e26d618fc225b38c70f/examples/load/templates/implicit_template.tick), defines `implicit_template.tick`. It uses the `groups` list to hold a variable number of arguments to be passed to the `from.groupBy()` method. The contents of the `groups` list will be determined when the template is used to create a new task.
+
+##### Regular expressions
+
+Regular expressions begin and end with a forward slash: `/`. The regular expression syntax is the same as that of Perl, Python and other languages. For details on the syntax see the Go [regular expression library](https://golang.org/pkg/regexp/syntax/).
+
+**Example 11 – Regular expressions**
+```js
+var cz_turbines = /^cz\d+/
+var adr_senegal = /\.sn$/
+var local_ips = /^192\.168\..*/
+...
+var locals = stream
+    |from()
+        .measurement('responses')
+        .where(lambda: "node" =~ local_ips )
+
+var south_afr = stream
+    |from()
+        .measurement('responses')
+        .where(lambda: "dns_node" =~ /\.za$/ )
+```
+In Example 11, the first three lines show the assignment of regular expressions to variables. The `locals` stream uses the regular expression assigned to the variable `local_ips`. The `south_afr` stream uses a regular expression declared literally as part of its lambda expression.
+
+##### Lambda expressions as literals
+
+A lambda expression is a parameter representing a short, easily understood function to be passed into a method call or held in a variable. It can wrap a Boolean expression, a mathematical expression, a call to an internal function or a combination of these three. Lambda expressions always operate on point data. They are generally compact and as such are used as literals, which eventually get passed into node methods. Internal functions that can be used in lambda expressions are discussed in the sections [Type conversion](#type-conversion) and [Lambda expressions](#lambda-expressions) below. Lambda expressions are presented in detail in the topic [Lambda Expressions](/kapacitor/v1.5/tick/expr/).
+
+Lambda expressions begin with the token `lambda` followed by a colon, ':' – `lambda:`.
+
+**Example 12 – Lambda expressions**
+```js
+var my_lambda = lambda: 1 > 0
+var lazy_lambda = lambda: "usage_idle" < 95
+...
+var data = stream
+    |from()
+...
+var alert = data
+    |eval(lambda: sigma("stat"))
+        .as('sigma')
+        .keep()
+    |alert()
+        .id('{{ index .Tags "host"}}/cpu_used')
+        .message('{{ .ID }}:{{ index .Fields "stat" }}')
+        .info(lambda: "stat" > 70 OR "sigma" > 2.5)
+        .warn(lambda: "stat" > 80 OR "sigma" > 3.0)
+        .crit(lambda: "stat" > 90 OR "sigma" > 3.5)
+
+```
+Example 12 above shows that a lambda expression can be directly assigned to a variable. In the `eval` node, a lambda expression that calls the `sigma` function is used. The `alert` node uses lambda expressions to define the alert levels of given events.
+
+
+##### Nodes
+
+Like the simpler types, node types are declared and can be assigned to variables.
+
+**Example 13 – Node expressions**
+```js
+var data = stream
+    |from()
+        .database('telegraf')
+        .retentionPolicy('autogen')
+        .measurement('cpu')
+        .groupBy('host')
+        .where(lambda: "cpu" == 'cpu-total')
+    |eval(lambda: 100.0 - "usage_idle")
+        .as('used')
+    |window()
+        .period(span)
+        .every(frequency)
+    |mean('used')
+        .as('stat')
+...
+var alert = data
+    |eval(lambda: sigma("stat"))
+        .as('sigma')
+        .keep()
+    |alert()
+        .id('{{ index .Tags "host"}}/cpu_used')
+...
+```
+In Example 13 above, in the first section, five nodes are created. The top-level node `stream` is assigned to the variable `data`. The `stream` node is then used as the root of the pipeline to which the nodes `from`, `eval`, `window` and `mean` are chained in order. In the second section the pipeline is then extended using assignment to the variable `alert`, so that a second `eval` node can be applied to the data.
+
+#### Working with tags, fields and variables
+
+In any script it is not enough to simply declare variables. The values they hold must also be accessed. In TICKscript it is also necessary to work with values held in tags and fields drawn from an InfluxDB data series. This is most evident in the examples presented so far. In addition, values generated by lambda expressions can be added as new fields to the data set in the pipeline and then accessed as named results of those expressions. The following section explores working not only with variables, but also with tag and field values that can be extracted from the data, as well as with named results.
+
+##### Accessing values
+
+Accessing data tags and fields, using string literals and accessing TICKscript variables each involves a different syntax. Additionally, it is possible to access the results of lambda expressions used with certain nodes.
+
+ * **Variables** – To access a _TICKscript variable_ simply use its identifier.
+
+   **Example 14 – Variable access**
+   ```js
+   var db = 'website'
+   ...
+   var data = stream
+       |from()
+           .database(db)
+   ...
+   ```
+   In Example 14, the variable `db` is assigned the literal value `'website'`. This is then used in the setter `.database()` under the chaining method `from()`.
+
+ * **String literals** – To declare a _string literal_ use single quotation marks as discussed in the section [Strings](#strings) above.
+ * **Tag and Field values** – To access a _tag value_ or a _field value_ in a lambda expression, use double quotes. To refer to them in method calls, use single quotes. In method calls these are in essence string literals to be used by a node in matching tag or field values in the data series.
+
+   **Example 15 – Field access**
+   ```js
+   // Data frame
+   var data = stream
+       |from()
+           .database('telegraf')
+           .retentionPolicy('autogen')
+           .measurement('cpu')
+           .groupBy('host')
+           .where(lambda: "cpu" == 'cpu-total')
+       |eval(lambda: 100.0 - "usage_idle")
+           .as('used')
+   ...
+   ```
+   In Example 15, two values from the data frame are accessed. In the `where()` method call, the lambda expression uses the tag `"cpu"` to filter the data frame down to only datapoints whose "cpu" tag equals the literal value of `'cpu-total'`. The chaining method `eval()` also takes a lambda expression that accesses the field `"usage_idle"` to calculate the CPU processing power 'used'. Note that the `groupBy()` method uses a string literal `'host'` to be matched to a tag name in the data series. It will then group the data by this tag.
+
+ * **Named lambda expression results** – Lambda expression results get named and added as fields to the data set using an `as()` method. Think of the `as()` method as functioning just like the `AS` keyword in InfluxQL. See the `eval()` method in Example 15 above. The results of lambda expressions can be accessed in other lambda expressions with double quotation marks, and in method calls with single quotes, just like data tags and fields.
+
+   **Example 16 – Named lambda expression access**
+
+   ```js
+   ...
+       |window()
+           .period(period)
+           .every(every)
+       |mean('used')
+           .as('stat')
+
+   // Thresholds
+   var alert = data
+       |eval(lambda: sigma("stat"))
+           .as('sigma')
+           .keep()
+       |alert()
+           .id('{{ index .Tags "host"}}/cpu_used')
+           .message('{{ .ID }}:{{ index .Fields "stat" }}')
+           .info(lambda: "stat" > info OR "sigma" > infoSig)
+           .warn(lambda: "stat" > warn OR "sigma" > warnSig)
+           .crit(lambda: "stat" > crit OR "sigma" > critSig)
+   ```
+   Example 16 above continues the pipeline from Example 15. In Example 15, the result of the lambda expression named as `'used'` under the `eval()` method is then accessed in Example 16 as an argument to the method `mean()`, which then names its result _as_ `'stat'`. A new statement then begins. This contains a new call to the method `eval()`, which has a lambda expression that accesses `"stat"` and sets its result _as_ `'sigma'`. The named result `"stat"` is also accessed in the `message()` method and the threshold methods (`info()`, `warn()`, `crit()`) under the `alert()` chaining method. The named result `"sigma"` is also used in the lambda expressions of these methods.
+
+   **Note – InfluxQL nodes and tag or field access** – [InfluxQL nodes](/kapacitor/v1.5/nodes/influx_q_l_node/), such as `mean()` in Example 16, are special nodes that wrap InfluxQL functions. See the section [Taxonomy of node types](#taxonomy-of-node-types) below. When accessing field values, tag values or named results with this node type, single quotes are used.
+
+   **Example 17 – Field access with an InfluxQL node**
+   ```js
+   // Dataframe
+   var data = stream
+       |from()
+           .database('telegraf')
+           .retentionPolicy('autogen')
+           .measurement('cpu')
+           .groupBy('host')
+           .where(lambda: "cpu" == 'cpu-total')
+       |eval(lambda: 100.0 - "usage_idle")
+           .as('used')
+       |window()
+           .period(period)
+           .every(every)
+       |mean('used')
+           .as('stat')
+   ```
+   In Example 17 above, the `eval` result gets named as `used`. The chaining method `mean` is an alias of the node type InfluxQL. It wraps the InfluxQL `mean` function. In the call to `mean()`, the named result `'used'` is accessed using only single quotes.
+
+
+##### Accessing values in string templates
+
+As mentioned in the section [String templates](#string-templates), it is possible to add values from node-specific properties, and from tags and fields, to output strings. This can be seen under the `alert` node in Example 16. The accessor expression is wrapped in two curly braces. To access a property, a period `.` is used before the identifier. To access a value from tags or fields, the token `index` is used, followed by a space and a period and then the part of the data series to be accessed (e.g. `.Tags` or `.Fields`); the actual name is then specified in double quotes.
+
+**Example 18 – Accessing values in string templates**
+
+```js
+|alert()
+    .id('{{ index .Tags "host"}}/mem_used')
+    .message('{{ .ID }}:{{ index .Fields "stat" }}')
+```
+In Example 18 above, the property method `.id()` uses the value of the tag in the data stream with the key `"host"` to set part of the value of the id. This value is then used in the property method `message()` as `.ID`. This property method also accesses the value of the named result `"stat"`.
+
+For more specific information, see [Alert node](/kapacitor/v1.5/nodes/alert_node/).
+
+##### Type conversion
+
+Within lambda expressions it is possible to use stateless conversion functions to convert values between types.
+
+ * `bool()` - converts a string, int64 or float64 to Boolean.
+ * `int()` - converts a string, float64, Boolean or duration type to an int64.
+ * `float()` - converts a string, int64 or Boolean to float64.
+ * `string()` - converts an int64, float64, Boolean or duration value to a string.
+ * `duration()` - converts an int64, float64 or string to a duration type.
+
+**Example 19 – Type conversion**
+
+```js
+    |eval(lambda: float("total_error_responses")/float("total_responses") * 100.0)
+```
+
+In Example 19 above, the `float` conversion function is used to ensure that the calculated percentage uses floating point precision when the field values in the data series may have been stored as integers.
+
+##### Numerical precision
+
+When writing floating point values in messages, or to InfluxDB, it might be helpful to specify the decimal precision in order to make the values more readable or better comparable. For example, in the `message()` method of an `alert` node it is possible to "pipe" a value to a `printf` statement.
+
+```js
+|alert()
+    .id('{{ index .Tags "host"}}/mem_used')
+    .message('{{ .ID }}:{{ index .Fields "stat" | printf "%0.2f" }}')
+```
+
+When working with floating point values in lambda expressions, it is also possible to use the floor function and powers of ten to round to a less precise value. Note that using `printf` in a string template is much faster. Note as well that since values are written as 64-bit, this has no effect on storage. If such rounding were to be used with the `InfluxDBOut` node, for example when downsampling data, it could lead to a needless loss of information.
+
+**Example 20 – Rendering floating points less precise**
+```js
+stream
+    // Select just the cpu measurement from our example database.
+    |from()
+        .measurement('cpu')
+    |eval(lambda: floor("usage_idle" * 1000.0)/1000.0)
+        .as('thousandths')
+        .keep('usage_user','usage_idle','thousandths')
+    |alert()
+        .crit(lambda: "thousandths" < 95.000)
+        .message('{{ index .Fields "thousandths" }}')
+        // Whenever we get an alert write it to a file.
+        .log('/tmp/alerts.log')
+```
+Example 20 accomplishes something similar to using `printf`. The `usage_idle` value is rounded down to thousandths of a percent and then used for comparison in the threshold method of the alert node. It is then written into the alert message.
+
+##### Time precision
+
+As Kapacitor and TICKscripts can be used to write values into an InfluxDB database, it may be desirable, in some cases, to specify the time precision to be used. One example occurs when downsampling data using the calculated mean. The precision to be written could be set to a value coarser than the default, up to and even surpassing the bucket size, i.e. the value set by a call to a method like `window.every()`. Using a precision larger than the bucket size is not recommended.
+Specifying time precision can bring storage and performance improvements. The most common example occurs when working with the `InfluxDBOut` node, whose precision property can be set. Note that the `InfluxDBOut` node defaults to the most precise unit, nanoseconds. It is important not to confuse _mathematical_ precision, which is used most commonly with field values, with _time_ precision, which is specified for timestamps.
+
+**Example 21 – Setting time precision with InfluxDBOut**
+```js
+stream
+    |from()
+        .database('telegraf')
+        .measurement('cpu')
+        .groupBy(*)
+    |window()
+        .period(5m)
+        .every(5m)
+        .align()
+    |mean('usage_idle')
+        .as('usage_idle')
+    |influxDBOut()
+        .database('telegraf')
+        .retentionPolicy('autogen')
+        .measurement('mean_cpu_idle')
+        .precision('s')
+...
+```
+In Example 21, taken from the guide topic [Continuous queries](/kapacitor/v1.5/guides/continuous_queries/), the time precision of the series to be written to the database "telegraf" as the measurement `mean_cpu_idle` is set to seconds.
+
+Valid values for precision are the same as those used in InfluxDB.
+
+**Table 6 – Precision units**
+
+|String|Unit|
+|:-----|----|
+| "ns" | nanoseconds |
+| "ms" | milliseconds |
+| "s" | seconds |
+| "m" | minutes |
+| "h" | hours |
+
+## Statements
+
+There are two types of statements in TICKscript: declarations and expressions. A declaration declares either a variable or the database with which the TICKscript will work. An expression expresses a pipeline (a.k.a. chain) of method calls, which create processing nodes and set their properties.
+
+### Declarations
+
+TICKscript works with two types of declarations: database declarations and variable declarations.
+
+A **database declaration** begins with the keyword `dbrp` and is followed by two strings separated by a period. The first string declares the default database against which the script will be used. The second string declares its retention policy. Note that the database and retention policy can also be declared using the flag `-dbrp` when defining the task with the command `kapacitor define` on the command line, so this statement is optional (see the command-line sketch at the end of this section). When used, the database declaration statement should be the first declaration in a TICKscript.
+
+**Example 22 – A database declaration**
+```
+dbrp "telegraf"."autogen"
+...
+```
+Example 22 declares that the TICKscript is to be used against the database `telegraf` with its retention policy `autogen`.
+
+A **variable declaration** begins with the `var` keyword followed by an identifier for the variable being declared. An assignment operator follows, with a literal value on the right-hand side that sets both the type and the value of the new variable.
+
+**Example 23 – Typical declarations**
+```js
+...
+var db = 'website'
+var rp = 'autogen'
+var measurement = 'responses'
+var whereFilter = lambda: ("lb" == '17.99.99.71')
+var name = 'test rule'
+var idVar = name + ':{{.Group}}'
+...
+```
+Example 23 shows six declaration statements. Five of them create variables holding strings and one a lambda expression.
+
+A declaration can also be used to assign an expression to a variable.
+
+**Example 24 – Declaring an expression to a variable**
+```js
+var data = stream
+    |from()
+        .database(db)
+        .retentionPolicy(rp)
+```
+In Example 24, the `data` variable holds the stream pipeline declared in the expression beginning with the node `stream`.
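+
+As an alternative to the `dbrp` statement, the database and retention policy can be supplied when the task is defined on the command line. A minimal sketch, assuming a task named `cpu_alert` defined in the file `cpu_alert.tick` (both names are hypothetical):
+
+```
+kapacitor define cpu_alert -type stream -tick cpu_alert.tick -dbrp telegraf.autogen
+```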
+
+### Expressions
+
+An expression begins with a node identifier or with a variable identifier holding another expression. It then chains together additional node creation methods (chaining methods), property setters (property methods) or user-defined functions (UDFs). The pipe operator "|" indicates the start of a chaining method call, returning a new node into the chain. The dot operator "." adds a property setter. The at operator "@" introduces a user-defined function.
+
+Expressions can be written all on a single line, but this can lead to readability issues. The command `kapacitor show <task_id>` shows the TICKscript as part of its console output. This command pretty-prints the script, using newlines and indentation, regardless of how the defining TICKscript was written. Adding a new line and indenting new method calls is the recommended practice for writing TICKscript expressions. Typically, when a new chaining method is introduced in an expression, a newline is added and the new link in the chain is indented three or more spaces. Likewise, when a new property setter is called, it is set out on a new line and indented an additional number of spaces. For readability, user-defined functions should be indented the same as chaining methods.
+
+An expression ends with the last setter of the last node in the pipeline.
+
+**Example 25 – Single line expressions**
+```js
+...
+// Dataframe
+var data = batch|query('''SELECT mean(used_percent) AS stat FROM "telegraf"."autogen"."mem" ''').period(period).every(every).groupBy('host')
+
+// Thresholds
+var alert = data|eval(lambda: sigma("stat")).as('sigma').keep()|alert().id('{{ index .Tags "host"}}/mem_used').message('{{ .ID }}:{{ index .Fields "stat" }}')
+    .info(lambda: "stat" > info OR "sigma" > infoSig).warn(lambda: "stat" > warn OR "sigma" > warnSig).crit(lambda: "stat" > crit OR "sigma" > critSig)
+...
+```
+Example 25 shows an expression with a number of nodes and setters declared all on the same line. While this is possible, it is not the recommended style. Note also that the command line utility `tickfmt`, which comes with the Kapacitor distribution, can be used to reformat a TICKscript to follow the recommended style (see the sketch at the end of this section).
+
+**Example 26 – Recommended expression syntax**
+```js
+...
+// Dataframe
+var data = batch
+    |query('''SELECT mean(used_percent) AS stat FROM "telegraf"."autogen"."mem" ''')
+        .period(period)
+        .every(every)
+        .groupBy('host')
+
+// Thresholds
+var alert = data
+    |eval(lambda: sigma("stat"))
+        .as('sigma')
+        .keep()
+    |alert()
+        .id('{{ index .Tags "host"}}/mem_used')
+        .message('{{ .ID }}:{{ index .Fields "stat" }}')
+        .info(lambda: "stat" > info OR "sigma" > infoSig)
+        .warn(lambda: "stat" > warn OR "sigma" > warnSig)
+        .crit(lambda: "stat" > crit OR "sigma" > critSig)
+
+// Alert
+alert
+    .log('/tmp/mem_alert_log.txt')
+...
+```
+Example 26, taken from the example [mem_alert_batch.tick](https://github.com/influxdata/kapacitor/blob/03267847561b6261798407e62e5245bc54a7cf0c/examples/telegraf/mem/mem_alert_batch.tick) in the code base, shows the recommended style for writing expressions. This example contains three expression statements. The first begins with the declaration of the batch node for the data frame. This gets assigned to the variable `data`. The second expression takes the `data` variable and defines the alert thresholds. This gets assigned to the `alert` variable. The third expression sets the `log` property of the `alert` node.
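+
+A sketch of using `tickfmt` (the exact flags may differ between releases; run `tickfmt -h` to confirm before relying on this invocation):
+
+```
+tickfmt mem_alert_batch.tick
+```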
+
+### Node creation
+
+With two exceptions (`stream` and `batch`), nodes always occur in pipeline expressions (chains), where they are created through chaining methods. Chaining methods are generally identified using the node type name. One notable exception to this is the InfluxQL node, which uses aliases. See the section [Taxonomy of node types](#taxonomy-of-node-types) below.
+
+For each node type, the chaining method that creates an instance of that type uses the same signature regardless of where it appears in the chain. So whether an `eval` node is created and added to the chain by a `query` node or by a `from` node, the chaining method accepts the same arguments (e.g. one or more lambda expressions).
+
+**Example 27 – Instantiate eval node in stream**
+```js
+...
+var data = stream
+    |from()
+        .database('telegraf')
+        .retentionPolicy('autogen')
+        .measurement('cpu')
+        .groupBy('host')
+        .where(lambda: "cpu" == 'cpu-total')
+    |eval(lambda: 100.0 - "usage_idle")
+        .as('used')
+        .keep()
+    ...
+```
+Example 27 creates three nodes: `stream`, `from` and `eval`.
+
+**Example 28 – Instantiate eval node in batch**
+```js
+...
+var data = batch
+    |query('''SELECT 100 - mean(usage_idle) AS stat FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu-total' ''')
+        .period(period)
+        .every(every)
+        .groupBy('host')
+    |eval(lambda: sigma("stat"))
+        .as('sigma')
+        .keep()
+    ...
+```
+Example 28 also creates three nodes: `batch`, `query` and `eval`.
+
+Both Examples 27 and 28 create an `eval` node. Although `eval` is chained below a `from` node in Example 27 and below a `query` node in Example 28, the signature of the chaining method remains the same.
+
+A short taxonomy of nodes is presented in the section [Taxonomy of node types](#taxonomy-of-node-types) below. The catalog of node types is available under the topic [TICKscript nodes](/kapacitor/v1.5/nodes/).
+
+### Pipelines
+
+To reiterate, a pipeline is a logically ordered chain of nodes defined by one or more expressions. "Logically ordered" means that nodes cannot be chained in any random sequence, but occur in the pipeline according to their role in processing the data. A pipeline can begin with one of two mode definition nodes: `batch` or `stream`. The data frame for a `batch` pipeline is defined in a `query` definition node. The data stream for a `stream` pipeline is defined in a `from` definition node. After the definition nodes, any other type of node may follow.
+
+Standard node types get added to the pipeline with a chaining method indicated by the pipe "|" character. User-defined functions can be added to the pipeline using the at "@" character.
+
+Each node in the pipeline has internal properties that can be set using property methods delineated using a period ".". These methods get called before the node processes the data.
+
+Each node in the pipeline can alter the data passed along to the nodes that follow: filtering it, restructuring it, reducing it to a new measurement and more. In some nodes, setting a property can significantly alter the data received by downstream nodes. For example, with an `eval` node, setting the names of lambda expression results with the `as` property effectively blocks the original field and tag names from being passed downstream. For this reason it might be important to set the `keep` property, in order to keep them in the pipeline if they will be needed by a later node (see the short sketch below).
+
+It is important to become familiar with the [reference documentation](/kapacitor/v1.5/nodes/) for each node type before using it in a TICKscript.
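+
+A minimal sketch of the `keep` behavior described above (the measurement and field names are assumed for illustration):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |eval(lambda: 100.0 - "usage_idle")
+        .as('used')
+        // Without .keep(), only the eval result 'used' is passed on.
+        // Listing the fields keeps 'usage_idle' available to later nodes.
+        .keep('usage_idle', 'used')
+```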
+
+
+**Example 29 – a typical pipeline**
+```js
+// Dataframe
+var data = batch
+    |query('''SELECT 100 - mean(usage_idle) AS stat FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu-total' ''')
+        .period(period)
+        .every(every)
+        .groupBy('host')
+
+// Thresholds
+var alert = data
+    |eval(lambda: sigma("stat"))
+        .as('sigma')
+        .keep()
+    |alert()
+        .id('{{ index .Tags "host"}}/cpu_used')
+        .message('{{ .ID }}:{{ index .Fields "stat" }}')
+        .info(lambda: "stat" > info OR "sigma" > infoSig)
+        .warn(lambda: "stat" > warn OR "sigma" > warnSig)
+        .crit(lambda: "stat" > crit OR "sigma" > critSig)
+
+// Alert
+alert
+    .log('/tmp/cpu_alert_log.txt')
+```
+Example 29 shows a `batch`→`query` pipeline broken into three expressions using two variables. The first expression declares the data frame, the second defines the alert thresholds, and the final expression sets the `log` property of the `alert` node. The entire pipeline begins with the declaration of the `batch` node and ends with the call to the property method `log()`.
+
+# Taxonomy of node types
+
+To aid in understanding the roles that different nodes play in a pipeline, a short taxonomy has been defined. For complete documentation on each node type see the topic [TICKscript Nodes](/kapacitor/v1.5/nodes/).
+
+**Special nodes**
+
+These nodes are special because they can be created and returned using identifiers other than their type names. An alias representing an aspect of their functionality can be used. This may apply in all instances, as with the InfluxQL node, or only in one, as with the Alert node.
+
+ * [`alert`](/kapacitor/v1.5/nodes/alert_node/) - can be returned as a `deadman` switch.
+ * [`influxQL`](/kapacitor/v1.5/nodes/influx_q_l_node/) - directly calls functions in InfluxQL, so it can be returned when a TICKscript chaining method bearing the name of an InfluxQL function is called.
+    * example 1: `from()|mean()` - calls the mean function on a data stream defined in the `from` node and returns an InfluxQL node.
+    * example 2: `query()|mode()` - calls the mode function on the data frame defined in the `query` node and returns an InfluxQL node.
+
+**Data source definition nodes**
+
+The first node in a TICKscript pipeline is either `batch` or `stream`. It defines the _data source_ used in processing the data.
+
+ * [`batch`](/kapacitor/v1.5/nodes/batch_node/) - chaining method call syntax is not used in the declaration.
+ * [`stream`](/kapacitor/v1.5/nodes/stream_node/) - chaining method call syntax is not used in the declaration.
+
+**Data definition nodes**
+
+The data source definition nodes are typically followed by nodes whose purpose is to define a _frame_ or _stream_ of data to be processed by other nodes.
+
+ * [`from`](/kapacitor/v1.5/nodes/from_node/) - has an empty chaining method. It can follow only a `stream` node and is configured using property methods.
+ * [`query`](/kapacitor/v1.5/nodes/query_node/) - chaining method takes a query string. It can follow only a `batch` node.
+
+**Data manipulation nodes**
+
+Values within the data set can be altered or generated using manipulation nodes.
+
+ * [`default`](/kapacitor/v1.5/nodes/default_node/) - has an empty chaining method. Its `field` and `tag` properties can be used to set default values for fields and tags in the data series.
+ * [`sample`](/kapacitor/v1.5/nodes/sample_node/) - chaining method takes an int64 or a duration string. It extracts a sample of data based on the count or the time period.
+ * [`shift`](/kapacitor/v1.5/nodes/shift_node/) - chaining method takes a duration string. It shifts datapoint timestamps. The duration string can be preceded by a minus sign to shift the timestamps backward in time.
+ * [`where`](/kapacitor/v1.5/nodes/where_node/) - chaining method takes a lambda expression. It works with a `stream` pipeline like the `WHERE` statement in InfluxQL.
+ * [`window`](/kapacitor/v1.5/nodes/window_node/) - has an empty chaining method. It is configured using property methods. It works in a `stream` pipeline, usually after the `from` node, to cache data within a moving time range.
+
+**Processing nodes**
+
+Once the data set has been defined it can be passed to other nodes, which process it, transform it, or trigger other processes based on changes within it.
+
+* Nodes for changing the structure of the data or for mixing together pipelines:
+    * [`combine`](/kapacitor/v1.5/nodes/combine_node/) - chaining method takes a list of one or more lambda expressions. It can combine the data from a single node with itself.
+    * [`eval`](/kapacitor/v1.5/nodes/eval_node/) - chaining method takes a list of one or more lambda expressions. It evaluates expressions on each datapoint it receives and, using its `as` property, makes the results available to nodes that follow in the pipeline. Note that when multiple lambda expressions are used, the `as` method can contain a list of strings to name the results of each lambda.
+    * [`groupBy`](/kapacitor/v1.5/nodes/group_by_node/) - chaining method takes a list of one or more strings representing the tags of the series. It groups incoming data by tags.
+    * [`join`](/kapacitor/v1.5/nodes/join_node/) - chaining method takes a list of one or more variables referencing pipeline expressions. It joins data from any number of pipelines based on matching timestamps.
+    * [`union`](/kapacitor/v1.5/nodes/union_node/) - chaining method takes a list of one or more variables referencing pipeline expressions. It creates a union of any number of pipelines.
+
+* Nodes for transforming or processing the datapoints within the data set:
+    * [`delete`](/kapacitor/v1.5/nodes/delete_node/) - empty chaining method. It relies on properties (`field`, `tag`) to delete fields and tags from datapoints.
+    * [`derivative`](/kapacitor/v1.5/nodes/derivative_node/) - chaining method takes a string representing a field for which a derivative will be calculated.
+    * [`flatten`](/kapacitor/v1.5/nodes/flatten_node/) - empty chaining method. It relies on properties to flatten a set of points on specific dimensions.
+    * [`influxQL`](/kapacitor/v1.5/nodes/influx_q_l_node/) - special node (see above). It provides access to InfluxQL functions. It cannot be created directly.
+    * [`stateCount`](/kapacitor/v1.5/nodes/state_count_node/) - chaining method takes a lambda expression. It computes the number of consecutive points that are in a given state.
+    * [`stateDuration`](/kapacitor/v1.5/nodes/state_duration_node/) - chaining method takes a lambda expression. It computes the duration of time that a given state lasts.
+    * [`stats`](/kapacitor/v1.5/nodes/stats_node/) - chaining method takes a duration expression. It emits internal stats about another node at the given interval.
+
+* Nodes for triggering events or processes:
+    * [`alert`](/kapacitor/v1.5/nodes/alert_node/) - empty chaining method. It relies on a number of properties for configuring the emission of alerts.
+    * [`deadman`](/kapacitor/v1.5/nodes/stream_node/#deadman) - actually a helper function, it is an alias for an `alert` that gets triggered when data flow falls below a specified threshold.
+    * [`httpOut`](/kapacitor/v1.5/nodes/http_out_node/) - chaining method takes a string. It caches the most recent data for each group it receives, making it available over the Kapacitor HTTP server using the string argument as the final locator context.
+    * [`httpPost`](/kapacitor/v1.5/nodes/http_post_node/) - chaining method takes an array of strings. It can also be empty. It posts data to the HTTP endpoints specified in the string array.
+    * [`influxDBOut`](/kapacitor/v1.5/nodes/influx_d_b_out_node/) - empty chaining method – configured through property setters. It writes data to InfluxDB as it is received.
+    * [`k8sAutoscale`](/kapacitor/v1.5/nodes/k8s_autoscale_node/) - empty chaining method. It relies on a number of properties for configuration. It triggers autoscaling of Kubernetes™ resources.
+    * [`kapacitorLoopback`](/kapacitor/v1.5/nodes/kapacitor_loopback_node/) - empty chaining method – configured through property setters. It writes data back into the Kapacitor stream.
+    * [`log`](/kapacitor/v1.5/nodes/log_node/) - empty chaining method. It relies on `level` and `prefix` properties for configuration. It logs all data that passes through it.
+
+**User defined functions (UDFs)**
+
+User defined functions are nodes that implement functionality defined by user programs or scripts. These run as separate processes and communicate with Kapacitor over sockets or standard system data streams.
+
+ * [`UDF`](/kapacitor/v1.5/nodes/u_d_f_node/) - signature, properties and functionality are defined by the user. To learn about writing user defined functions, see the [User Defined Functions Webinar](https://www.influxdata.com/training/advanced-kapacitor-training-user-defined-functions-udfs/?ao_campid=70137000000JiJA) available at [Influx online University](https://www.influxdata.com/university/).
+
+**Internally used nodes - Do not use**
+
+ * [`noOp`](/kapacitor/v1.5/nodes/no_op_node/) - a helper node that performs no operations. Do not use it!
+
+
+# InfluxQL in TICKscript
+
+InfluxQL occurs in a TICKscript primarily in a `query` node, whose chaining method takes an InfluxQL query string. This will nearly always be a `SELECT` statement.
+
+InfluxQL is very similar in its syntax to SQL. When writing a query string for a TICKscript `query` node, generally only three clauses will be required: `SELECT`, `FROM` and `WHERE`. The general pattern is as follows:
+
+```SQL
+SELECT {<FIELD_KEY> | <TAG_KEY> | <FUNCTION>([<FIELD_KEY>|<TAG_KEY>])} FROM <DATABASE>.<RETENTION_POLICY>.<MEASUREMENT> WHERE {<CONDITIONAL_EXPRESSION>}
+```
+ * The base `SELECT` clause can take one or more field or tag keys, or functions. These can be combined with mathematical operations and literal values. Their values or results will be added to the data frame and can be aliased with an `AS` clause. The star wildcard, `*`, can also be used to retrieve all tags and fields from a measurement.
+ * When using the `AS` clause, the alias identifier can be accessed later on in the TICKscript as a named result by using double quotes.
+ * The `FROM` clause requires the database, retention policy and the measurement name from which the values will be selected. Each of these tokens is separated by a dot. The values for the database and retention policy need to be wrapped in double quotes.
+ * The `WHERE` clause requires a conditional expression. This may include `AND` and `OR` Boolean operators as well as mathematical operations.
+
+**Example 30 – A simple InfluxQL query statement**
+```js
+batch
+    |query('SELECT cpu, usage_idle FROM "telegraf"."autogen".cpu WHERE time > now() - 10s')
+        .period(10s)
+        .every(10s)
+    |httpOut('dump')
+```
+
+Example 30 shows a simple `SELECT` statement that takes the `cpu` tag and the `usage_idle` field from the cpu measurement as recorded over the last ten seconds.
+
+**Example 31 – A simple InfluxQL query statement with variables**
+```js
+var my_field = 'usage_idle'
+var my_tag = 'cpu'
+
+batch
+    |query('SELECT ' + my_tag + ', ' + my_field + ' FROM "telegraf"."autogen".cpu WHERE time > now() - 10s')
+        .period(10s)
+        .every(10s)
+    |httpOut('dump')
+```
+Example 31 reiterates the same query from Example 30, but shows how to add variables to the query string.
+
+**Example 32 – An InfluxQL query statement with a function call**
+```js
+...
+var data = batch
+    |query('''SELECT 100 - mean(usage_idle) AS stat FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu-total' ''')
+        .period(period)
+        .every(every)
+        .groupBy('host')
+...
+```
+Example 32 shows a `SELECT` statement that includes a function and a mathematical operation in the `SELECT` clause, as well as the `AS` alias clause.
+
+Note that the `SELECT` statement gets passed directly to the InfluxDB API. Within the InfluxQL query string, field and tag names do not need to be accessed using double quotes, as is the case elsewhere in TICKscript. However, the database name and retention policy do get wrapped in double quotes. String literals, such as `'cpu-total'`, are expressed inside the query string with single quotation marks.
+
+See the [InfluxQL](/influxdb/v1.3/query_language/) documentation for a complete introduction to working with the query language.
+
+# Lambda expressions
+
+Lambda expressions occur in a number of chaining and property methods. Two of the most common usages are in the creation of an `eval` node and in defining threshold properties on an `alert` node. They are declared with the keyword `lambda` followed by a colon: `lambda:`. They can contain mathematical and Boolean operations as well as calls to a large library of internal functions. With many nodes, their results can be captured by setting an `as` property on the node.
+
+The internal functions can be stateless, such as common mathematical and string manipulation functions, or they can be stateful, updating an internal value with each new call. As of release 1.3, three stateful functions are provided.
+
+ * `sigma` - counts the number of standard deviations a given value is from the running mean.
+ * `count` - counts the number of values processed.
+ * `spread` - computes the running range of all values.
+
+The full range of lambda expressions and their uses is presented in the topic [Lambda Expressions](/kapacitor/v1.5/tick/expr/).
+
+Within lambda expressions, TICKscript variables can be accessed using their plain identifiers. Tag and field values from a data series can be accessed by surrounding their keys in double quotes. Literals can also be used directly.
+
+**Example 33 – Lambda expressions**
+```js
+...
+
+// Parameters
+var info = 70
+var warn = 85
+var crit = 92
+var infoSig = 2.5
+var warnSig = 3
+var critSig = 3.5
+var period = 10s
+var every = 10s
+
+// Dataframe
+var data = batch
+    |query('''SELECT mean(used_percent) AS stat FROM "telegraf"."autogen"."mem" ''')
+        .period(period)
+        .every(every)
+        .groupBy('host')
+
+// Thresholds
+var alert = data
+    |eval(lambda: sigma("stat"))
+        .as('sigma')
+        .keep()
+    |alert()
+        .id('{{ index .Tags "host"}}/mem_used')
+        .message('{{ .ID }}:{{ index .Fields "stat" }}')
+        .info(lambda: "stat" > info OR "sigma" > infoSig)
+        .warn(lambda: "stat" > warn OR "sigma" > warnSig)
+        .crit(lambda: "stat" > crit OR "sigma" > critSig)
+
+// Alert
+alert
+    .log('/tmp/mem_alert_log.txt')
+```
+Example 33 contains four lambda expressions. The first expression is passed to the `eval` node. It calls the internal stateful function `sigma`, into which it passes the named result `stat`, which is set using the `AS` clause in the query string of the `query` node. Through the `.as()` setter of the `eval` node, its result is named `sigma`. Three other lambda expressions occur inside the threshold-determining property methods of the `alert` node. These lambda expressions also access the named results `stat` and `sigma`, as well as variables declared at the start of the script. They each define a series of Boolean operations, which set the level of the alert message.
+
+
+# Summary of variable use between syntax sub-spaces
+
+The following section summarizes how to access variables and data series tags and fields in TICKscript and its different syntax sub-spaces.
+
+### TICKscript variable
+
+Declaration examples:
+
+```js
+var my_var = 'foo'
+var my_field = 'usage_idle'
+var my_num = 2.71
+```
+
+ **Accessing...**
+
+ * In **TICKscript** simply use the identifier.
+
+   ```js
+   var my_other_num = my_num + 3.14
+   ...
+   |default()
+       .tag('bar', my_var)
+   ...
+   ```
+
+ * In a **query string** simply use the identifier with string concatenation.
+
+   ```js
+   ...
+   |query('SELECT ' + my_field + ' FROM "telegraf"."autogen".cpu WHERE host = \'' + my_var + '\'' )
+   ...
+   ```
+
+ * In a **lambda expression** simply use the identifier.
+
+   ```js
+   ...
+   .info(lambda: "stat" > my_num )
+   ...
+   ```
+
+ * In an **InfluxQL node** use the identifier. Note that in most cases such variables will hold strings used as field or tag names.
+
+   ```js
+   ...
+   |mean(my_var)
+   ...
+   ```
+
+### Tag, Field or Named Result
+
+Examples
+
+```js
+...
+ |query('SELECT mean(usage_idle) AS mean ...')
+...
+ |eval(lambda: sigma("stat"))
+     .as('sigma')
+...
+```
+
+**Accessing...**
+
+ * In a **TICKscript** method call use single quotes.
+
+   ```js
+   ...
+   |derivative('mean')
+   ...
+   ```
+
+ * In a **query string** use the identifier directly in the string.
+
+   ```js
+   ...
+   |query('SELECT cpu, usage_idle FROM "telegraf"."autogen".cpu')
+   ...
+   ```
+
+ * In a **lambda expression** use double quotes.
+
+   ```js
+   ...
+   |eval(lambda: 100.0 - "usage_idle")
+   ...
+   |alert()
+       .info(lambda: "sigma" > 2 )
+   ...
+   ```
+
+ * In an **InfluxQL node** use single quotes.
+
+   ```js
+   ...
+   |mean('used')
+   ...
+   ```
+
+# Gotchas
+
+## Literals versus field values
+
+Please keep in mind that literal string values are declared using single quotes. Double quotes are used only in lambda expressions to access the values of tags and fields. In most instances, using double quotes in place of single quotes will be caught as an error: `unsupported literal type`. On the other hand, using single quotes when double quotes were intended, i.e. when accessing a field value, will not be caught; if this occurs in a lambda expression, the literal value may be used instead of the desired value of the tag or field.
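+
+For instance, a minimal sketch of the mistake (the tag name and value are assumed):
+
+```js
+// Intended: compare the value of the tag host to a string literal.
+.where(lambda: "host" == 's001.example.com')
+// Mistake: compares two string literals, which is always false,
+// and is not flagged as an error by the parser.
+.where(lambda: 'host' == 's001.example.com')
+```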
+
+As of Kapacitor 1.3 it is possible to declare a variable using double quotes, which is invalid, and the parser will not flag it as an error. For example, `var my_var = "foo"` will pass so long as it is not used. However, when this variable is used in a lambda expression or other method call, it will trigger a compilation error: `unsupported literal type *ast.ReferenceNode`.
+
+## Circular rewrites
+
+When using the `InfluxDBOut` node, be careful not to create circular rewrites to the same database and the same measurement from which data is being read.
+
+**Example 34 – A circular rewrite**
+```js
+stream
+    |from()
+        .measurement('system')
+    |eval(lambda: "n_cpus" + 1)
+        .as('n_cpus')
+    |influxDBOut()
+        .database('telegraf')
+        .measurement('system')
+```
+> Note: Example 34 illustrates how an infinite loop might be created. Do not use it!
+
+The script in Example 34 could be used to define a task on the database `telegraf`, with the retention policy `autogen`. For example:
+
+```
+kapacitor define circular_task -type stream -tick circular_rewrite.tick -dbrp telegraf.autogen
+```
+In such a case, the above script will loop infinitely, adding a new data point with a new value for the field `n_cpus`, until the task is stopped.
+
+## Alerts and ids
+
+When using the `deadman` method along with one or more `alert` nodes, or when using more than one `alert` node in a pipeline, be sure to set the ID property with the property method `id()`. The ID value must be unique for each node. Failure to do so will lead Kapacitor to assume that they all belong to the same group of alerts, and so some alerts may not appear as expected.
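+
+A minimal sketch of the pattern (the measurement, field, threshold, and ID strings are assumed):
+
+```js
+var data = stream
+    |from()
+        .measurement('cpu')
+
+data
+    |deadman(100.0, 10s)
+        // Give the deadman switch its own unique ID.
+        .id('cpu_deadman/{{ index .Tags "host" }}')
+
+data
+    |alert()
+        // A distinct ID for the threshold alert.
+        .id('cpu_used/{{ index .Tags "host" }}')
+        .crit(lambda: "usage_idle" < 10)
+```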
+
+# Where to next?
+
+See the [examples](https://github.com/influxdata/kapacitor/tree/master/examples) in the code base on Github. See also the detailed use case solutions in the section [Guides](/kapacitor/v1.5/guides).
diff --git a/content/kapacitor/v1.5/troubleshooting/_index.md b/content/kapacitor/v1.5/troubleshooting/_index.md
new file mode 100644
index 000000000..dc437a036
--- /dev/null
+++ b/content/kapacitor/v1.5/troubleshooting/_index.md
@@ -0,0 +1,12 @@
+---
+title: Troubleshooting Kapacitor
+menu:
+  kapacitor_1_5:
+    name: Troubleshooting
+    weight: 110
+---
+
+## [Frequently asked questions](/kapacitor/v1.5/troubleshooting/frequently-asked-questions/)
+
+This page addresses frequent sources of confusion or important things to know related to Kapacitor.
+Where applicable, it links to outstanding issues on Github.
diff --git a/content/kapacitor/v1.5/troubleshooting/frequently-asked-questions.md b/content/kapacitor/v1.5/troubleshooting/frequently-asked-questions.md
new file mode 100644
index 000000000..48e61097a
--- /dev/null
+++ b/content/kapacitor/v1.5/troubleshooting/frequently-asked-questions.md
@@ -0,0 +1,136 @@
+---
+title: Kapacitor frequently asked questions
+
+menu:
+  kapacitor_1_5:
+    name: Frequently asked questions (FAQ)
+    weight: 10
+    parent: Troubleshooting
+---
+
+This page addresses frequent sources of confusion or important things to know related to Kapacitor.
+Where applicable, it links to outstanding issues on Github.
+
+**Administration**
+
+- [Are alert state and alert data lost when updating a script?](#are-alert-state-and-alert-data-lost-when-updating-a-script)
+- [How do I verify that Kapacitor is receiving data from InfluxDB?](#how-do-i-verify-that-kapacitor-is-receiving-data-from-influxdb)
+
+**TICKscript**
+
+- [Batches work but streams do not. Why?](#batches-work-but-streams-do-not-why)
+- [Is there a limit on the number of scripts Kapacitor can handle?](#is-there-a-limit-on-the-number-of-scripts-kapacitor-can-handle)
+- [What causes unexpected or additional values with the same timestamp?](#what-causes-unexpected-or-additional-values-with-the-same-timestamp)
+
+**Performance**
+
+- [Do you get better performance with running one complex script or having multiple scripts running in parallel?](#do-you-get-better-performance-with-running-one-complex-script-or-having-multiple-scripts-running-in-parallel)
+- [Do template-based scripts use fewer resources or are they just an ease-of-use tool?](#do-template-based-scripts-use-fewer-resources-or-are-they-just-an-ease-of-use-tool)
+- [How does Kapacitor handle high load?](#how-does-kapacitor-handle-high-load)
+- [How can I optimize Kapacitor tasks?](#how-can-i-optimize-kapacitor-tasks)
+
+## Administration
+
+### Are alert state and alert data lost when updating a script?
+
+Kapacitor will remember the last level of an alert, but other state-like data, such as data buffered in a window, will be lost.
+
+### How do I verify that Kapacitor is receiving data from InfluxDB?
+
+There are a few ways to determine whether or not Kapacitor is receiving data from InfluxDB.
+The [`kapacitor stats ingress`](/kapacitor/v1.5/working/cli_client/#stats-ingress) command
+outputs InfluxDB measurements stored in the Kapacitor database as well as the number
+of data points that pass through the Kapacitor server.
+
+```bash
+$ kapacitor stats ingress
+Database   Retention Policy Measurement   Points Received
+_internal  monitor          cq            5274
+_internal  monitor          database      52740
+_internal  monitor          httpd         5274
+_internal  monitor          queryExecutor 5274
+_internal  monitor          runtime       5274
+_internal  monitor          shard         300976
+# ...
+```
+
+You can also use Kapacitor's [`/debug/vars` API endpoint](/kapacitor/v1.5/working/api/#debug-vars-http-endpoint)
+to view and monitor ingest rates.
+Using this endpoint and [Telegraf's Kapacitor input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kapacitor),
+you can create visualizations to monitor Kapacitor ingest rates.
+Below are example queries that use Kapacitor data written into InfluxDB using
+Telegraf's Kapacitor input plugin:
+
+_**Kapacitor ingest rate (points/sec)**_
+```sql
+SELECT sum(points_received_rate) FROM (SELECT non_negative_derivative(first("points_received"),1s) as points_received_rate FROM "_kapacitor"."autogen"."ingress" WHERE time > :dashboardTime: GROUP BY "database", "retention_policy", "measurement", time(1m)) WHERE time > :dashboardTime: GROUP BY time(1m)
+```
+
+_**Kapacitor ingest by task (points/sec)**_
+```sql
+SELECT non_negative_derivative("collected",1s) FROM "_kapacitor"."autogen"."edges" WHERE time > now() - 15m AND ("parent"='stream' OR "parent"='batch') GROUP BY task
+```
+
+## TICKscript
+
+### Batches work but streams do not. Why?
+
+Make sure port `9092` is open to inbound connections.
+Stream data is pushed to port `9092`, so it must be allowed through the firewall.
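+
+A quick way to check that the port is reachable is to write a test point directly to Kapacitor's write endpoint. A sketch, assuming Kapacitor listens on `localhost:9092` and the target database and retention policy are `telegraf`/`autogen` (both assumptions):
+
+```bash
+curl -i -XPOST 'http://localhost:9092/kapacitor/v1/write?db=telegraf&rp=autogen' \
+  --data-binary 'cpu,host=example.com value=87.6'
+```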
+
+### Is there a limit on the number of scripts Kapacitor can handle?
+
+There is no software limit, but the number will be limited by available server resources.
+
+### What causes unexpected or additional values with the same timestamp?
+
+If data is ingested at irregular intervals and you see unexpected results with the same timestamp, use the [`log` node](/kapacitor/v1.5/nodes/log_node) when ingesting data in your TICKscript to debug issues. This surfaces issues such as duplicate data that would otherwise be hidden by `httpOut`.
+
+## Performance
+
+### Do you get better performance with running one complex script or having multiple scripts running in parallel?
+
+Taking things to the extreme, the best case is one task that consumes all the data and does all the work, since there is added overhead when managing multiple tasks.
+However, significant effort has gone into reducing the overhead of each task.
+Use tasks in a way that makes logical sense for your project and organization.
+If you run into performance issues with multiple tasks, [let us know](https://github.com/influxdata/kapacitor/issues/new).
+_**As a last resort**_, merge tasks into more complex tasks.
+
+### Do template-based scripts use fewer resources or are they just an ease-of-use tool?
+
+Templates are just an ease-of-use tool and make no difference with regard to performance.
+
+### How does Kapacitor handle high load?
+
+If Kapacitor is unable to ingest and process incoming data before it receives new data,
+Kapacitor queues incoming data in memory and processes it when able.
+Memory requirements of queued data depend on the ingest rate and shape of the incoming data.
+Once Kapacitor is able to process all queued data, it slowly releases memory
+as the internal garbage collector reclaims memory.
+
+Extended periods of high data ingestion can overwhelm available system resources,
+forcing the operating system to stop the `kapacitord` process.
+The primary means for avoiding this issue are:
+
+- Ensure your hardware provides enough system resources to handle additional load.
+- Optimize your Kapacitor tasks. _[See below](#how-can-i-optimize-kapacitor-tasks)_.
+
+{{% note %}}
+As Kapacitor processes data in the queue, it may consume other system resources such as
+CPU, disk and network IO, etc., which will affect the overall performance of your Kapacitor server.
+{{% /note %}}
+
+### How can I optimize Kapacitor tasks?
+
+As you optimize Kapacitor tasks, consider the following:
+
+#### "Batch" incoming data
+
+[`batch`](/kapacitor/v1.5/nodes/batch_node/) queries data from InfluxDB in batches.
+As long as Kapacitor is able to process a batch before the next batch is queried,
+it won't need to queue anything.
+
+[`stream`](/kapacitor/v1.5/nodes/stream_node/) mirrors all InfluxDB writes to
+Kapacitor in real time and is more prone to queueing.
+If using `stream`, segment incoming data into time-based batches using
+[`window`](/kapacitor/v1.5/nodes/window_node/), as in the sketch below.
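+
+A minimal sketch of this pattern (the measurement, field, and intervals are assumed):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    // Collect points into 10s batches so downstream nodes
+    // process groups of points instead of individual writes.
+    |window()
+        .period(10s)
+        .every(10s)
+    |mean('usage_idle')
+        .as('mean_usage_idle')
+```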
diff --git a/content/kapacitor/v1.5/working/_index.md b/content/kapacitor/v1.5/working/_index.md
new file mode 100644
index 000000000..8890a7e1e
--- /dev/null
+++ b/content/kapacitor/v1.5/working/_index.md
@@ -0,0 +1,19 @@
+---
+title: Working with Kapacitor
+
+menu:
+  kapacitor_1_5:
+    name: Working with Kapacitor
+    identifier: work-w-kapacitor
+    weight: 30
+---
+
+The documents in this section present the key features of the Kapacitor daemon
+(`kapacitord`) and the Kapacitor client (`kapacitor`).
+
+* [Kapacitor and Chronograf](/kapacitor/v1.5/working/kapa-and-chrono/) – describes how Kapacitor is integrated with the Chronograf graphical user interface application for managing tasks and alerts.
+* [Kapacitor API Reference documentation](/kapacitor/v1.5/working/api/) – presents the HTTP API and how to use it to update tasks and the Kapacitor configuration.
+* [Alerts - Overview](/kapacitor/v1.5/working/alerts/) – presents an overview of the Kapacitor alerting system.
+* [Alerts - Using topics](/kapacitor/v1.5/working/using_alert_topics/) – a walk-through on creating and using alert topics.
+* [Alerts - Event handler setup](/kapacitor/v1.5/working/event-handler-setup/) – presents setting up event handlers for HipChat and Telegram, which can serve as a blueprint for other event handlers.
+* [Dynamic data scraping](/kapacitor/v1.5/working/scraping-and-discovery/) – introduces the discovery and scraping features, which allow metrics to be dynamically pulled into Kapacitor and then written to InfluxDB.
diff --git a/content/kapacitor/v1.5/working/alerts.md b/content/kapacitor/v1.5/working/alerts.md
new file mode 100644
index 000000000..118d9b826
--- /dev/null
+++ b/content/kapacitor/v1.5/working/alerts.md
@@ -0,0 +1,190 @@
+---
+title: Kapacitor alerts overview
+
+menu:
+  kapacitor_1_5:
+    name: Alerts overview
+    weight: 3
+    parent: work-w-kapacitor
+---
+
+Kapacitor makes it possible to handle alert messages in two different ways.
+
+* The messages can be pushed directly to an event handler exposed through the
+[Alert](/kapacitor/v1.5/nodes/alert_node/) node.
+* The messages can be published to a topic namespace to which one or more alert
+handlers can subscribe.
+
+
+
+No matter which approach is used, the handlers need to be enabled and configured
+in the [configuration](/kapacitor/v1.5/administration/configuration/#optional-table-groupings)
+file. If the handler requires sensitive information such as tokens and
+passwords, it can also be configured using the [Kapacitor HTTP API](/kapacitor/v1.5/working/api/#overriding-configurations).
+
+## Push to handler
+
+Pushing messages to a handler is the basic approach presented in the
+[Getting started with Kapacitor](/kapacitor/v1.5/introduction/getting-started/#triggering-alerts-from-stream-data)
+guide. This involves simply calling the relevant chaining method made available
+through the `alert` node. Messages can be pushed to `log()` files, the `email()`
+service, the `httpOut()` cache and many [third party services](#list-of-handlers).
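+
+For instance, a minimal sketch of pushing an alert event to two handlers at once (the measurement, threshold, file path, and channel are assumed):
+
+```js
+stream
+    |from()
+        .measurement('cpu')
+    |alert()
+        .crit(lambda: "usage_idle" < 10)
+        // Push the alert event directly to a log file and to Slack.
+        .log('/tmp/cpu_alert.log')
+        .slack()
+            .channel('#kapacitor')
+```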
+
+## Publish and subscribe
+
+An alert topic is simply a namespace where alerts are grouped.
+When an alert event fires it can be published to a topic.
+Multiple handlers can subscribe (can be bound) to that topic and all handlers
+process each alert event for the topic. Handlers get bound to topics through
+the `kapacitor` command line client and handler binding files. Handler binding
+files can be written in `yaml` or `json`. They contain four key fields and one
+optional one.
+
+* `topic`: declares the topic to which the handler will subscribe.
+* `id`: declares the identity of the binding.
+* `kind`: declares the type of event handler to be used. Note that this
+needs to be enabled in the `kapacitord` configuration.
+* `match`: (optional) declares a match expression used to filter which
+alert events will be processed. See the [Match Expressions](#match-expressions)
+section below.
+* `options`: options specific to the handler in question. These are
+listed below in the section [List of handlers](#list-of-handlers).
+
+**Example 1: A handler binding file for the _slack_ handler and _cpu_ topic**
+```
+topic: cpu
+id: slack
+kind: slack
+options:
+  channel: '#kapacitor'
+```
+
+Example 1 could be saved into a file named `slack_cpu_handler.yaml`.
+
+The handler binding can then be defined as a Kapacitor topic handler through the command
+line client.
+
+```
+$ kapacitor define-topic-handler slack_cpu_handler.yaml
+```
+
+Handler bindings can also be created over the HTTP API. See the
+[Create a Handler](/kapacitor/v1.5/working/api/#creating-handlers) section of
+the HTTP API document.
+
+For a walk-through on defining and using alert topics, see
+[Using Alert Topics](/kapacitor/v1.5/working/using_alert_topics).
+
+## Handlers
+
+A handler takes action on incoming alert events for a specific topic.
+Each handler operates on exactly one topic.
+
+### List of handlers
+
+The following is a list of available alert event handlers:
+
+| Handler | Description |
+| ------- | ----------- |
+| [Alerta](/kapacitor/v1.5/event_handlers/alerta) | Post alert message to Alerta. |
+| [email](/kapacitor/v1.5/event_handlers/email) | Send an email with alert data. |
+| [exec](/kapacitor/v1.5/event_handlers/exec) | Execute a command passing alert data over STDIN. |
+| [HipChat](/kapacitor/v1.5/event_handlers/hipchat) | Post alert message to HipChat room. |
+| [Kafka](/kapacitor/v1.5/event_handlers/kafka) | Send alert to an Apache Kafka cluster. |
+| [log](/kapacitor/v1.5/event_handlers/log) | Log alert data to file. |
+| [MQTT](/kapacitor/v1.5/event_handlers/mqtt) | Post alert message to MQTT. |
+| [OpsGenie v1](/kapacitor/v1.5/event_handlers/opsgenie/v1) | Send alert to OpsGenie using their v1 API. (Deprecated) |
+| [OpsGenie v2](/kapacitor/v1.5/event_handlers/opsgenie/v2) | Send alert to OpsGenie using their v2 API. |
+| [PagerDuty v1](/kapacitor/v1.5/event_handlers/pagerduty/v1) | Send alert to PagerDuty using their v1 API. (Deprecated) |
+| [PagerDuty v2](/kapacitor/v1.5/event_handlers/pagerduty/v2) | Send alert to PagerDuty using their v2 API. |
+| [post](/kapacitor/v1.5/event_handlers/post) | HTTP POST data to a specified URL. |
+| [Pushover](/kapacitor/v1.5/event_handlers/pushover) | Send alert to Pushover. |
+| [Sensu](/kapacitor/v1.5/event_handlers/sensu) | Post alert message to Sensu client. |
+| [Slack](/kapacitor/v1.5/event_handlers/slack) | Post alert message to Slack channel. |
+| [SNMPTrap](/kapacitor/v1.5/event_handlers/snmptrap) | Trigger SNMP traps. |
+| [Talk](/kapacitor/v1.5/event_handlers/talk) | Post alert message to Talk client. |
+| [tcp](/kapacitor/v1.5/event_handlers/tcp) | Send data to a specified address via raw TCP. |
+| [Telegram](/kapacitor/v1.5/event_handlers/telegram) | Post alert message to Telegram client. |
+| [VictorOps](/kapacitor/v1.5/event_handlers/victorops) | Send alert to VictorOps. |
+
+
+## Match expressions
+
+Alert handlers support match expressions that filter which alert events the handler processes.
+
+A match expression is a TICKscript lambda expression.
+The data that triggered the alert is available to the match expression, including all fields and tags.
+
+In addition to the data that triggered the alert, metadata about the alert is available.
+This alert metadata is available via various functions.
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| level | int | The alert level of the event, one of '0', '1', '2', or '3', corresponding to 'OK', 'INFO', 'WARNING', and 'CRITICAL'. |
+| changed | bool | Indicates whether the alert level changed with this event. |
+| name | string | Returns the measurement name of the triggering data. |
+| taskName | string | Returns the name of the task that generated the alert event. |
+| duration | duration | Returns the duration of the event in a non-OK state. |
+
+Additionally, the vars `OK`, `INFO`, `WARNING`, and `CRITICAL` have been defined to correspond with the return value of the `level` function.
+
+For example, to send only critical alerts to a handler, use this match expression:
+
+```yaml
+match: level() == CRITICAL
+```
+
+### Examples
+
+Send only changed events to the handler:
+
+```yaml
+match: changed() == TRUE
+```
+
+Send only WARNING and CRITICAL events to the handler:
+
+```yaml
+match: level() >= WARNING
+```
+
+Send events with the tag "host" equal to `s001.example.com` to the handler:
+
+```yaml
+match: "\"host\" == 's001.example.com'"
+```
+
+#### Alert event data
+
+Each alert event that gets sent to a handler contains the following alert data:
+
+| Name | Description |
+| ---- | ----------- |
+| **ID** | The ID of the alert, user defined. |
+| **Message** | The alert message, user defined. |
+| **Details** | The alert details, user defined HTML content. |
+| **Time** | The time the alert occurred. |
+| **Duration** | The duration of the alert in nanoseconds. |
+| **Level** | One of OK, INFO, WARNING or CRITICAL. |
+| **Data** | influxql.Result containing the data that triggered the alert. |
+| **Recoverable** | Indicates whether the alert is auto-recoverable. Determined by the [`.noRecoveries()`](/kapacitor/v1.5/nodes/alert_node/#norecoveries) property. |
+
+This data is used by [event handlers](/kapacitor/v1.5/event_handlers) in their
+handling of alert events.
+
+Alert messages use [Go templates](https://golang.org/pkg/text/template/) and
+have access to the alert data.
+
+```js
+|alert()
+    // ...
+    .message('{{ .ID }} is {{ .Level }} value:{{ index .Fields "value" }}, {{ if not .Recoverable }}non-recoverable{{ end }}')
+```
diff --git a/content/kapacitor/v1.5/working/api.md b/content/kapacitor/v1.5/working/api.md
new file mode 100644
index 000000000..d386aa474
--- /dev/null
+++ b/content/kapacitor/v1.5/working/api.md
@@ -0,0 +1,2492 @@
+---
+title: Kapacitor HTTP API reference documentation
+description: Use the Kapacitor HTTP API endpoints to control task execution, query statuses, and collect troubleshooting data.
+aliases:
+  - /kapacitor/v1.5/api
+  - /kapacitor/v1.5/api/api
+
+menu:
+  kapacitor_1_5:
+    weight: 10
+    parent: work-w-kapacitor
+---
+
+* [General Information](#general-information)
+* [Writing Data](#writing-data)
+* [Tasks](#tasks)
+* [Templates](#templates)
+* [Recordings](#recordings)
+* [Replays](#replays)
+* [Alerts](#alerts)
+* [Configuration](#overriding-configurations)
+* [Storage](#storage)
+* [Logging](#logging)
+* [Testing services](#testing-services)
+* [Miscellaneous](#miscellaneous)
+
+## General information
+
+Kapacitor provides an HTTP API on port 9092 by default.
+With the API you can control which tasks are executing, query the status of tasks, manage recordings, and more.
+
+Each section below defines the available API endpoints and their inputs and outputs.
+
+All requests are versioned and namespaced using the base path `/kapacitor/v1/`.
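+
+For instance, a quick connectivity check against a local instance (a sketch, assuming Kapacitor is listening on `localhost:9092`):
+
+```bash
+# A successful ping returns a 204 No Content response.
+curl -i http://localhost:9092/kapacitor/v1/ping
+```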
+
+### Response codes
+
+All requests can return these response codes:
+
+| HTTP Response Code | Meaning |
+| ------------------ | ------- |
+| 2xx | The request was a success, content is dependent on the request. |
+| 4xx | Invalid request, refer to the error for what is wrong with the request. Repeating the request will continue to return the same error. |
+| 5xx | The server was unable to process the request, refer to the error for a reason. Repeating the request may result in a success if the server issue has been resolved. |
+
+### Errors
+
+All requests can return JSON in the following format to provide more information about a failed request.
+
+```
+{
+    "error" : "error message"
+}
+```
+
+### Query parameters vs JSON body
+
+To make using this API a consistent and easy experience, we follow one simple rule for when extra information
+about a request is found in the query parameters of the URL or when it is part of the submitted JSON body.
+
+Query parameters are used only for GET requests; all other requests expect parameters to be specified in the JSON body.
+
+>NOTE: The /kapacitor/v1/write endpoint is the one exception to this rule since Kapacitor is compatible with the InfluxDB /write endpoint.
+
+### Links
+
+When creating resources in Kapacitor, the API server will return a `link` object with an `href` of the resource.
+Clients should not need to perform path manipulation in most cases and can use the links provided from previous calls.
+
+### IDs
+
+The API allows the client to specify IDs for the various resources.
+This way you can control the meaning of the IDs.
+If you do not specify an ID, a random UUID will be generated for the resource.
+
+All IDs must match the regex `^[-\._\p{L}0-9]+$`, which is essentially numbers, Unicode letters, `-`, `.` and `_`.
+
+### Backwards compatibility
+
+Currently, Kapacitor is in its 1.x release cycle, with a guarantee that all new releases will be backwards compatible with previous releases.
+This applies directly to the API. New additions may be made to the API, but existing endpoints will not be changed in backwards incompatible ways during the 1.x releases.
+
+### Technical preview
+
+When a new feature is added to Kapacitor, it may be added in a "technical preview" release for a few minor releases, and then later promoted to a fully fledged v1 feature.
+Preview means that the newly added features may be changed in backwards incompatible ways until they are promoted to v1 features.
+Technical previews allow new features to fully mature while maintaining regularly scheduled releases.
+
+To make it clear which features of the API are in technical preview, the base path `/kapacitor/v1preview` is used.
+If you wish to preview some of these new features, use the path `/kapacitor/v1preview` instead of `/kapacitor/v1` for your requests.
+All v1 endpoints are available under the v1preview path so that your client need not be configured with multiple paths.
+The technical preview endpoints are only available under the v1preview path.
+
+>**Note:** Using a technical preview means that you may have to update your client for breaking changes to the previewed endpoints.
+
+## Writing data
+
+Kapacitor accepts writing data over HTTP using InfluxData's [Line Protocol data format](/influxdb/latest/write_protocols/).
+The `kapacitor/v1/write` endpoint is identical in nature to the InfluxDB `/write` endpoint.
+
+| Query Parameter | Purpose |
+| --------------- | ------- |
+| db | Database name for the writes. |
+| rp | Retention policy name for the writes. |
+
+>NOTE: Kapacitor scopes all points by their database and retention policy.
+As a result, you MUST specify the `rp` for writes so that Kapacitor uses the correct retention policy.
+
+#### Example
+
+Write data to Kapacitor.
+
+```
+POST /kapacitor/v1/write?db=DB_NAME&rp=RP_NAME
+cpu,host=example.com value=87.6
+```
+
+To maintain compatibility with the equivalent InfluxDB `/write` endpoint, the `/write` endpoint is an alias for the `/kapacitor/v1/write` endpoint.
+
+```
+POST /write?db=DB_NAME&rp=RP_NAME
+cpu,host=example.com value=87.6
+```
+
+## Tasks
+
+A task represents work for Kapacitor to perform.
+A task is defined by its ID, type, TICKscript, and the list of database retention policy pairs it is allowed to access.
+
+### Defining tasks
+
+To define a task, POST to the `/kapacitor/v1/tasks` endpoint.
+If a task already exists, then use the `PATCH` method to modify any property of the task.
+
+Define a task using a JSON object with the following options:
+
+| Property | Purpose |
+| -------- | ------- |
+| id | Unique identifier for the task. If empty, a random ID will be chosen. |
+| template-id | An optional ID of a template to use instead of specifying a TICKscript and type directly. |
+| type | The task type: `stream` or `batch`. |
+| dbrps | List of database retention policy pairs the task is allowed to access. |
+| script | The content of the script. |
+| status | One of `enabled` or `disabled`. |
+| vars | A set of vars for overwriting any defined vars in the TICKscript. |
+
+When using `PATCH`, if any property is missing, the task will be left unmodified.
+
+> **Note:** When patching a task, no changes are made to the running task.
+> The task must be disabled and re-enabled for any changes to take effect.
+
+##### Vars
+
+The vars object has the form:
+
+```json
+{
+    "field_name" : {
+        "value": <VALUE>,
+        "type": <TYPE>
+    },
+    "another_field" : {
+        "value": <VALUE>,
+        "type": <TYPE>
+    }
+}
+```
+
+The following is a table of valid types and example values.
+
+| Type | Example Value | Description |
+| ---- | ------------- | ----------- |
+| bool | true | "true" or "false" |
+| int | 42 | Any integer value |
+| float | 2.5 or 67 | Any numeric value |
+| duration | "1s" or 1000000000 | Any integer value, interpreted in nanoseconds, or an InfluxQL duration string (i.e. 10000000000 is 10s) |
+| string | "a string" | Any string value |
+| regex | "^abc.*xyz" | Any string value that represents a valid Go regular expression https://golang.org/pkg/regexp/ |
+| lambda | "\"value\" > 5" | Any string that is a valid TICKscript lambda expression |
+| star | "" | No value is required, a star type var represents the literal `*` in TICKscript (i.e. `.groupBy(*)`) |
+| list | [{"type": TYPE, "value": VALUE}] | A list of var objects. Currently lists may only contain string or star vars |
+
+#### Example
+
+Create a new task with the `id` value of `TASK_ID`.
+
+```
+POST /kapacitor/v1/tasks
+{
+    "id" : "TASK_ID",
+    "type" : "stream",
+    "dbrps": [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}],
+    "script": "stream\n    |from()\n        .measurement('cpu')\n",
+    "vars" : {
+        "var1": {
+            "value": 42,
+            "type": "float"
+        }
+    }
+}
+```
+
+Response with task `id` and `link`.
+
+```json
+{
+    "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"},
+    "id" : "TASK_ID",
+    "type" : "stream",
+    "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}],
+    "script" : "stream\n    |from()\n        .measurement('cpu')\n",
+    "dot" : "digraph TASK_ID { ... }",
}", + "vars" : { + "var1": { + "value": 42, + "type": "float" + } + }, + "status" : "enabled", + "executing" : true, + "error" : "", + "created": "2006-01-02T15:04:05Z07:00", + "modified": "2006-01-02T15:04:05Z07:00", + "stats" : {} +} +``` + +Modify only the `dbrps` of the task. + +``` +PATCH /kapacitor/v1/tasks/TASK_ID +{ + "dbrps": [{"db": "NEW_DATABASE_NAME", "rp" : "NEW_RP_NAME"}] +} +``` + +>**Note:** Setting any DBRP will overwrite all stored DBRPs. +Setting any Vars will overwrite all stored Vars. + + +Enable an existing task. + +``` +PATCH /kapacitor/v1/tasks/TASK_ID +{ + "status" : "enabled", +} +``` + +Disable an existing task. + +``` +PATCH /kapacitor/v1/tasks/TASK_ID +{ + "status" : "disabled", +} +``` + +Define a new task that is enabled on creation. + +``` +POST /kapacitor/v1/tasks +{ + "id" : "TASK_ID", + "type" : "stream", + "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], + "script" : "stream\n |from()\n .measurement('cpu')\n", + "status" : "enabled" +} +``` + +Response with task `id` and `link`. + +```json +{ + "id" : "TASK_ID", + "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"} +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Task created, contains task information. | +| 404 | Task does not exist | + +### Get task + +To get information about a task, make a `GET` request to the `/kapacitor/v1/tasks/TASK_ID` endpoint. + +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| dot-view | attributes | One of `labels` or `attributes`. Labels is less readable but will correctly render with all the information contained in labels. | +| script-format | formatted | One of `formatted` or `raw`. Raw will return the script identical to how it was defined. Formatted will first format the script. | +| replay-id | | Optional ID of a running replay. The returned task information will be in the context of the task for the running replay. | + + +A task has these read-only properties in addition to the properties listed [above](#defining-tasks). + +| Property | Description | +| -------- | ----------- | +| dot | [GraphViz DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) syntax formatted representation of the task DAG. | +| executing | Whether the task is currently executing. | +| error | Any error encountered when executing the task. | +| stats | Map of statistics about a task. | +| created | Date the task was first created | +| modified | Date the task was last modified | +| last-enabled | Date the task was last set to status `enabled` | + +#### Example + +Get information about a task using defaults. + +``` +GET /kapacitor/v1/tasks/TASK_ID +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"}, + "id" : "TASK_ID", + "type" : "stream", + "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], + "script" : "stream\n |from()\n .measurement('cpu')\n", + "dot" : "digraph TASK_ID { ... }", + "status" : "enabled", + "executing" : true, + "error" : "", + "created": "2006-01-02T15:04:05Z07:00", + "modified": "2006-01-02T15:04:05Z07:00", + "last-enabled": "2006-01-03T15:04:05Z07:00", + "stats" : {} +} +``` + +Get information about a task using only labels in the DOT content and skip the format step. 
+ +``` +GET /kapacitor/v1/tasks/TASK_ID?dot-view=labels&script-format=raw +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/tasks/TASK_ID"}, + "id" : "TASK_ID", + "type" : "stream", + "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], + "script" : "stream|from().measurement('cpu')", + "dot" : "digraph TASK_ID { ... }", + "status" : "enabled", + "executing" : true, + "error" : "", + "created": "2006-01-02T15:04:05Z07:00", + "modified": "2006-01-02T15:04:05Z07:00", + "last-enabled": "2006-01-03T15:04:05Z07:00", + "stats" : {} +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success | +| 404 | Task does not exist | + + +### Deleting tasks + +To delete a task, make a DELETE request to the `/kapacitor/v1/tasks/TASK_ID` endpoint. + +``` +DELETE /kapacitor/v1/tasks/TASK_ID +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 204 | Success | + +>NOTE: Deleting a non-existent task is not an error and will return a 204 success. + + +### Listing tasks + +To get information about several tasks, make a `GET` request to the `/kapacitor/v1/tasks` endpoint. + +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| pattern | | Filter results based on the pattern. Uses standard shell glob matching, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. | +| fields | | List of fields to return. If empty returns all fields. Fields `id` and `link` are always returned. | +| dot-view | attributes | One of `labels` or `attributes`. Labels is less readable but will correctly render with all the information contained in labels. | +| script-format | formatted | One of `formatted` or `raw`. Raw will return the script identical to how it was defined. Formatted will first format the script. | +| offset | 0 | Offset count for paginating through tasks. | +| limit | 100 | Maximum number of tasks to return. | + +#### Example + +Get all tasks. + +``` +GET /kapacitor/v1/tasks +``` + +```json +{ + "tasks" : [ + { + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/TASK_ID"}, + "id" : "TASK_ID", + "type" : "stream", + "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], + "script" : "stream|from().measurement('cpu')", + "dot" : "digraph TASK_ID { ... }", + "status" : "enabled", + "executing" : true, + "error" : "", + "stats" : {} + }, + { + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/ANOTHER_TASK_ID"}, + "id" : "ANOTHER_TASK_ID", + "type" : "stream", + "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], + "script" : "stream|from().measurement('cpu')", + "dot" : "digraph ANOTHER_TASK_ID{ ... }", + "status" : "disabled", + "executing" : true, + "error" : "", + "stats" : {} + } + ] +} +``` + +Optionally, you can specify a glob `pattern` to list only matching tasks. + +``` +GET /kapacitor/v1/tasks?pattern=TASK* +``` + +```json +{ + "tasks" : [ + { + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/TASK_ID"}, + "id" : "TASK_ID", + "type" : "stream", + "dbrps" : [{"db": "DATABASE_NAME", "rp" : "RP_NAME"}], + "script" : "stream|from().measurement('cpu')", + "dot" : "digraph TASK_ID { ... }", + "status" : "enabled", + "executing" : true, + "error" : "", + "stats" : {} + } + ] +} +``` + +Get all tasks, but only the status, executing and error fields. 
+ +``` +GET /kapacitor/v1/tasks?fields=status&fields=executing&fields=error +``` + +```json +{ + "tasks" : [ + { + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/TASK_ID"}, + "id" : "TASK_ID", + "status" : "enabled", + "executing" : true, + "error" : "", + }, + { + "link" : {"rel":"self", "href":"/kapacitor/v1/tasks/ANOTHER_TASK_ID"}, + "id" : "ANOTHER_TASK_ID", + "status" : "disabled", + "executing" : true, + "error" : "", + } + ] +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success | + +>NOTE: If the pattern does not match any tasks an empty list will be returned, with a 200 success. + +### Custom task HTTP endpoints + +In TICKscript, it is possible to expose a cache of recent data via the [HTTPOut](https://docs.influxdata.com/kapacitor/latest/nodes/http_out_node/) node. +The data is available at the path `/kapacitor/v1/tasks/TASK_ID/ENDPOINT_NAME`. + +### Example + +For the TICKscript: + +```go +stream + |from() + .measurement('cpu') + |window() + .period(60s) + .every(60s) + |httpOut('mycustom_endpoint') +``` + +``` +GET /kapacitor/v1/tasks/TASK_ID/mycustom_endpoint +``` + +```json +{ + "series": [ + { + "name": "cpu", + "columns": [ + "time", + "value" + ], + "values": [ + [ + "2015-01-29T21:55:43.702900257Z", + 55 + ], + [ + "2015-01-29T21:56:43.702900257Z", + 42 + ], + ] + } + ] +} +``` + +The output is the same as a query for data to [InfluxDB](https://docs.influxdata.com/influxdb/latest/guides/querying_data/). + + +## Templates + +You can also define task templates. +A task template is defined by a template TICKscript, and a task type. + + +### Defining templates + +To define a template POST to the `/kapacitor/v1/templates` endpoint. +If a template already exists then use the `PATCH` method to modify any property of the template. + +Define a template using a JSON object with the following options: + +| Property | Purpose | +| -------- | ------- | +| id | Unique identifier for the template. If empty a random ID will be chosen. | +| type | The template type: `stream` or `batch`. | +| script | The content of the script. | + +When using PATCH, if any option is missing it will be left unmodified. + + +#### Updating templates + +When updating an existing template all associated tasks are reloaded with the new template definition. +The first error if any is returned when reloading associated tasks. +If an error occurs, any task that was updated to the new definition is reverted to the old definition. +This ensures that all associated tasks for a template either succeed or fail together. + +As a result, you will not be able to update a template if it introduces a breaking change in the TICKscript. +In order to update a template in a breaking way, you have two options: + +1. Create a new template and reassign each task to the new template, updating the task vars as needed. +2. If the breaking change is forward compatible (i.e. adds a new required var), first update each task with the needed vars, +then update the template once all tasks are ready. + + +#### Example + +Create a new template with ID `TEMPLATE_ID`. + +``` +POST /kapacitor/v1/templates +{ + "id" : "TEMPLATE_ID", + "type" : "stream", + "script": "stream\n |from()\n .measurement('cpu')\n" +} +``` + +Response with template `id` and `link`. + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/templates/TASK_ID"}, + "id" : "TASK_ID", + "type" : "stream", + "script" : "stream\n |from()\n .measurement('cpu')\n", + "dot" : "digraph TASK_ID { ... 
}", + "error" : "", + "created": "2006-01-02T15:04:05Z07:00", + "modified": "2006-01-02T15:04:05Z07:00", +} +``` + +Modify only the script of the template. + +``` +PATCH /kapacitor/v1/templates/TEMPLATE_ID +{ + "script": "stream|from().measurement('mem')" +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Template created, contains template information. | +| 404 | Template does not exist | + +### Get Template + +To get information about a template, make a GET request to the `/kapacitor/v1/templates/TEMPLATE_ID` endpoint. + +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| script-format | formatted | One of `formatted` or `raw`. Raw will return the script identical to how it was defined. Formatted will first format the script. | + + +A template has these read only properties in addition to the properties listed [above](#defining-templates). + +| Property | Description | +| -------- | ----------- | +| vars | Set of named vars from the TICKscript with their type, default values and description. | +| dot | [GraphViz DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) syntax formatted representation of the template DAG. NOTE: lables vs attributes does not matter since a template is never executing. | +| error | Any error encountered when reading the template. | +| created | Date the template was first created | +| modified | Date the template was last modified | + +#### Example + +Get information about a template using defaults. + +``` +GET /kapacitor/v1/templates/TEMPLATE_ID +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/templates/TEMPLATE_ID"}, + "id" : "TASK_ID", + "type" : "stream", + "script" : "var x = 5\nstream\n |from()\n .measurement('cpu')\n", + "vars": {"x":{"value": 5, "type":"int", "description": "threshold value"}}, + "dot" : "digraph TASK_ID { ... }", + "error" : "", + "created": "2006-01-02T15:04:05Z07:00", + "modified": "2006-01-02T15:04:05Z07:00", +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success | +| 404 | Template does not exist | + + +### Deleting templates + +To delete a template, make a DELETE request to the `/kapacitor/v1/templates/TEMPLATE_ID` endpoint. + +>NOTE:Deleting a template renders all associated tasks as orphans. The current state of the orphaned tasks will be left unmodified, +but orphaned tasks will not be able to be enabled. + +``` +DELETE /kapacitor/v1/templates/TEMPLATE_ID +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 204 | Success | + +>NOTE: Deleting a non-existent template is not an error and will return a 204 success. + + +### Listing templates + +To get information about several templates, make a GET request to the `/kapacitor/v1/templates` endpoint. + +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| pattern | | Filter results based on the pattern. Uses standard shell glob matching, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. | +| fields | | List of fields to return. If empty returns all fields. Fields `id` and `link` are always returned. | +| script-format | formatted | One of `formatted` or `raw`. Raw will return the script identical to how it was defined. Formatted will first format the script. | +| offset | 0 | Offset count for paginating through templates. | +| limit | 100 | Maximum number of templates to return. | + +#### Example + +Get all templates. 
+
+```
+GET /kapacitor/v1/templates
+```
+
+```json
+{
+    "templates" : [
+        {
+            "link" : {"rel":"self", "href":"/kapacitor/v1/templates/TEMPLATE_ID"},
+            "id" : "TEMPLATE_ID",
+            "type" : "stream",
+            "script" : "stream|from().measurement('cpu')",
+            "dot" : "digraph TEMPLATE_ID { ... }",
+            "error" : ""
+        },
+        {
+            "link" : {"rel":"self", "href":"/kapacitor/v1/templates/ANOTHER_TEMPLATE_ID"},
+            "id" : "ANOTHER_TEMPLATE_ID",
+            "type" : "stream",
+            "script" : "stream|from().measurement('cpu')",
+            "dot" : "digraph ANOTHER_TEMPLATE_ID{ ... }",
+            "error" : ""
+        }
+    ]
+}
+```
+
+Optionally, specify a glob `pattern` to list only matching templates.
+
+```
+GET /kapacitor/v1/templates?pattern=TEMPLATE*
+```
+
+```json
+{
+    "templates" : [
+        {
+            "link" : {"rel":"self", "href":"/kapacitor/v1/templates/TEMPLATE_ID"},
+            "id" : "TEMPLATE_ID",
+            "type" : "stream",
+            "script" : "stream|from().measurement('cpu')",
+            "dot" : "digraph TEMPLATE_ID { ... }",
+            "error" : ""
+        }
+    ]
+}
+```
+
+Get all templates, but only the `script` and `error` fields.
+
+```
+GET /kapacitor/v1/templates?fields=script&fields=error
+```
+
+```json
+{
+    "templates" : [
+        {
+            "link" : {"rel":"self", "href":"/kapacitor/v1/templates/TEMPLATE_ID"},
+            "id" : "TEMPLATE_ID",
+            "script" : "stream|from().measurement('cpu')",
+            "error" : ""
+        },
+        {
+            "link" : {"rel":"self", "href":"/kapacitor/v1/templates/ANOTHER_TEMPLATE_ID"},
+            "id" : "ANOTHER_TEMPLATE_ID",
+            "script" : "stream|from().measurement('cpu')",
+            "error" : ""
+        }
+    ]
+}
+```
+
+#### Response
+
+| Code | Meaning |
+| ---- | ------- |
+| 200 | Success |
+
+>NOTE: If the pattern does not match any templates, an empty list is returned with a 200 success.
+
+
+## Recordings
+
+Kapacitor can save recordings of data and replay them against a specified task.
+
+### Start Recording
+
+Kapacitor provides three methods for recording data.
+To create a recording, make a POST request to the `/kapacitor/v1/recordings/METHOD` endpoint, where `METHOD` is one of the following:
+
+| Method | Description |
+| ------ | ----------- |
+| stream | Record the incoming stream of data. |
+| batch | Record the results of the queries in a batch task. |
+| query | Record the result of an explicit query. |
+
+The request returns once the recording is started and does not wait for it to finish.
+A recording ID is returned so the recording can be identified later.
+
+##### Stream
+
+| Parameter | Purpose |
+| --------- | ------- |
+| id | Unique identifier for the recording. If empty, a random one is chosen. |
+| task | ID of a task, used to only record data for the DBRPs of the task. |
+| stop | Record stream data until the stop date. |
+
+##### Batch
+
+| Parameter | Purpose |
+| --------- | ------- |
+| id | Unique identifier for the recording. If empty, a random one is chosen. |
+| task | ID of a task, records the results of the queries defined in the task. |
+| start | Earliest date for which data will be recorded. RFC3339Nano formatted. |
+| stop | Latest date for which data will be recorded. If not specified, uses the current time. RFC3339Nano formatted. |
+
+##### Query
+
+| Parameter | Purpose |
+| --------- | ------- |
+| id | Unique identifier for the recording. If empty, a random one is chosen. |
+| type | Type of recording, `stream` or `batch`. |
+| query | Query to execute. |
+| cluster | Name of a configured InfluxDB cluster. If empty, uses the default cluster. |
+
+>NOTE: A recording itself is typed as either a stream or batch recording and can only be replayed to a task of a corresponding type.
+Therefore when you record the result of a raw query you must specify the type recording you wish to create. + + +#### Example + +Create a recording using the `stream` method + +``` +POST /kapacitor/v1/recordings/stream +{ + "task" : "TASK_ID", + "stop" : "2006-01-02T15:04:05Z07:00" +} +``` + +Create a recording using the `batch` method specifying a start time. + +``` +POST /kapacitor/v1/recordings/batch +{ + "task" : "TASK_ID", + "start" : "2006-01-02T15:04:05Z07:00" +} +``` + +Create a recording using the `query` method specifying a `stream` type. + +``` +POST /kapacitor/v1/recordings/query +{ + "query" : "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)", + "type" : "stream" +} +``` + +Create a recording using the `query` method specifying a `batch` type. + +``` +POST /kapacitor/v1/recordings/query +{ + "query" : "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)", + "type" : "batch" +} +``` + +Create a recording with a custom ID. + +``` +POST /kapacitor/v1/recordings/query +{ + "id" : "MY_RECORDING_ID", + "query" : "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)", + "type" : "batch" +} +``` + +#### Response + +All recordings are assigned an ID which is returned in this format with a link. + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, + "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", + "type" : "stream", + "size" : 0, + "date" : "2006-01-02T15:04:05Z07:00", + "error" : "", + "status" : "running", + "progress" : 0 +} +``` + +| Code | Meaning | +| ---- | ------- | +| 201 | Success, the recording has started. | + +### Wait for Recording + +In order to determine when a recording has finished you must make a GET request to the returned link typically something like `/kapacitor/v1/recordings/RECORDING_ID`. + +A recording has these read only properties. + +| Property | Description | +| -------- | ----------- | +| size | Size of the recording on disk in bytes. | +| date | Date the recording finished. | +| error | Any error encountered when creating the recording. | +| status | One of `recording` or `finished`. | +| progress | Number between 0 and 1 indicating the approximate progress of the recording. | + + +#### Example + +``` +GET /kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0 +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, + "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", + "type" : "stream", + "size" : 1980353, + "date" : "2006-01-02T15:04:05Z07:00", + "error" : "", + "status" : "running", + "progress" : 0.75 +} +``` + +Once the recording is complete. + +``` +GET /kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0 +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, + "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", + "type" : "stream", + "size" : 1980353, + "date" : "2006-01-02T15:04:05Z07:00", + "error" : "", + "status" : "finished", + "progress" : 1 +} +``` + +Or if the recording fails. 
+ +``` +GET /kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0 +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, + "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", + "type" : "stream", + "size" : 1980353, + "date" : "2006-01-02T15:04:05Z07:00", + "error" : "error message explaining failure", + "status" : "failed", + "progress" : 1 +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success, the recording is no longer running. | +| 202 | Success, the recording exists but is not finished. | +| 404 | No such recording exists. | + +### Deleting recordings + +To delete a recording make a DELETE request to the `/kapacitor/v1/recordings/RECORDING_ID` endpoint. + +``` +DELETE /kapacitor/v1/recordings/RECORDING_ID +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 204 | Success | + +>NOTE: Deleting a non-existent recording is not an error and will return a 204 success. + +### Listing recordings + +To list all recordings make a GET request to the `/kapacitor/v1/recordings` endpoint. +Recordings are sorted by date. + +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| pattern | | Filter results based on the pattern. Uses standard shell glob matching, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. | +| fields | | List of fields to return. If empty returns all fields. Fields `id` and `link` are always returned. | +| offset | 0 | Offset count for paginating through tasks. | +| limit | 100 | Maximum number of tasks to return. | + +#### Example + +``` +GET /kapacitor/v1/recordings +``` + +```json +{ + "recordings" : [ + { + "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/e24db07d-1646-4bb3-a445-828f5049bea0"}, + "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", + "type" : "stream", + "size" : 1980353, + "date" : "2006-01-02T15:04:05Z07:00", + "error" : "", + "status" : "finished", + "progress" : 1 + }, + { + "link" : {"rel": "self", "href": "/kapacitor/v1/recordings/8a4c06c6-30fb-42f4-ac4a-808aa31278f6"}, + "id" : "8a4c06c6-30fb-42f4-ac4a-808aa31278f6", + "type" : "batch", + "size" : 216819562, + "date" : "2006-01-02T15:04:05Z07:00", + "error" : "", + "status" : "finished", + "progress" : 1 + } + ] +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success | + + +## Replays + +### Replaying a recording + +To replay a recording make a POST request to `/kapacitor/v1/replays/` + +| Parameter | Default | Purpose | +| ---------- | ------- | ------- | +| id | random | Unique identifier for the replay. If empty a random ID is chosen. | +| task | | ID of task. | +| recording | | ID of recording. | +| recording-time | false | If true, use the times in the recording, otherwise adjust times relative to the current time. | +| clock | fast | One of `fast` or `real`. If `real` wait for real time to pass corresponding with the time in the recordings. If `fast` replay data without delay. For example, if clock is `real` then a stream recording of duration 5m will take 5m to replay. | + +#### Example + +Replay a recording using default parameters. + +``` +POST /kapacitor/v1/replays/ +{ + "task" : "TASK_ID", + "recording" : "RECORDING_ID" +} +``` + +Replay a recording in real-time mode and preserve recording times. + +``` +POST /kapacitor/v1/replays/ +{ + "task" : "TASK_ID", + "recording" : "RECORDING_ID", + "clock" : "real", + "recording-time" : true, +} +``` + +Replay a recording using a custom ID. 
+ +``` +POST /kapacitor/v1/replays/ +{ + "id" : "MY_REPLAY_ID", + "task" : "TASK_ID", + "recording" : "RECORDING_ID" +} +``` + +#### Response + +The request returns once the replay is started and provides a replay ID and link. + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, + "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", + "task" : "TASK_ID", + "recording" : "RECORDING_ID", + "clock" : "fast", + "recording-time" : false, + "status" : "running", + "progress" : 0, + "error" : "", + "stats": {}, +} +``` + +| Code | Meaning | +| ---- | ------- | +| 201 | Success, replay has started. | + +### Replaying data without recording + +It is also possible to replay data directly without recording it first. +This is done by issuing a request similar to either a `batch` or `query` recording +but instead of storing the data it is immediately replayed against a task. +Using a `stream` recording for immediately replaying against a task is equivalent to enabling the task +and so is not supported. + +| Method | Description | +| ------ | ----------- | +| batch | Replay the results of the queries in a batch task. | +| query | Replay the results of an explicit query. | + + +##### Batch + +| Parameter | Default | Purpose | +| --------- | ------- | ------- | +| id | random | Unique identifier for the replay. If empty a random one will be chosen. | +| task | | ID of a task, replays the results of the queries defined in the task against the task. | +| start | | Earliest date for which data will be replayed. RFC3339Nano formatted. | +| stop | now | Latest date for which data will be replayed. If not specified uses the current time. RFC3339Nano formatted data. | +| recording-time | false | If true, use the times in the recording, otherwise adjust times relative to the current time. | +| clock | fast | One of `fast` or `real`. If `real` wait for real time to pass corresponding with the time in the recordings. If `fast` replay data without delay. For example, if clock is `real` then a stream recording of duration 5m will take 5m to replay. | + +##### Query + +| Parameter | Default | Purpose | +| --------- | ------- | ------- | +| id | random | Unique identifier for the replay. If empty a random one will be chosen. | +| task | | ID of a task, replays the results of the queries against the task. | +| query | | Query to execute. | +| cluster | | Name of a configured InfluxDB cluster. If empty uses the default cluster. | +| recording-time | false | If true, use the times in the recording, otherwise adjust times relative to the current time. | +| clock | fast | One of `fast` or `real`. If `real` wait for real time to pass corresponding with the time in the recordings. If `fast` replay data without delay. For example, if clock is `real` then a stream recording of duration 5m will take 5m to replay. | + +#### Example + +Perform a replay using the `batch` method specifying a start time. + +``` +POST /kapacitor/v1/replays/batch +{ + "task" : "TASK_ID", + "start" : "2006-01-02T15:04:05Z07:00" +} +``` + +Replay the results of the query against the task. + +``` +POST /kapacitor/v1/replays/query +{ + "task" : "TASK_ID", + "query" : "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)", +} +``` + +Create a replay with a custom ID. 
+ +``` +POST /kapacitor/v1/replays/query +{ + "id" : "MY_REPLAY_ID", + "task" : "TASK_ID", + "query" : "SELECT mean(usage_idle) FROM cpu WHERE time > now() - 1h GROUP BY time(10m)", +} +``` + +#### Response + +All replays are assigned an ID which is returned in this format with a link. + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/replays/e24db07d-1646-4bb3-a445-828f5049bea0"}, + "id" : "e24db07d-1646-4bb3-a445-828f5049bea0", + "task" : "TASK_ID", + "recording" : "", + "clock" : "fast", + "recording-time" : false, + "status" : "running", + "progress" : 0.57, + "error" : "", + "stats": {} +} +``` + +>NOTE: For a replay created in this manner the `recording` ID will be empty since no recording was used or created. + + +| Code | Meaning | +| ---- | ------- | +| 201 | Success, the replay has started. | + + +### Waiting for replays + +Like recordings you make a GET request to the `/kapacitor/v1/replays/REPLAY_ID` endpoint to get the status of the replay. + +A replay has these read only properties in addition to the properties listed [above](#replaying-a-recording). + +| Property | Description | +| -------- | ----------- | +| status | One of `replaying` or `finished`. | +| progress | Number between 0 and 1 indicating the approximate progress of the replay. | +| error | Any error that occured while perfoming the replay | + + +#### Example + +Get the status of a replay. + +``` +GET /kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, + "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", + "task" : "TASK_ID", + "recording" : "RECORDING_ID", + "clock" : "fast", + "recording-time" : false, + "status" : "running", + "progress" : 0.57, + "error" : "", + "stats": {} +} +``` + +Once the replay is complete. + +``` +GET /kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, + "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", + "task" : "TASK_ID", + "recording" : "RECORDING_ID", + "clock" : "fast", + "recording-time" : false, + "status" : "finished", + "progress" : 1, + "error" : "", + "stats": { + "task-stats": { + "throughput": 0 + }, + "node-stats": { + "alert2": { + "alerts_triggered": 5, + "avg_exec_time_ns": 1267486, + "collected": 8, + "crits_triggered": 2, + "emitted": 0, + "errors": 0, + "infos_triggered": 0, + "oks_triggered": 1, + "warns_triggered": 2, + "working_cardinality": 1 + }, + "from1": { + "avg_exec_time_ns": 0, + "collected": 8, + "emitted": 8, + "errors": 0, + "working_cardinality": 0 + }, + "stream0": { + "avg_exec_time_ns": 0, + "collected": 8, + "emitted": 8, + "errors": 0, + "working_cardinality": 0 + } + } + } +} +``` + +If the replay has finished, the `stats` field contains the statistics about the replay. + +Or if the replay fails. + +``` +GET /kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"}, + "id" : "ad95677b-096b-40c8-82a8-912706f41d4c", + "task" : "TASK_ID", + "recording" : "RECORDING_ID", + "clock" : "fast", + "recording-time" : false, + "status" : "failed", + "progress" : 1, + "error" : "error message explaining failure", + "stats": {} +} +``` + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success, replay is no longer running. 
|
+| 202 | Success, the replay exists but is not finished. |
+| 404 | No such replay exists. |
+
+### Deleting replays
+
+To delete a replay, make a DELETE request to the `/kapacitor/v1/replays/REPLAY_ID` endpoint.
+
+```
+DELETE /kapacitor/v1/replays/REPLAY_ID
+```
+
+#### Response
+
+| Code | Meaning |
+| ---- | ------- |
+| 204 | Success |
+
+>NOTE: Deleting a non-existent replay is not an error and will return a 204 success.
+
+
+### Listing replays
+
+You can list replays for a given recording by making a GET request to `/kapacitor/v1/replays`.
+
+| Query Parameter | Default | Purpose |
+| --------------- | ------- | ------- |
+| pattern | | Filter results based on the pattern. Uses standard shell glob matching, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. |
+| fields | | List of fields to return. If empty, returns all fields. Fields `id` and `link` are always returned. |
+| offset | 0 | Offset count for paginating through replays. |
+| limit | 100 | Maximum number of replays to return. |
+
+#### Example
+
+```
+GET /kapacitor/v1/replays
+```
+
+```json
+{
+    "replays": [
+        {
+            "link" : {"rel": "self", "href": "/kapacitor/v1/replays/ad95677b-096b-40c8-82a8-912706f41d4c"},
+            "id" : "ad95677b-096b-40c8-82a8-912706f41d4c",
+            "task" : "TASK_ID",
+            "recording" : "RECORDING_ID",
+            "clock" : "fast",
+            "recording-time" : false,
+            "status" : "finished",
+            "progress" : 1,
+            "error" : ""
+        },
+        {
+            "link" : {"rel": "self", "href": "/kapacitor/v1/replays/be33f0a1-0272-4019-8662-c730706dac7d"},
+            "id" : "be33f0a1-0272-4019-8662-c730706dac7d",
+            "task" : "TASK_ID",
+            "recording" : "RECORDING_ID",
+            "clock" : "fast",
+            "recording-time" : false,
+            "status" : "finished",
+            "progress" : 1,
+            "error" : ""
+        }
+    ]
+}
+```
+
+## Alerts
+
+Kapacitor can generate and handle alerts.
+The API allows you to see the current state of any alert and to configure various handlers for the alerts.
+
+### Topics
+
+Alerts are grouped into topics.
+An alert handler "listens" on a topic for any new events.
+You can either specify the alert topic in the TICKscript or one will be generated for you.
+
+### Creating and removing topics
+
+Topics are created dynamically when they are referenced in TICKscripts or in handlers.
+To delete a topic, make a `DELETE` request to `/kapacitor/v1/alerts/topics/<topic-id>`.
+This will delete all known events and state for the topic.
+
+>NOTE: Since topics are created dynamically, a deleted topic may reappear if a new event is created for it.
+
+
+#### Example
+
+```
+DELETE /kapacitor/v1/alerts/topics/system
+```
+
+### Listing topics
+
+To query the list of available topics, make a GET request to `/kapacitor/v1/alerts/topics`.
+
+| Query Parameter | Default | Purpose |
+| --------------- | ------- | ------- |
+| min-level | OK | Only return topics with a level greater than or equal to the min-level. Valid values include OK, INFO, WARNING, CRITICAL. |
+| pattern | * | Filter results based on the pattern. Uses standard shell glob matching on the topic ID, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. |
+
+
+#### Example
+
+Get all topics.
+
+```
+GET /kapacitor/v1/alerts/topics
+```
+
+```
+{
+    "link": {"rel":"self","href":"/kapacitor/v1/alerts/topics"},
+    "topics": [
+        {
+            "link": {"rel":"self","href":"/kapacitor/v1/alerts/topics/system"},
+            "events-link" : {"rel":"events","href":"/kapacitor/v1/alerts/topics/system/events"},
+            "handlers-link": {"rel":"handlers","href":"/kapacitor/v1/alerts/topics/system/handlers"},
+            "id": "system",
+            "level":"CRITICAL"
+        },
+        {
+            "link": {"rel":"self","href":"/kapacitor/v1/alerts/topics/app"},
+            "events-link" : {"rel":"events","href":"/kapacitor/v1/alerts/topics/app/events"},
+            "handlers-link": {"rel":"handlers","href":"/kapacitor/v1/alerts/topics/app/handlers"},
+            "id": "app",
+            "level":"OK"
+        }
+    ]
+}
+```
+
+Get all topics in a WARNING or CRITICAL state.
+
+
+```
+GET /kapacitor/v1/alerts/topics?min-level=WARNING
+```
+
+```
+{
+    "link": {"rel":"self","href":"/kapacitor/v1/alerts/topics"},
+    "topics": [
+        {
+            "link": {"rel":"self","href":"/kapacitor/v1/alerts/topics/system"},
+            "events-link" : {"rel":"events","href":"/kapacitor/v1/alerts/topics/system/events"},
+            "handlers-link": {"rel":"handlers","href":"/kapacitor/v1/alerts/topics/system/handlers"},
+            "id": "system",
+            "level":"CRITICAL"
+        }
+    ]
+}
+```
+
+### Topic state
+
+To query the state of a topic, make a GET request to `/kapacitor/v1/alerts/topics/<topic-id>`.
+
+#### Example
+
+```
+GET /kapacitor/v1/alerts/topics/system
+```
+
+```
+{
+    "link": {"rel":"self","href":"/kapacitor/v1/alerts/topics/system"},
+    "id": "system",
+    "level":"CRITICAL",
+    "events-link" : {"rel":"events","href":"/kapacitor/v1/alerts/topics/system/events"},
+    "handlers-link": {"rel":"handlers","href":"/kapacitor/v1/alerts/topics/system/handlers"}
+}
+```
+
+### Listing topic events
+
+To query all the events within a topic, make a GET request to `/kapacitor/v1/alerts/topics/<topic-id>/events`.
+
+| Query Parameter | Default | Purpose |
+| --------------- | ------- | ------- |
+| min-level | OK | Only return events with a level greater than or equal to the min-level. Valid values include OK, INFO, WARNING, CRITICAL. |
+
+#### Example
+
+```
+GET /kapacitor/v1/alerts/topics/system/events
+```
+
+```
+{
+    "link": {"rel":"self","href":"/kapacitor/v1/alerts/topics/system/events"},
+    "topic": "system",
+    "events": [
+        {
+            "link":{"rel":"self","href":"/kapacitor/v1/alerts/topics/system/events/cpu"},
+            "id": "cpu",
+            "state": {
+                "level": "WARNING",
+                "message": "cpu is WARNING",
+                "time": "2016-12-01T00:00:00Z",
+                "duration": "5m"
+            }
+        },
+        {
+            "link":{"rel":"self","href":"/kapacitor/v1/alerts/topics/system/events/mem"},
+            "id": "mem",
+            "state": {
+                "level": "CRITICAL",
+                "message": "mem is CRITICAL",
+                "time": "2016-12-01T00:10:00Z",
+                "duration": "1m"
+            }
+        }
+    ]
+}
+```
+
+### Topic events
+
+You can query a specific event within a topic by making a GET request to `/kapacitor/v1/alerts/topics/<topic-id>/events/<event-id>`.
+
+#### Example
+
+```
+GET /kapacitor/v1/alerts/topics/system/events/cpu
+```
+
+```
+{
+    "link":{"rel":"self","href":"/kapacitor/v1/alerts/topics/system/events/cpu"},
+    "id": "cpu",
+    "state": {
+        "level": "WARNING",
+        "message": "cpu is WARNING",
+        "time": "2016-12-01T00:00:00Z",
+        "duration": "5m"
+    }
+}
+```
+
+### Listing topic handlers
+
+Handlers are created within a topic.
+You can get a list of handlers configured for a topic by making a GET request to `/kapacitor/v1/alerts/topics/<topic-id>/handlers`.
+
+| Query Parameter | Default | Purpose |
+| --------------- | ------- | ------- |
+| pattern | * | Filter results based on the pattern. Uses standard shell glob matching on the handler ID, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. |
+
+>NOTE: Anonymous handlers (created automatically from TICKscripts) will not be listed under their associated anonymous topic as they are not configured via the API.
+
+#### Example
+
+
+Get the handlers for the `system` topic.
+
+```
+GET /kapacitor/v1/alerts/topics/system/handlers
+```
+
+```
+{
+    "link":{"rel":"self","href":"/kapacitor/v1/alerts/topics/system/handlers"},
+    "topic": "system",
+    "handlers": [
+        {
+            "link":{"rel":"self","href":"/kapacitor/v1/alerts/topics/system/handlers/slack"},
+            "id":"slack",
+            "kind":"slack",
+            "options":{
+                "channel":"#alerts"
+            }
+        },
+        {
+            "link":{"rel":"self","href":"/kapacitor/v1/alerts/topics/system/handlers/smtp"},
+            "id":"smtp",
+            "kind":"smtp"
+        }
+    ]
+}
+```
+
+This `main:alert_cpu:alert5` topic represents an auto-generated topic from a task that has defined handlers explicitly in the TICKscript.
+Anonymous handlers cannot be listed or modified via the API.
+
+```
+GET /kapacitor/v1/alerts/topics/main:alert_cpu:alert5/handlers
+```
+
+```
+{
+    "link":{"rel":"self","href":"/kapacitor/v1/alerts/topics/main:alert_cpu:alert5/handlers"},
+    "topic": "main:alert_cpu:alert5",
+    "handlers": null
+}
+```
+
+### Getting handlers
+
+To query information about a specific handler, make a GET request to `/kapacitor/v1/alerts/topics/<topic-id>/handlers/<handler-id>`.
+
+#### Example
+
+```
+GET /kapacitor/v1/alerts/topics/system/handlers/slack
+```
+
+```
+{
+    "link": {
+        "rel": "self",
+        "href": "/kapacitor/v1/alerts/topics/system/handlers/slack"
+    },
+    "id": "slack",
+    "kind": "slack",
+    "options": {
+        "channel": "#alerts"
+    },
+    "match": ""
+}
+```
+
+### Creating handlers
+
+To create a new handler, make a POST request to `/kapacitor/v1/alerts/topics/system/handlers`.
+
+```
+POST /kapacitor/v1/alerts/topics/system/handlers
+{
+    "id":"slack",
+    "kind":"slack",
+    "options": {
+        "channel":"#alerts"
+    }
+}
+```
+
+```
+{
+    "link": {
+        "rel": "self",
+        "href": "/kapacitor/v1/alerts/topics/system/handlers/slack"
+    },
+    "id": "slack",
+    "kind": "slack",
+    "options": {
+        "channel": "#alerts"
+    },
+    "match": ""
+}
+```
+
+### Updating handlers
+
+To update an existing handler, make either a PUT or PATCH request to `/kapacitor/v1/alerts/topics/system/handlers/<handler-id>`.
+
+Using PUT replaces the entire handler; using PATCH modifies specific parts of the handler.
+
+PATCH applies a JSON patch object to the existing handler, see [rfc6902](https://tools.ietf.org/html/rfc6902) for more details.
+
+#### Example
+
+Update the topics and options for a handler using the PATCH method.
+
+```
+PATCH /kapacitor/v1/alerts/topics/system/handlers/slack
+[
+    {"op":"replace", "path":"/topics", "value":["system", "test"]},
+    {"op":"replace", "path":"/options/channel", "value":"#testing_alerts"}
+]
+```
+
+```
+{
+    "link": {
+        "rel": "self",
+        "href": "/kapacitor/v1/alerts/topics/system/handlers/slack"
+    },
+    "id": "slack",
+    "kind": "slack",
+    "options": {
+        "channel": "#testing_alerts"
+    },
+    "match": ""
+}
+```
+
+Replace an entire handler using the PUT method.
+
+```
+PUT /kapacitor/v1/alerts/topics/system/handlers/slack
+{
+    "id": "slack",
+    "kind":"slack",
+    "options": {
+        "channel":"#testing_alerts"
+    }
+}
+```
+
+```
+{
+    "link": {
+        "rel": "self",
+        "href": "/kapacitor/v1/alerts/topics/system/handlers/slack"
+    },
+    "id": "slack",
+    "kind": "slack",
+    "options": {
+        "channel": "#testing_alerts"
+    },
+    "match": ""
+}
+```
+
+### Removing handlers
+
+To remove an existing handler, make a DELETE request to `/kapacitor/v1/alerts/topics/system/handlers/<handler-id>`.
+
+```
+DELETE /kapacitor/v1/alerts/topics/system/handlers/<handler-id>
+```
+
+
+## Overriding configurations
+
+You can set configuration overrides via the API for certain sections of the config.
+Overrides set via the API always take precedence over what may exist in the configuration file.
+The sections available for overriding include the InfluxDB clusters and the alert handler sections.
+
+
+The intent of the API is to allow for dynamic configuration of sensitive credentials without requiring that the Kapacitor process be restarted.
+As such, it is recommended to use either the configuration file or the API to manage these configuration sections, but not both.
+This helps eliminate any confusion as to the source of a given configuration option.
+
+### Enabling and disabling configuration overrides
+
+By default, the ability to override the configuration is enabled.
+If you do not wish to use this feature, disable it via the `config-override` configuration section.
+
+```
+[config-override]
+  enabled = false
+```
+
+If the `config-override` service is disabled, the relevant API endpoints will return 403 forbidden errors.
+
+### Recovering from bad configurations
+
+If somehow you have created a configuration that causes Kapacitor to crash or otherwise not function,
+you can disable applying overrides during startup with the `skip-config-overrides` top-level configuration option.
+
+```
+# This configuration option is only a safe guard and should not be needed in practice.
+skip-config-overrides = true
+```
+
+This allows you to still access the API to fix any unwanted configuration without applying that configuration during startup.
+
+>NOTE: It is probably easiest and safest to set this option as the environment variable `KAPACITOR_SKIP_CONFIG_OVERRIDES=true`, since it is meant to be temporary.
+That way you do not have to modify your on-disk configuration file or accidentally leave it in place, causing issues later on.
+
+### Overview
+
+The paths for the configuration API endpoints are as follows:
+
+`/kapacitor/v1/config/<section>/[<element>]`
+
+Example:
+
+```
+/kapacitor/v1/config/smtp/
+/kapacitor/v1/config/influxdb/localhost
+/kapacitor/v1/config/influxdb/remote
+```
+
+The optional `<element>` path component corresponds to a specific item in a list of entries.
+
+For example, the above paths correspond to the following configuration sections:
+
+```
+[smtp]
+    # SMTP configuration here
+
+[[influxdb]]
+    name = "localhost"
+    # InfluxDB configuration here for the "localhost" cluster
+
+[[influxdb]]
+    name = "remote"
+    # InfluxDB configuration here for the "remote" cluster
+```
+
+
+### Retrieving the current configuration
+
+To retrieve the current configuration, perform a GET request to the desired path.
+The returned configuration is the merged result of the values in the configuration file and what has been stored in the overrides.
+The returned content is a JSON-encoded version of the configuration objects.
+
+Sensitive information is not returned in the response body.
+Instead, a Boolean value appears in its place, indicating whether the value is empty.
+A list of which options are redacted is returned for each element.
+
+#### Example
+
+Retrieve all the configuration sections which can be overridden.
+
+```
+GET /kapacitor/v1/config
+```
+
+```json
+{
+    "link" : {"rel": "self", "href": "/kapacitor/v1/config"},
+    "sections": {
+        "influxdb": {
+            "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb"},
+            "elements": [
+                {
+                    "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/localhost"},
+                    "options": {
+                        "name": "localhost",
+                        "urls": ["http://localhost:8086"],
+                        "default": true,
+                        "username": "",
+                        "password": false
+                    },
+                    "redacted" : [
+                        "password"
+                    ]
+                },
+                {
+                    "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/remote"},
+                    "options": {
+                        "name": "remote",
+                        "urls": ["http://influxdb.example.com:8086"],
+                        "default": false,
+                        "username": "jim",
+                        "password": true
+                    },
+                    "redacted" : [
+                        "password"
+                    ]
+                }
+            ]
+        },
+        "smtp": {
+            "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp"},
+            "elements": [{
+                "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp/"},
+                "options": {
+                    "enabled": true,
+                    "host": "smtp.example.com",
+                    "port": 587,
+                    "username": "bob",
+                    "password": true,
+                    "no-verify": false,
+                    "global": false,
+                    "to": [ "oncall@example.com"],
+                    "from": "kapacitor@example.com",
+                    "idle-timeout": "30s"
+                },
+                "redacted" : [
+                    "password"
+                ]
+            }]
+        }
+    }
+}
+```
+
+
+Retrieve only the SMTP section.
+
+```
+GET /kapacitor/v1/config/smtp
+```
+
+```json
+{
+    "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp"},
+    "elements": [{
+        "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp/"},
+        "options": {
+            "enabled": true,
+            "host": "smtp.example.com",
+            "port": 587,
+            "username": "bob",
+            "password": true,
+            "no-verify": false,
+            "global": false,
+            "to": ["oncall@example.com"],
+            "from": "kapacitor@example.com",
+            "idle-timeout": "30s"
+        },
+        "redacted" : [
+            "password"
+        ]
+    }]
+}
+```
+
+Retrieve the single element from the SMTP section.
+ +``` +GET /kapacitor/v1/config/smtp/ +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config/smtp/"}, + "options": { + "enabled": true, + "host": "smtp.example.com", + "port": 587, + "username": "bob", + "password": true, + "no-verify": false, + "global": false, + "to": ["oncall@example.com"], + "from": "kapacitor@example.com", + "idle-timeout": "30s" + }, + "redacted" : [ + "password" + ] +} +``` + +>NOTE: Sections that are not lists can be treated as having an empty string for their element name. + +Retrieve only the InfluxDB section. + +``` +GET /kapacitor/v1/config/influxdb +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb"}, + "elements" : [ + { + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/localhost"}, + "options": { + "name": "localhost", + "urls": ["http://localhost:8086"], + "default": true, + "username": "", + "password": false + }, + "redacted" : [ + "password" + ] + }, + { + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/remote"}, + "options": { + "name": "remote", + "urls": ["http://influxdb.example.com:8086"], + "default": false, + "username": "jim", + "password": true + }, + "redacted" : [ + "password" + ] + } + ] +} +``` + +Retrieve only the `remote` element of the InfluxDB section. + +``` +GET /kapacitor/v1/config/influxdb/remote +``` + +```json +{ + "link" : {"rel": "self", "href": "/kapacitor/v1/config/influxdb/remote"}, + "options": { + "name": "remote", + "urls": ["http://influxdb.example.com:8086"], + "default": false, + "username": "jim", + "password": true + }, + "redacted" : [ + "password" + ] +} +``` + +>NOTE: The password value is not returned, but the `true` value indicates that a non empty password has been set. + +#### Response + +| Code | Meaning | +| ---- | ------- | +| 200 | Success | +| 403 | Config override service not enabled | + +### Overriding configuration values + +To override a value in the configuration make a POST request to the desired path. +The request should contain a JSON object describing what should be modified. + +Use the following top level actions: + +| Key | Description | +| --- | ----------- | +| set | Set the value in the configuration overrides. | +| delete | Delete the value from the configuration overrides. | +| add | Add a new element to a list configuration section. | +| remove | Remove a previously added element from a list configuration section. | + +Configuration options not specified in the request will be left unmodified. + +#### Example + +To disable the SMTP alert handler: + +``` +POST /kapacitor/v1/config/smtp/ +{ + "set":{ + "enabled": false + } +} +``` + +To delete the override for the SMTP alert handler: + +``` +POST /kapacitor/v1/config/smtp/ +{ + "delete":[ + "enabled" + ] +} +``` + +Actions can be combined in a single request. +Enable the SMTP handler, set its host and remove the port override. + +``` +POST /kapacitor/v1/config/smtp/ +{ + "set":{ + "enabled": true, + "host": "smtp.example.com" + }, + "delete":[ + "port" + ] +} +``` + +Add a new InfluxDB cluster: + +``` +POST /kapacitor/v1/config/influxdb +{ + "add":{ + "name": "example", + "urls": ["https://influxdb.example.com:8086"], + "default": true, + "disable-subscriptions": true + } +} +``` + +Remove an existing InfluxDB cluster override: + +``` +POST /kapacitor/v1/config/influxdb +{ + "remove":[ + "example" + ] +} +``` + +>NOTE: Only the overrides can be removed, this means that InfluxDB clusters that exist in the configuration cannot be removed. 
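+The same override actions can be sent with any HTTP client.
+The following is a minimal `curl` sketch, assuming a Kapacitor server listening locally on the default port 9092 and an existing `remote` InfluxDB element (adjust both to your deployment):
+
+```bash
+# Set the username on the "remote" InfluxDB element.
+# Options not named in "set" are left unmodified.
+curl -XPOST 'http://localhost:9092/kapacitor/v1/config/influxdb/remote' \
+  -H 'Content-Type: application/json' \
+  -d '{"set": {"username": "jim"}}'
+
+# Read back the merged configuration; sensitive values remain redacted.
+curl 'http://localhost:9092/kapacitor/v1/config/influxdb/remote'
+```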
+
+Modify an existing InfluxDB cluster:
+
+```
+POST /kapacitor/v1/config/influxdb/remote
+{
+    "set":{
+        "disable-subscriptions": false
+    },
+    "delete": [
+        "default"
+    ]
+}
+```
+
+#### Response
+
+| Code | Meaning |
+| ---- | ------- |
+| 200 | Success |
+| 403 | Config override service not enabled |
+| 404 | The specified configuration section/option does not exist |
+
+## Storage
+
+Kapacitor exposes some operations that can be performed on the underlying storage.
+
+>WARNING: Every storage operation directly manipulates the underlying storage database.
+Always make a backup of the database before performing any of these operations.
+
+### Backing up the storage
+
+Making a GET request to `/kapacitor/v1/storage/backup` will return a dump of the Kapacitor database.
+To restore from a backup, replace the `kapacitor.db` file with the contents of the backup request.
+
+```
+# Create a backup.
+curl http://localhost:9092/kapacitor/v1/storage/backup > kapacitor.db
+```
+
+```
+# Restore a backup.
+# The destination path is dependent on your configuration.
+cp kapacitor.db ~/.kapacitor/kapacitor.db
+```
+
+### Stores
+
+Kapacitor's underlying storage system is organized into different stores.
+Various actions can be performed on each individual store.
+
+>WARNING: Every storage operation directly manipulates the underlying storage database.
+Always make a backup of the database before performing any of these operations.
+
+Available actions:
+
+| Action | Description |
+| ------ | ----------- |
+| rebuild | Rebuild all indexes in a store; this operation can be very expensive. |
+
+To perform an action, make a POST request to the `/kapacitor/v1/storage/stores/<store>` endpoint.
+
+#### Example
+
+```
+POST /kapacitor/v1/storage/stores/tasks
+{
+    "action" : "rebuild"
+}
+```
+
+#### Response
+
+| Code | Meaning |
+| ---- | ------- |
+| 204 | Success |
+| 400 | Unknown action |
+| 404 | The specified store does not exist |
+
+## Logging
+
+The logging API is released under [Technical Preview](#technical-preview).
+Kapacitor allows users to retrieve the Kapacitor logs remotely using HTTP
+[Chunked Transfer Encoding](https://en.wikipedia.org/wiki/Chunked_transfer_encoding).
+The logs may be queried using key-value pairs corresponding to the log entry.
+These key-value pairs are specified as query parameters.
+
+The logging API returns logs in two formats:
+
+* [logfmt](https://brandur.org/logfmt)
+* JSON
+
+To receive logs in JSON format, specify `Content-Type: application/json`.
+If Kapacitor receives any content type other than `application/json`, logs will be returned in logfmt format.
+
+Each chunk returned to the client will contain a single complete log entry, followed by a `\n`.
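+To follow the stream from the command line, disable client-side buffering so each chunk prints as it arrives.
+A minimal `curl` sketch (the `task=mytask` filter and the local address are assumptions for illustration):
+
+```bash
+# -N turns off curl's output buffering, so each chunked log entry
+# is printed as soon as the server sends it.
+curl -N -H 'Content-Type: application/json' \
+  'http://localhost:9092/kapacitor/v1preview/logs?task=mytask'
+```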
+
+### Example
+
+#### Logs as JSON
+
+```
+GET /kapacitor/v1preview/logs?task=mytask
+Content-Type: application/json
+```
+
+returns the following:
+
+```
+{"ts":"2017-11-08T17:40:47.183-05:00","lvl":"info","msg":"created log session","service":"sessions","id":"7021fb9d-467e-482f-870c-d811aa9e74b7","content-type":"application/json","tags":"nil"}
+{"ts":"2017-11-08T17:40:47.183-05:00","lvl":"info","msg":"created log session","service":"sessions","id":"7021fb9d-467e-482f-870c-d811aa9e74b7","content-type":"application/json","tags":"nil"}
+{"ts":"2017-11-08T17:40:47.183-05:00","lvl":"info","msg":"created log session","service":"sessions","id":"7021fb9d-467e-482f-870c-d811aa9e74b7","content-type":"application/json","tags":"nil"}
+```
+
+#### Logs as logfmt
+
+```
+GET /kapacitor/v1preview/logs?task=mytask
+```
+
+returns the following:
+
+```
+ts=2017-11-08T17:42:47.014-05:00 lvl=info msg="created log session" service=sessions id=ce4d7819-1e38-4bf4-ba54-78b0a8769b7e content-type=
+ts=2017-11-08T17:42:47.014-05:00 lvl=info msg="created log session" service=sessions id=ce4d7819-1e38-4bf4-ba54-78b0a8769b7e content-type=
+ts=2017-11-08T17:42:47.014-05:00 lvl=info msg="created log session" service=sessions id=ce4d7819-1e38-4bf4-ba54-78b0a8769b7e content-type=
+```
+
+
+## Testing services
+
+Kapacitor makes use of various service integrations.
+The following API endpoints provide a way to run simple tests that ensure a service is configured correctly.
+
+### Listing testable services
+
+A list of services that can be tested is available at the `/kapacitor/v1/service-tests` endpoint.
+
+| Query Parameter | Default | Purpose |
+| --------------- | ------- | ------- |
+| pattern | * | Filter results based on the pattern. Uses standard shell glob matching on the service name, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. |
+
+
+#### Example
+
+```
+GET /kapacitor/v1/service-tests
+```
+
+```
+{
+    "link": {"rel":"self", "href": "/kapacitor/v1/service-tests"},
+    "services" : [
+        {
+            "link": {"rel":"self", "href": "/kapacitor/v1/service-tests/influxdb"},
+            "name": "influxdb",
+            "options": {
+                "cluster": ""
+            }
+        },
+        {
+            "link": {"rel":"self", "href": "/kapacitor/v1/service-tests/slack"},
+            "name": "slack",
+            "options": {
+                "message": "test slack message",
+                "channel": "#alerts",
+                "level": "CRITICAL"
+            }
+        },
+        {
+            "link": {"rel":"self", "href": "/kapacitor/v1/service-tests/smtp"},
+            "name": "smtp",
+            "options": {
+                "to": ["user@example.com"],
+                "subject": "test subject",
+                "body": "test body"
+            }
+        }
+    ]
+}
+```
+
+#### Response
+
+| Code | Meaning |
+| ---- | ------- |
+| 200 | Success |
+
+
+### Testing a service
+
+To test a service, make a POST request to the `/kapacitor/v1/service-tests/<service>` endpoint.
+The contents of the POST body depend on the service being tested.
+To determine the available options, make a GET request to the same endpoint.
+The returned options are also the defaults.
+
+#### Example
+
+To see the available and default options for the Slack service, run the following request.
+
+```
+GET /kapacitor/v1/service-tests/slack
+```
+
+```json
+{
+    "link": {"rel":"self", "href": "/kapacitor/v1/service-tests/slack"},
+    "name": "slack",
+    "options": {
+        "message": "test slack message",
+        "channel": "#alerts",
+        "level": "CRITICAL"
+    }
+}
+```
+
+Test the Slack service integration using custom options:
+
+```
+POST /kapacitor/v1/service-tests/slack
+{
+    "message": "my custom test message",
+    "channel": "@user",
+    "level": "OK"
+}
+```
+
+A successful response looks like:
+
+```json
+{
+    "success": true,
+    "message": ""
+}
+```
+
+A failed response looks like:
+
+```json
+{
+    "success": false,
+    "message": "could not connect to slack"
+}
+```
+
+
+#### Response
+
+| Code | Meaning |
+| ---- | ------- |
+| 200 | Success. Even if the service under test fails, a 200 is returned because the test itself completed correctly. |
+
+
+## Miscellaneous
+
+### Ping
+
+You can 'ping' the Kapacitor server to validate you have a successful connection.
+A ping request does nothing but respond with a 204.
+
+>NOTE: The Kapacitor server version is returned in the `X-Kapacitor-Version` HTTP header on all requests.
+Ping is a useful request if you simply need to verify the version of the server you are talking to.
+
+#### Example
+
+```
+GET /kapacitor/v1/ping
+```
+
+#### Response
+
+| Code | Meaning |
+| ---- | ------- |
+| 204 | Success |
+
+### Sideload reload
+
+You can trigger a reload of all sideload sources by making an HTTP POST request to `kapacitor/v1/sideload/reload`, with an empty body.
+
+#### Example
+
+```
+POST /kapacitor/v1/sideload/reload
+```
+
+#### Response
+
+| Code | Meaning |
+| ---- | ------- |
+| 204 | Success |
+
+### `/debug/vars` HTTP endpoint
+
+Kapacitor exposes statistics and information about its runtime through the `/debug/vars` endpoint, which can be accessed using the following cURL command:
+
+```
+curl http://localhost:9092/kapacitor/v1/debug/vars
+```
+
+Server statistics and information are displayed in JSON format.
+
+>**Note:** You can use the [Telegraf Kapacitor input plugin](https://github.com/influxdata/telegraf/tree/release-1.7/plugins/inputs/kapacitor) to collect metrics (using the `/debug/vars` endpoint) from specified Kapacitor instances. For a list of the measurements and fields, see the plugin README.
+
+
+### `/debug/pprof` HTTP endpoints
+
+Kapacitor supports the Go [net/http/pprof](https://golang.org/pkg/net/http/pprof/) endpoints, which can be useful for troubleshooting. The `pprof` package serves runtime profiling data in the format expected by the _pprof_ visualization tool.
+
+
+```
+curl http://localhost:9092/kapacitor/v1/debug/pprof/
+```
+
+The `/debug/pprof/` endpoint generates an HTML page with a list of built-in Go profiles and hyperlinks for each.
+
+| Profile | Description |
+| :---------------- | :-------------------- |
+| block | Stack traces that led to blocking on synchronization primitives. |
+| goroutine | Stack traces of all current goroutines. |
+| heap | Sampling of stack traces for heap allocations. |
+| mutex | Stack traces of holders of contended mutexes. |
+| threadcreate | Stack traces that led to the creation of new OS threads. |
+
+To access one of the `/debug/pprof/` profiles listed above, use the following cURL request, substituting `<profile>` with the name of the profile. The resulting profile is output to the file specified in `<path/to/output-file>`.
+
+```
+curl -o <path/to/output-file> http://localhost:9092/kapacitor/v1/debug/pprof/<profile>
+```
+
+In the following example, the cURL command outputs the resulting heap profile to the file specified in `<path/to/output-file>`:
+
+```
+curl -o <path/to/output-file> http://localhost:9092/kapacitor/v1/debug/pprof/heap
+```
+
+You can also use the [Go `pprof` interactive tool](https://github.com/google/pprof) to access the Kapacitor `/debug/pprof/` profiles.
+For example, to look at the heap profile of a Kapacitor instance using this tool, you would use a command like this:
+
+```
+go tool pprof http://localhost:9092/kapacitor/v1/debug/pprof/heap
+```
+
+For more information about the Go `net/http/pprof` package and the interactive `pprof` analysis and visualization tool, see:
+
+* [Package pprof (`net/http/pprof`)](https://golang.org/pkg/net/http/pprof/)
+* [`pprof` analysis and visualization tool](https://github.com/google/pprof)
+* [Profiling Go programs](https://blog.golang.org/profiling-go-programs)
+* [Diagnostics - Profiling](https://golang.org/doc/diagnostics.html#profiling)
+
+### Routes
+
+Displays available routes for the API.
+
+```
+GET /kapacitor/v1/:routes
+```
diff --git a/content/kapacitor/v1.5/working/cli_client.md b/content/kapacitor/v1.5/working/cli_client.md
new file mode 100644
index 000000000..1c754df1e
--- /dev/null
+++ b/content/kapacitor/v1.5/working/cli_client.md
@@ -0,0 +1,1077 @@
+---
+title: Kapacitor command line client
+description: The 'kapacitor' command line utility is used to manage Kapacitor servers and processes, providing access to server statistics as well as other management tasks.
+menu:
+  kapacitor_1_5:
+    name: Kapacitor CLI
+    weight: 12
+    parent: work-w-kapacitor
+---
+
+# Contents
+* [General options](#general-options)
+* [Core commands](#core-commands)
+* [Server management](#server-management)
+  * [Services](#services)
+  * [Logging](#logging)
+* [Data sampling](#data-sampling)
+* [Topics and topic handlers](#topics-and-topic-handlers)
+* [Tasks and task templates](#tasks-and-task-templates)
+
+## Overview
+
+Two key executables are packaged as a part of Kapacitor. The `kapacitord` daemon
+runs the Kapacitor server, including its HTTP interface. The `kapacitor` command
+line interface (CLI) leverages the HTTP interface and other resources to provide
+access to many Kapacitor features.
+
+A general introduction to the `kapacitor` client is presented in
+[Getting started with Kapacitor](/kapacitor/v1.5/introduction/getting-started/).
+
+When executed, the client takes two options and one command, followed by
+arguments applicable to that command.
+
+```bash
+kapacitor [options] [command] [args]
+```
+
+This document provides an overview of all the commands provided by the `kapacitor` CLI.
+These include general options, core commands, server management, data sampling,
+working with topics and topic handlers, and working with tasks and task templates.
+
+## General options
+
+By default, the client attempts HTTP communication with the server running on
+localhost at port 9092. The server can also be deployed with SSL enabled. Two
+command line options make it possible to override the default communication
+settings and to use the client against any Kapacitor server.
+
+
+### `-url`
+The `-url` option supplies an HTTP URL string (`http(s)://host:port`) to the Kapacitor server.
+When not set on the command line, the value of the environment variable `KAPACITOR_URL` is used.
+This can be used to run `kapacitor` commands on a remote Kapacitor server.
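+For example, to run a one-off command against a remote server (the address below is a placeholder):
+
+```bash
+# Point a single invocation at a remote Kapacitor server...
+kapacitor -url http://192.168.67.88:9092 list tasks
+
+# ...or export KAPACITOR_URL once for the whole session.
+export KAPACITOR_URL=http://192.168.67.88:9092
+kapacitor list tasks
+```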
+
+> #### Include authentication credentials in the Kapacitor URL
+> If authentication is enabled on InfluxDB and Kapacitor, include your InfluxDB
+> username and password as query parameters, `u` and `p` respectively, in the Kapacitor URL.
+> For both convenience and security, InfluxData recommends storing these credentials as
+> part of the Kapacitor URL in the `KAPACITOR_URL` environment variable.
+>
+>```sh
+export KAPACITOR_URL=https://192.168.67.88:9092?u=username&p=password
+
+# When KAPACITOR_URL is defined, the -url flag isn't necessary.
+kapacitor list tasks
+```
+
+### `-skipVerify`
+The `-skipVerify` option disables SSL verification.
+This option should be used when connecting to a Kapacitor server secured using a self-signed SSL certificate.
+When not set on the command line, the value of the environment variable `KAPACITOR_UNSAFE_SSL` is used.
+
+**Using command line options**
+
+```
+$ kapacitor -skipVerify -url https://192.168.67.88:9093 list tasks
+ID                                                 Type      Status    Executing Databases and Retention Policies
+batch_load_test                                    batch     enabled   true      ["telegraf"."autogen"]
+chronograf-v1-b12b2554-cf38-4d7e-af24-5b0cd3cecc54 stream    enabled   true      ["telegraf"."autogen"]
+cpu_alert                                          stream    disabled  false     ["telegraf"."autogen"]
+cpu_alert_topic                                    stream    disabled  false     ["telegraf"."autogen"]
+top_scores                                         stream    disabled  false     ["game"."autogen"]
+```
+
+## Core commands
+
+Core commands are those common to most command line applications, along with the
+`kapacitor` commands used most often.
+
+### `help`
+The `help` command brings up the help message. To get more detailed help on any command, type `kapacitor help <command>`.
+
+### `version`
+The `version` command prints out the release version of the `kapacitor` client.
+
+### `list`
+The `list` command can be used to print out lists of different Kapacitor artifacts.
+
+### `delete`
+The `delete` command can be used to remove different Kapacitor artifacts.
+
+The commands `list` and `delete` are presented in more detail in the following sections.
+
+## Server management
+
+The `kapacitor` client can be used to investigate aspects of the server,
+to back up its data, and to work with logs. One planned feature will be the
+ability to push task definitions to other servers.
+
+### `backup`
+The `backup` command creates a backup of the Kapacitor database at a specified filepath.
+
+```bash
+# Pattern
+kapacitor backup [PATH_TO_BACKUP_FILE]
+
+# Example
+kapacitor backup ~/bak/kapacitor-20180101.db
+```
+
+> This command will succeed silently. No status message is returned to the console.
+> Errors, such as insufficient permissions or non-existent directories, will be reported.
+> To verify the results, check the file system.
+
+### `stats`
+The `stats` command displays statistics about the Kapacitor server.
+It requires either the `general` or `ingress` argument.
+
+```bash
+# Pattern
+kapacitor stats <general | ingress>
+```
+
+#### `stats general`
+Use `kapacitor stats general` to view values such as the server ID or hostname
+and counts such as the number of tasks and subscriptions used by Kapacitor.
+
+
+```bash
+$ kapacitor stats general
+ClusterID:       ef3b3f9d-0997-4c0b-b1b6-5d0fb37fe509
+ServerID:        90582c9c-2e25-4654-903e-0acfc48fb5da
+Host:            localhost
+Tasks:           8
+Enabled Tasks:   2
+Subscriptions:   12
+Version:         1.5.1~n201711280812
+```
+
+#### `stats ingress`
+Use the `kapacitor stats ingress` command to view InfluxDB measurements and the
+number of data points that pass through the Kapacitor server.
+This command can be used to ensure InfluxDB data is being written to Kapacitor.
+
+```bash
+$ kapacitor stats ingress
+Database   Retention Policy Measurement     Points Received
+_internal  monitor          cq              5274
+_internal  monitor          database        52740
+_internal  monitor          httpd           5274
+_internal  monitor          queryExecutor   5274
+_internal  monitor          runtime         5274
+_internal  monitor          shard           300976
+_internal  monitor          subscriber      126576
+_internal  monitor          tsm1_cache      300976
+_internal  monitor          tsm1_engine     300976
+_internal  monitor          tsm1_filestore  300976
+_internal  monitor          tsm1_wal        300976
+_internal  monitor          write           5274
+_kapacitor autogen          edges           26370
+_kapacitor autogen          ingress         73817
+_kapacitor autogen          kapacitor       2637
+_kapacitor autogen          load            2637
+_kapacitor autogen          nodes           23733
+_kapacitor autogen          runtime         2637
+_kapacitor autogen          topics          73836
+chronograf autogen          alerts          1560
+telegraf   autogen          cpu             47502
+telegraf   autogen          disk            31676
+telegraf   autogen          diskio          52800
+telegraf   autogen          kernel          5280
+telegraf   autogen          mem             5280
+telegraf   autogen          processes       5280
+telegraf   autogen          swap            10560
+telegraf   autogen          system          15840
+```
+
+### `vars`
+The `vars` command displays a wide range of variables associated with the Kapacitor server.
+Results are output in JSON format.
+
+
+```bash
+$ kapacitor vars
+{"cluster_id": "39545771-7b64-4692-ab8f-1796c07f3314",
+"cmdline": ["kapacitord"],
+"host": "localhost",
+"kapacitor": {"795eb8bd-00b5-4a78-9a10-6190546e0a08": {"name": "nodes", "tags": {"host": ...
+
+# Output example has been truncated
+```
+
+> To make the output more readable, pipe the command into a JSON formatter.
+> ```bash
+# Example using a Python JSON formatter
+kapacitor vars | python -m json.tool
+```
+
+### `push`
+_The `push` command is reserved for a planned feature which will allow tasks to
+be pushed from one Kapacitor server to another._
+
+## Services
+Services are functional modules of the Kapacitor server that handle
+communications with third-party applications, server configuration, and the
+discovery and scraping of data. _For more information about services, see the
+[Configuration](/kapacitor/v1.5/administration/configuration/) documentation._
+
+### `list service-tests`
+The `list service-tests` command lists all service tests currently available on the server.
+
+```bash
+# Pattern
+kapacitor list service-tests [<SERVICE_NAME> | <PATTERN>]
+
+# Example
+kapacitor list service-tests
+```
+
+`PATTERN` can be a grep-like pattern. For example, to run tests of all services
+beginning with the letter 'a', use the string 'a*'.
+
+> Depending on which terminal you're using, you may need to pass patterns as strings
+> by wrapping them in quotes. For example: `"a*"`.
+
+_**Example list service-tests output**_
+```
+Service Name
+alerta
+azure
+consul
+dns
+ec2
+file-discovery
+gce
+hipchat
+httppost
+influxdb
+kubernetes
+marathon
+mqtt
+nerve
+opsgenie
+pagerduty
+pushover
+scraper
+sensu
+serverset
+slack
+smtp
+snmptrap
+static-discovery
+swarm
+talk
+telegram
+triton
+victorops
+```
+
+### `service-tests`
+The `service-tests` command executes one or more of the available service tests.
+
+```bash
+kapacitor service-tests [<SERVICE_NAME> | <PATTERN>]
+```
+
+`PATTERN` can be a grep-like pattern. For example, to run tests of all services
+beginning with the letter 'a', use the string 'a*'.
+
+> Depending on which terminal you're using, you may need to pass patterns as strings
+> by wrapping them in quotes. For example: `"a*"`.
+
+_**Example of running service tests**_
+```bash
+$ kapacitor service-tests slack talk smtp
+Service   Success   Message
+slack     true
+talk      false     service is not enabled
+smtp      false     service is not enabled
+```
+
+> By combining the `list service-tests` and `service-tests` commands, it is possible
+on a Linux system to test all services with the command:
+>```bash
+kapacitor list service-tests | xargs kapacitor service-tests
+```
+
+## Logging
+
+Kapacitor records a wealth of information about itself, its services, and its tasks.
+Information about configuring logging is available in the [Configuration](/kapacitor/v1.5/administration/configuration/#logging)
+document.
+
+### `logs`
+The `logs` command outputs the entire Kapacitor log stream or the log stream of a specific service.
+Log streams can be filtered by log level.
+
+```bash
+# Pattern
+kapacitor logs [service=<SERVICE_NAME>] [lvl=<LEVEL>]
+```
+
+The value for `lvl` can be one of the following:
+
+1. `debug`
+2. `info`
+3. `error`
+
+By default, this will return messages only for the selected level.
+To view messages for the selected level and higher, add a `+` character to the
+end of the string.
+
+_**Monitoring log messages of level DEBUG and above for the HTTP service**_
+
+```bash
+$ kapacitor logs service=http lvl=debug+
+ts=2018-01-15T10:47:10.017+01:00 lvl=info msg="http request" service=http host=127.0.0.1 username=- start=2018-01-15T10:47:10.014048161+01:00 method=POST uri=/write?consistency=&db=_internal&precision=ns&rp=monitor protocol=HTTP/1.1 status=204 referer=- user-agent=InfluxDBClient request-id=0e3c47c4-f9d9-11e7-85c5-000000000000 duration=3.234836ms
+ts=2018-01-15T10:47:10.020+01:00 lvl=info msg="http request" service=http host=127.0.0.1 username=- start=2018-01-15T10:47:10.013091282+01:00 method=POST uri=/write?consistency=&db=_internal&precision=ns&rp=monitor protocol=HTTP/1.1 status=204 referer=- user-agent=InfluxDBClient request-id=0e3c2267-f9d9-11e7-85c4-000000000000 duration=7.555256ms
+ts=2018-01-15T10:47:10.301+01:00 lvl=info msg="http request" service=http host=127.0.0.1 username=- start=2018-01-15T10:47:10.301315013+01:00 method=POST uri=/write?consistency=&db=telegraf&precision=ns&rp=autogen protocol=HTTP/1.1 status=204 referer=- user-agent=InfluxDBClient request-id=0e681d20-f9d9-11e7-85c7-000000000000 duration=306.967µs
+ts=2018-01-15T10:47:10.301+01:00 lvl=info msg="http request" service=http host=127.0.0.1 username=- start=2018-01-15T10:47:10.301249656+01:00 method=POST uri=/write?consistency=&db=telegraf&precision=ns&rp=autogen protocol=HTTP/1.1 status=204 referer=- user-agent=InfluxDBClient request-id=0e681a95-f9d9-11e7-85c6-000000000000 duration=387.042µs
+# ...
+```
+
+#### Tailing Kapacitor logs
+To tail all Kapacitor logs, run the command without the `service` and `level` arguments.
+
+
+```bash
+$ kapacitor logs
+ts=2018-01-15T10:54:07.884+01:00 lvl=info msg="created log session" service=sessions id=33a21e96-49d5-4891-aad8-0bc96099d148 content-type=
+ts=2018-01-15T10:54:10.017+01:00 lvl=info msg="http request" service=http host=127.0.0.1 username=- start=2018-01-15T10:54:10.014885535+01:00 method=POST uri=/write?consistency=&db=_internal&precision=ns&rp=monitor protocol=HTTP/1.1 status=204 referer=- user-agent=InfluxDBClient request-id=0893527c-f9da-11e7-8672-000000000000 duration=2.870539ms
+ts=2018-01-15T10:54:10.020+01:00 lvl=info msg="http request" service=http host=127.0.0.1 username=- start=2018-01-15T10:54:10.017509083+01:00 method=POST uri=/write?consistency=&db=_internal&precision=ns&rp=monitor protocol=HTTP/1.1 status=204 referer=- user-agent=InfluxDBClient request-id=0893b8f6-f9da-11e7-8673-000000000000 duration=2.920775ms
+# ...
+```
+
+### `level`
+The `level` command sets the log level for the Kapacitor log stream written to the log file by the Kapacitor server.
+On Linux systems this file is located by default at `/var/log/kapacitor/kapacitor.log`.
+The form it takes is as follows:
+
+```
+kapacitor level <LEVEL>
+```
+
+The value for `LEVEL` can be one of the following:
+
+1. `debug`
+2. `info`
+3. `error`
+
+To see the command take effect, tail the log file (e.g., `sudo tail -f -n 128 /var/log/kapacitor/kapacitor.log`) and then set the log level to error.
+
+#### Set the Kapacitor log level to ERROR
+
+```bash
+kapacitor level error
+```
+
+The stream to the Kapacitor log should appear to stop. To activate it again,
+reset the log level to `debug`.
+
+#### Set the Kapacitor log level to DEBUG
+
+```bash
+kapacitor level debug
+```
+
+The tailed stream should become active again.
+
+### `watch`
+The `watch` command follows logs associated with a **task**.
+
+> This is different from the `logs` command, which allows tracking logs associated with a service.
+
+```bash
+# Pattern
+kapacitor watch <TASK_ID> [<TAGS> ...]
+```
+
+#### Stream logs from the `cpu_alert` task
+
+```bash
+$ kapacitor watch cpu_alert
+ts=2018-01-15T11:31:30.301+01:00 lvl=debug msg="alert triggered" service=kapacitor task_master=main task=cpu_alert node=alert2 level=CRITICAL id=cpu:nil event_message="cpu:nil is CRITICAL" data="&{cpu map[cpu:cpu6 host:algonquin] [time usage_guest usage_guest_nice usage_idle usage_iowait usage_irq usage_nice usage_softirq usage_steal usage_system usage_user] [[2018-01-15 10:31:30 +0000 UTC 0 0 0 0 0 0 0 0 0 100.00000000000199]]}"
+ts=2018-01-15T11:31:30.315+01:00 lvl=debug msg="alert triggered" service=kapacitor task_master=main task=cpu_alert node=alert2 level=OK id=cpu:nil event_message="cpu:nil is OK" data="&{cpu map[cpu:cpu7 host:algonquin] [time usage_guest usage_guest_nice usage_idle usage_iowait usage_irq usage_nice usage_softirq usage_steal usage_system usage_user] [[2018-01-15 10:31:30 +0000 UTC 0 0 99.89989989990681 0 0 0 0 0 0 0.1001001001001535]]}"
+ts=2018-01-15T11:31:30.325+01:00 lvl=debug msg="alert triggered" service=kapacitor task_master=main task=cpu_alert node=alert2 level=CRITICAL id=cpu:nil event_message="cpu:nil is CRITICAL" data="&{cpu map[host:algonquin cpu:cpu6] [time usage_guest usage_guest_nice usage_idle usage_iowait usage_irq usage_nice usage_softirq usage_steal usage_system usage_user] [[2018-01-15 10:31:30 +0000 UTC 0 0 0 0 0 0 0 0 0 100.00000000000199]]}"
+ts=2018-01-15T11:31:30.335+01:00 lvl=debug msg="alert triggered" service=kapacitor task_master=main task=cpu_alert node=alert2 level=OK id=cpu:nil event_message="cpu:nil is OK" data="&{cpu map[host:algonquin cpu:cpu7] [time usage_guest usage_guest_nice usage_idle usage_iowait usage_irq usage_nice usage_softirq usage_steal usage_system usage_user] [[2018-01-15 10:31:30 +0000 UTC 0 0 99.89989989990681 0 0 0 0 0 0 0.1001001001001535]]}"
+# ...
+```
+
+## Data sampling
+
+At times it can be useful to record a sample of data or a query to troubleshoot
+tasks before they are enabled. The Kapacitor command line client includes a
+number of useful commands for managing data sampling.
+
+### `record`
+The `record` command can be used to record either a snapshot of data or the result
+of an InfluxDB query into the Kapacitor database.
+The data snapshot is later accessible using its `recording-id`.
+Three types of recording are available: `batch`, `stream`, and `query`.
+
+#### `record batch`
+`kapacitor record batch` records the result of an InfluxDB query used in a **batch** type task.
+It requires either a time value for a window of past data from `now`, defined by the argument
+`-past`, or a past interval defined by the arguments `-start` and `-stop`.
+A `-recording-id` is optional and will be generated if not provided.
+The `-task` argument with its `TASK_ID` is also required. The optional boolean
+argument `-no-wait` will spawn the recording into a separate process and exit, leaving
+it to run in the background.
+
+```bash
+# Pattern
+kapacitor record batch [-no-wait] [-past <DURATION> | -start <TIMESTAMP> -stop <TIMESTAMP>] [-recording-id <RECORDING_ID>] -task <TASK_ID>
+
+# Example
+kapacitor record batch -past 5m -recording-id BlueJaySilverTree -task batch_load_test
+```
+
+#### `record stream`
+`kapacitor record stream` records a live stream of data.
+It requires a `-duration` value to determine how long the recording will run.
+The `-task` argument identifying the target task is also required.
+A `-recording-id` value is optional; when not provided, one will be generated automatically.
+The optional boolean argument `-no-wait` will spawn the recording into a separate
+process and exit, leaving it to run in the background.
+
+> This command in combination with the `stream` option will run until the time
+> duration has expired. It returns the recording ID in the console.
+
+```bash
+# Pattern
+kapacitor record stream -duration <DURATION> [-no-wait] [-recording-id <RECORDING_ID>] -task <TASK_ID>
+
+# Example
+kapacitor record stream -duration 1m -task cpu_alert
+```
+
+#### `record query`
+`kapacitor record query` records an InfluxDB query.
+It requires an InfluxDB query provided through the `-query` argument.
+It also requires a `-type` value of either `batch` or `stream`.
+A `-recording-id` can also be provided; when not provided, one will be generated.
+The optional boolean argument `-no-wait` will spawn the recording into a separate
+process and exit, leaving it to run in the background.
+
+```bash
+# Pattern
+kapacitor record query [-cluster <CLUSTER_NAME>] [-no-wait] -query <QUERY> [-recording-id <RECORDING_ID>] -type <batch | stream>
+
+# Example
+$ kapacitor record query -query 'SELECT cpu, usage_idle from "telegraf"."autogen"."cpu" where time > now() - 5m' -type stream
+```
+
+### `replay`
+The `replay` command replays a recording to a task to verify how the task will behave.
+It requires a recording ID provided through the `-recording` argument and a task ID
+provided through the `-task` argument.
+The optional boolean argument `-real-clock` will toggle replaying the data according
+to the intervals between the timestamps contained in the recording.
+The optional boolean argument `-rec-time` will
+toggle using the actual recorded times instead of present times. Use of present
+times is the default behavior. An optional `-replay-id` can also be provided;
+when not provided, one will be generated. The optional boolean argument `-no-wait` will
+spawn the replay into a separate process and exit, leaving it to run in the background.
+
+```bash
+# Pattern
+kapacitor replay [-no-wait] [-real-clock] [-rec-time] -recording <RECORDING_ID> [-replay-id <REPLAY_ID>] -task <TASK_ID>
+
+# Example
+$ kapacitor replay -recording 4e0f09c5-1426-4778-8f9b-c4a88f5c2b66 -task cpu_alert
+```
+
+### `replay-live`
+The `replay-live` command allows for data to be played on the fly to verify task behavior.
+It can be executed as either a `batch` or a `query` replay.
+Kapacitor neither saves nor records the data in its database.
+
+#### `replay-live query`
+With the query argument, the replay executes an InfluxDB query against the task.
+The query should include the database, retention policy, and measurement string.
+
+```bash
+# Pattern
+kapacitor replay-live query [-cluster <CLUSTER_NAME>] [-no-wait] -query <QUERY> [-real-clock] [-rec-time] [-replay-id <REPLAY_ID>] -task <TASK_ID>
+
+# Example
+kapacitor replay-live query -task cpu_alert -query 'select cpu, usage_idle from "telegraf"."autogen"."cpu" where time > now() - 5m'
+```
+
+This command requires an InfluxDB query provided through the `-query` argument.
+It also requires a task identified by the `-task` argument.
+A `-replay-id` is optional; when not provided, one will be generated automatically.
+The optional boolean argument `-no-wait` will spawn the replay into a separate
+process and exit, leaving it to run in the background.
+The optional boolean argument `-real-clock` will toggle replaying the data
+according to the intervals between the timestamps contained within it.
+The optional boolean argument `-rec-time` will toggle using the actual recorded
+times instead of present times.
+Use of present times is the default behavior.
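+
+For example, after replaying live query data against a task, the `show` command
+(covered [below](#show)) can be used to verify that points actually flowed through
+the task's nodes. A minimal sketch, assuming the `cpu_alert` task from the earlier examples:
+
+```bash
+# Replay five minutes of historical cpu data against the task.
+kapacitor replay-live query -task cpu_alert -query 'select cpu, usage_idle from "telegraf"."autogen"."cpu" where time > now() - 5m'
+
+# Inspect the task's per-node statistics to confirm the replayed points were processed.
+kapacitor show cpu_alert
+```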
+
+
+#### `replay-live batch`
+With the batch argument, the replay executes the task against batch data already stored in InfluxDB. It takes the following form:
+
+```bash
+# Pattern
+kapacitor replay-live batch [-no-wait] [-past <DURATION> | -start <TIMESTAMP> -stop <TIMESTAMP>] [-real-clock] [-rec-time] [-replay-id <REPLAY_ID>] -task <TASK_ID>
+
+# Example
+kapacitor replay-live batch -start 2018-01-16T00:00:00Z -stop 2018-01-16T12:00:00Z -replay-id GoldRoosterColdBridge180116 -task batch_load_test
+```
+
+This command requires either a time value for a window of past data from `now`,
+defined by the argument `-past`, or a past interval defined by the arguments `-start`
+and `-stop`. A `-replay-id` is optional and will be generated if not provided.
+The `-task` argument with its `TASK_ID` is also required. The optional boolean
+argument `-no-wait` will spawn the replay into a separate process and exit, leaving
+it to run in the background. The optional boolean argument
+`-real-clock` will toggle replaying the data according to the intervals between
+the timestamps contained within it. The optional boolean argument `-rec-time` will
+toggle using the actual recorded times instead of present times. Use of present
+times is the default behavior.
+
+
+### `list recordings`
+The `list recordings` command can be used to list existing recordings and replays.
+
+```bash
+$ kapacitor list recordings
+ID                                      Type    Status    Size    Date
+0970bcb5-685c-48cc-9a92-741633633f1f    stream  finished  3.2 kB  15 Jan 18 16:37 CET
+78d3a26e-ea1f-4c52-bd56-2016997313fe    stream  finished  23 B    15 Jan 18 15:33 CET
+4e0f09c5-1426-4778-8f9b-c4a88f5c2b66    stream  finished  2.2 kB  15 Jan 18 15:25 CET
+BlueJaySilverTree                       batch   finished  1.0 kB  15 Jan 18 15:18 CET
+7d30caff-e443-4d5f-a0f2-6a933ea35284    batch   finished  998 B   15 Jan 18 15:17 CET
+```
+
+### `list replays`
+The `list replays` command lists all replays.
+
+
+```bash
+$ kapacitor list replays
+ID                                    Task             Recording                             Status    Clock  Date
+d861ee94-aec1-43b8-b362-5c3d9a036aff  cpu_alert        4e0f09c5-1426-4778-8f9b-c4a88f5c2b66  running   real   16 Jan 18 11:02 CET
+GoldRoosterColdBridge180116           batch_load_test                                        finished  fast   16 Jan 18 10:23 CET
+2d9be22c-647a-425e-89fb-40543bdd3670  cpu_alert                                              finished  fast   16 Jan 18 10:12 CET
+b972582b-5be9-4626-87b7-c3d9bfc67981  batch_load_test                                        finished  fast   15 Jan 18 17:26 CET
+c060f960-6b02-49a7-9376-0ee55952a7f0  cpu_alert                                              finished  fast   15 Jan 18 17:25 CET
+4a43565c-4678-4c98-94b7-e534efdff860  cpu_alert        4e0f09c5-1426-4778-8f9b-c4a88f5c2b66  finished  fast   15 Jan 18 16:52 CET
+31f8ea34-455b-4eee-abf2-ed1eb60166a5  cpu_alert        4e0f09c5-1426-4778-8f9b-c4a88f5c2b66  finished  real   15 Jan 18 16:50 CET
+bbe8567c-a642-4da9-83ef-2a7d32ad5eb1  cpu_alert        4e0f09c5-1426-4778-8f9b-c4a88f5c2b66  finished  fast   15 Jan 18 16:49 CET
+```
+
+
+### `delete recordings`
+The `delete recordings` command deletes one or more recordings.
+
+```bash
+# Pattern
+kapacitor delete recordings <ID | PATTERN>
+
+# Examples
+kapacitor delete recordings d861ee94-aec1-43b8-b362-5c3d9a036aff
+kapacitor delete recordings "test*"
+```
+
+`ID` needs to be the full ID of the recording, preferably copied and pasted from
+the results of the `list recordings` command.
+
+`PATTERN` can be a grep-like pattern used to identify a set of recordings. For
+example, if the value `test0` was assigned to multiple `recording-id`s (e.g.,
+`test01`, `test02`, `test03`), then all `test` recordings could be removed with
+the pattern `"test*"`.
+
+This command returns no status or additional messages.
+It fails or succeeds silently.
+To verify results, use the `list recordings` command.
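+
+For example, to delete the batch recording created earlier and confirm that it is gone
+(the ID comes from the `list recordings` output above):
+
+```bash
+kapacitor delete recordings BlueJaySilverTree
+kapacitor list recordings
+```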
+
+### `delete replays`
+The `delete replays` command deletes one or more replays.
+
+```bash
+# Pattern
+kapacitor delete replays <ID | PATTERN>
+
+# Examples
+kapacitor delete replays d861ee94-aec1-43b8-b362-5c3d9a036aff
+kapacitor delete replays "jan-run*"
+```
+
+`ID` needs to be the full ID of the replay, preferably copied and pasted from
+the results of the `list replays` command.
+
+`PATTERN` can be a grep-like pattern used to identify a set of replays. For
+example, if the value `jan-run0` was assigned to multiple `replay-id`s (e.g.,
+`jan-run01`, `jan-run02`, `jan-run03`), then all `jan-run` replays could be removed with
+the pattern `"jan-run*"`.
+
+This command returns no status or additional messages.
+It fails or succeeds silently.
+To verify the results, use the `list replays` command.
+
+## Topics and topic handlers
+
+Topics are classes of subjects to which alerts can publish messages and to which
+other services can subscribe in order to receive those messages.
+Topic handlers bind topics to services, allowing messages to be forwarded by various means.
+
+Working with topics and topic handlers is introduced in the
+[Using alert topics](/kapacitor/v1.5/working/using_alert_topics/) documentation.
+
+Topics are created through the `topic()` method of the [AlertNode](/kapacitor/v1.5/nodes/alert_node) in TICKscripts.
+
+### `define-topic-handler`
+The `define-topic-handler` command defines or redefines a topic handler based on
+the contents of a topic handler script.
+
+```bash
+# Pattern
+kapacitor define-topic-handler <PATH_TO_HANDLER_FILE>
+
+# Example
+$ kapacitor define-topic-handler ./slack_cpu_handler.yaml
+```
+
+This command returns no status or additional messages.
+It fails or succeeds silently.
+To verify the results, use the `list topic-handlers` command.
+
+### `list topics`
+The `list topics` command displays all topics currently stored by Kapacitor.
+
+```bash
+$ kapacitor list topics
+ID                                                              Level     Collected
+1252f40d-c998-430d-abaf-277c43d390e1:cpu_alert:alert2           OK        0
+32fdb276-4d60-42bc-8f5d-c093e97bd3d0:batch_cpu_alert:alert2     OK        0
+666c444c-a33e-42b5-af4d-732311b0e148:batch_cpu_alert:alert2     CRITICAL  0
+cpu                                                             OK        0
+main:batch_load_test:alert2                                     OK        7
+main:chronograf-v1-b12b2554-cf38-4d7e-af24-5b0cd3cecc54:alert3  OK        1028
+main:chronograf-v1-e77137c5-dcce-4fd5-a612-3cdaa5f98ef9:alert7  OK        0
+main:cpu-alert-test:alert3                                      OK        0
+```
+
+### `list topic-handlers`
+The `list topic-handlers` command displays handlers stored by Kapacitor.
+
+```bash
+$ kapacitor list topic-handlers
+Topic     ID        Kind
+cpu       slack     slack
+```
+
+### `show-topic`
+Use the `show-topic` command to see the details of a topic.
+
+```bash
+# Pattern
+kapacitor show-topic [TOPIC_ID]
+
+# Example
+$ kapacitor show-topic 1252f40d-c998-430d-abaf-277c43d390e1:cpu_alert:alert2
+ID: 1252f40d-c998-430d-abaf-277c43d390e1:cpu_alert:alert2
+Level: OK
+Collected: 0
+Handlers: []
+Events:
+Event    Level  Message        Date
+cpu:nil  OK     cpu:nil is OK  13 Nov 17 13:34 CET
+```
+
+### `show-topic-handler`
+The `show-topic-handler` command outputs the topic handler's contents to the console.
+
+```bash
+# Pattern
+kapacitor show-topic-handler [TOPIC_ID] [HANDLER_ID]
+
+# Example
+$ kapacitor show-topic-handler cpu slack
+ID: slack
+Topic: cpu
+Kind: slack
+Match:
+Options: {"channel":"#kapacitor"}
+```
+
+### `delete topics`
+Use the `delete topics` command to remove one or more topics.
+
+```bash
+# Pattern
+kapacitor delete topics <TOPIC_ID | PATTERN>
+
+# Examples
+kapacitor delete topics 1252f40d-c998-430d-abaf-277c43d390e1:cpu_alert:alert2
+kapacitor delete topics "cluster*"
+```
+
+`PATTERN` can be a grep-like pattern used to identify a set of topics.
+For example, if the value `cluster0` was assigned to multiple topics (e.g.,
+`cluster01`, `cluster02`, `cluster03`), then all `cluster` topics could be removed with
+the pattern `"cluster*"`.
+
+This command returns no status or additional messages.
+It fails or succeeds silently.
+To verify the results, use the `list topics` command.
+
+### `delete topic-handlers`
+The `delete topic-handlers` command removes a topic handler.
+
+```bash
+# Pattern
+kapacitor delete topic-handlers [TOPIC_ID] [HANDLER_ID]
+
+# Example
+kapacitor delete topic-handlers cpu slack
+```
+
+> The values for `TOPIC_ID` and `HANDLER_ID` can be determined using the `list` command.
+
+This command returns no status or additional messages.
+It fails or succeeds silently.
+To verify the results, use the `list topic-handlers` command.
+
+## Tasks and task templates
+
+Tasks and task definitions comprise the core Kapacitor functionality.
+Tasks are introduced in the [Getting Started](/kapacitor/v1.5/introduction/getting-started/) guide
+and are explored in detail alongside the [TICKscript](/kapacitor/v1.5/tick/) documentation.
+
+Task templates make it easy to reuse generic task structures to create a suite of similar tasks.
+They are introduced in the [Template Tasks](/kapacitor/v1.5/working/template_tasks/) document.
+
+### `define`
+The `define` command is used to create a new task from a TICKscript.
+It takes one of the following three forms:
+
+1. [As a straight-forward task](#define-a-straight-forward-task)
+2. [From a template](#define-a-task-from-a-template)
+3. [From a template with a descriptor file](#define-a-task-from-a-template-with-a-descriptor-file)
+
+
+#### Define a straight-forward task
+```bash
+# Pattern
+kapacitor define <TASK_ID> -tick <PATH_TO_TICKSCRIPT> -type <stream | batch> [-no-reload] -dbrp <DATABASE>.<RETENTION_POLICY>
+
+# Example
+kapacitor define sandbox -tick sandbox.tick -type stream -dbrp "telegraf"."autogen"
+```
+
+This form of the `define` command requires a new or existing task identifier
+provided immediately after the `define` token.
+If the identifier does not yet exist in Kapacitor, a new task will be created.
+If the identifier already exists, the existing task will be updated.
+A required path to a TICKscript is provided through the argument `-tick`.
+The `-type` of task is also required, as is the target database and retention policy identified by the argument `-dbrp`.
+The optional boolean argument `-no-reload` will prevent reloading the task into memory.
+The default behavior is to reload an updated task.
+
+This command returns no status or additional messages.
+Some error messages associated with malformed or invalid TICKscripts may be returned.
+To verify the results, use the `list tasks` command.
+
+#### Define a task from a template
+```bash
+# Pattern
+kapacitor define <TASK_ID> -template <TEMPLATE_ID> -vars <PATH_TO_VARS_FILE> [-no-reload] -dbrp <DATABASE>.<RETENTION_POLICY>
+
+# Example
+kapacitor define cpu_idle -template generic_mean_alert -vars cpu_vars.json -dbrp "telegraf"."autogen"
+```
+
+This form of the `define` command requires a new or existing task identifier
+provided immediately after the `define` token.
+If the identifier does not yet exist in Kapacitor, a new task will be created.
+If the identifier already exists, the existing task will be updated.
+The required template to be used is identified with the `-template` argument.
+The target database and retention policy identified by the argument `-dbrp` is also
+required, as is a path to the file containing variable definitions, identified by the `-vars` argument.
+The optional boolean argument `-no-reload` will prevent reloading the task into memory.
+The default behavior is to reload an updated task.
+
+This command returns no status or additional messages.
+To verify the results, use the `list tasks` command.
+
+#### Define a task from a template with a descriptor file
+```bash
+# Pattern
+kapacitor define <TASK_ID> -file <PATH_TO_DESCRIPTOR_FILE> [-no-reload]
+
+# Example
+kapacitor define mem_alert -file mem_alert_from_template.json
+```
+
+This form of the `define` command requires a new or existing task identifier
+provided immediately after the `define` token. If the identifier does not yet
+exist in Kapacitor, a new task will be created. If the identifier already exists,
+the existing task will be updated. A path to the file defining the template,
+database and retention policy, and variables is required and provided through the
+`-file` argument. The optional boolean argument `-no-reload` will prevent
+reloading the task into memory. The default behavior is to reload an updated
+task.
+
+This command returns no status or additional messages.
+To verify the results, use the `list tasks` command.
+
+### `define-template`
+Use this command to load a task template into Kapacitor. It takes the following form:
+
+```bash
+# Pattern
+kapacitor define-template <TEMPLATE_ID> -tick <PATH_TO_TICKSCRIPT> -type <stream | batch>
+
+# Example
+kapacitor define-template generic_mean_alert -tick template-task.tick -type stream
+```
+
+This command requires a new or existing template identifier provided immediately
+after the `define-template` token. If the identifier does not yet
+exist in Kapacitor, a new template will be created. If the identifier already exists,
+the existing template will be updated. The path to a TICKscript defining the
+template is also required and is provided through the argument `-tick`. Finally,
+the `-type` of task must also be defined.
+
+This command returns no status or additional messages.
+To verify the results, use the `list templates` command.
+
+### `enable`
+The `enable` command enables one or more tasks.
+When tasks are first created, they are in a `disabled` state.
+
+```bash
+# Pattern
+kapacitor enable <TASK_ID> [<TASK_ID> ...]
+
+# Example
+kapacitor enable cpu_alert
+```
+
+This command returns no status or additional messages.
+To verify the results, use the `list tasks` command.
+
+### `disable`
+The `disable` command disables one or more active tasks.
+
+```bash
+# Pattern
+kapacitor disable <TASK_ID> [<TASK_ID> ...]
+
+# Examples
+kapacitor disable cpu_alert
+kapacitor disable cpu_alert cpu_alert_topic sandbox
+```
+
+This command returns no status or additional messages.
+To verify the result, use the `list tasks` command.
+
+### `reload`
+The `reload` command disables and then reenables one or more tasks.
+It's useful when troubleshooting a task to stop and start it again.
+
+```bash
+# Pattern
+kapacitor reload <TASK_ID> [<TASK_ID> ...]
+
+# Example
+kapacitor reload cpu_alert
+```
+
+This command returns no status or additional messages.
+To verify the result, use the `list tasks` command.
+
+> If troubleshooting and making changes to a task, before reloading, redefine the task
+> using the [`define`](#define) command with the updated TICKscript, template, or template file.
+
+### `list tasks`
+The `list tasks` command displays all tasks currently stored by Kapacitor.
+
+```bash
+$ kapacitor list tasks
+ID                                                 Type      Status    Executing Databases and Retention Policies
+8405b862-e488-447d-a021-b1b7fe0d7194               stream    disabled  false     ["telegraf"."autogen"]
+batch_load_test                                    batch     enabled   true      ["telegraf"."autogen"]
+chronograf-v1-b12b2554-cf38-4d7e-af24-5b0cd3cecc54 stream    enabled   true      ["telegraf"."autogen"]
+cpu_alert                                          stream    enabled   true      ["telegraf"."autogen"]
+cpu_idle                                           stream    disabled  false     ["telegraf"."autogen"]
+sandbox                                            stream    disabled  false     ["blabla"."autogen"]
+```
+
+### `list templates`
+The `list templates` command displays all templates currently stored by Kapacitor.
+
+```bash
+$ kapacitor list templates
+ID                    Type      Vars
+generic_mean_alert    stream    crit,field,groups,measurement,slack_channel,warn,where_filter,window
+```
+
+### `show`
+The `show` command outputs the details of a task.
+
+```bash
+# Pattern
+kapacitor show [-replay <REPLAY_ID>] <TASK_ID>
+
+# Example
+kapacitor show cpu_alert
+```
+
+`REPLAY_ID` is the identifier of a currently running replay.
+
+_**Example show task output**_
+```bash
+ID: cpu_alert
+Error:
+Template:
+Type: stream
+Status: enabled
+Executing: true
+Created: 13 Nov 17 13:38 CET
+Modified: 16 Jan 18 17:11 CET
+LastEnabled: 16 Jan 18 17:11 CET
+Databases Retention Policies: ["telegraf"."autogen"]
+TICKscript:
+stream
+    // Select just the cpu measurement from our example database.
+    |from()
+        .measurement('cpu')
+    |alert()
+        .crit(lambda: int("usage_idle") < 70)
+        // Whenever we get an alert write it to a file.
+        .log('/tmp/alerts.log')
+
+DOT:
+digraph cpu_alert {
+graph [throughput="0.00 points/s"];
+
+stream0 [avg_exec_time_ns="0s" errors="0" working_cardinality="0" ];
+stream0 -> from1 [processed="2574"];
+
+from1 [avg_exec_time_ns="1.92µs" errors="0" working_cardinality="0" ];
+from1 -> alert2 [processed="2574"];
+
+alert2 [alerts_triggered="147" avg_exec_time_ns="1.665189ms" crits_triggered="104" errors="0" infos_triggered="0" oks_triggered="43" warns_triggered="0" working_cardinality="1" ];
+}
+```
+
+### `show-template`
+The `show-template` command outputs the details of a task template.
+
+```bash
+# Pattern
+kapacitor show-template <TEMPLATE_ID>
+
+# Example
+kapacitor show-template generic_mean_alert
+```
+
+_**Example show-template output**_
+```bash
+ID: generic_mean_alert
+Error:
+Type: stream
+Created: 25 Oct 17 10:12 CEST
+Modified: 16 Jan 18 16:52 CET
+TICKscript:
+// Which measurement to consume
+var measurement string
+
+// Optional where filter
+var where_filter = lambda: TRUE
+
+// Optional list of group by dimensions
+var groups = [*]
+
+// Which field to process
+var field string
+
+// Warning criteria, has access to 'mean' field
+var warn lambda
+
+// Critical criteria, has access to 'mean' field
+var crit lambda
+
+// How much data to window
+var window = 5m
+
+// The slack channel for alerts
+var slack_channel = '#kapacitor'
+
+stream
+    |from()
+        .measurement(measurement)
+        .where(where_filter)
+        .groupBy(groups)
+    |window()
+        .period(window)
+        .every(window)
+    |mean(field)
+    |alert()
+        .warn(warn)
+        .crit(crit)
+        .slack()
+        .channel(slack_channel)
+
+Vars:
+Name           Type      Default Value  Description
+crit           lambda                   Critical criteria, has access to 'mean' field
+field          string                   Which field to process
+groups         list      [*]            Optional list of group by dimensions
+measurement    string                   Which measurement to consume
+slack_channel  string    #kapacitor     The slack channel for alerts
+warn           lambda                   Warning criteria, has access to 'mean' field
+where_filter   lambda    TRUE           Optional where filter
+window         duration  5m0s           How much data to window
+DOT:
+digraph generic_mean_alert {
+stream0 -> from1;
+from1 -> window2;
+window2 -> mean3;
+mean3 -> alert4;
+}
+```
+
+### `delete tasks`
+The `delete tasks` command removes one or more tasks.
+
+```bash
+# Pattern
+kapacitor delete tasks <TASK_ID | PATTERN>
+
+# Example
+kapacitor delete tasks 8405b862-e488-447d-a021-b1b7fe0d7194
+```
+
+`PATTERN` can be a grep-like pattern used to identify a set of tasks.
+For example, if the value `cpu0` was assigned to multiple task IDs (e.g.,
+`cpu01`, `cpu02`, `cpu03`), then all `cpu` tasks could be removed with
+the pattern `"cpu*"`.
+
+This command returns no status or additional messages.
+It fails or succeeds silently.
+To verify the results, use the `list tasks` command.
+
+### `delete templates`
+The `delete templates` command removes one or more templates.
+
+```bash
+# Pattern
+kapacitor delete templates <TEMPLATE_ID | PATTERN>
+
+# Example
+kapacitor delete templates generic_mean_alert
+```
+
+`PATTERN` can be a grep-like pattern used to identify a set of task templates. For
+example, if the value `generic0` was assigned to multiple template IDs (e.g.,
+`generic01`, `generic02`, `generic03`), then all `generic` templates could be removed with
+the pattern `"generic*"`.
+
+This command returns no status or additional messages.
+It fails or succeeds silently.
+To verify the results, use the `list templates` command.
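+
+The task commands above are typically used together. A minimal end-to-end sketch,
+assuming a local TICKscript named `cpu_alert.tick` (the file name is illustrative):
+
+```bash
+# Define a stream task from a TICKscript.
+kapacitor define cpu_alert -tick cpu_alert.tick -type stream -dbrp "telegraf"."autogen"
+
+# Enable the task and confirm that it is executing.
+kapacitor enable cpu_alert
+kapacitor list tasks
+
+# Inspect the task's definition and runtime statistics.
+kapacitor show cpu_alert
+```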
diff --git a/content/kapacitor/v1.5/working/custom_alert.md b/content/kapacitor/v1.5/working/custom_alert.md
new file mode 100644
index 000000000..f70fc2767
--- /dev/null
+++ b/content/kapacitor/v1.5/working/custom_alert.md
@@ -0,0 +1,13 @@
+---
+title: How to contribute a new alert integration to Kapacitor
+aliases:
+  - kapacitor/v1.5/contributing/custom_alert/
+  - kapacitor/v1.5/about_the_project/custom_alert/
+menu:
+  kapacitor_1_5:
+    name: Writing your own alert integration
+    identifier: custom_alert
+    weight: 4
+    parent: work-w-kapacitor
+    url: https://github.com/influxdata/kapacitor/blob/master/alert/HANDLERS.md
+---
diff --git a/content/kapacitor/v1.5/working/custom_output.md b/content/kapacitor/v1.5/working/custom_output.md
new file mode 100644
index 000000000..bcaf6b6d9
--- /dev/null
+++ b/content/kapacitor/v1.5/working/custom_output.md
@@ -0,0 +1,555 @@
+---
+title: Contributing new Kapacitor output nodes
+aliases:
+  - kapacitor/v1.5/contributing/custom_output/
+  - kapacitor/v1.5/about_the_project/custom_output/
+menu:
+  kapacitor_1_5:
+    name: Writing your own output node
+    identifier: custom_output
+    weight: 5
+    parent: work-w-kapacitor
+---
+
+If you haven't already, check out the [Kapacitor contributing guidelines](https://github.com/influxdb/kapacitor/blob/master/CONTRIBUTING.md)
+for information about how to get started contributing.
+
+The goal
+--------
+
+Add a new node to Kapacitor that can output data to a custom endpoint.
+For this guide, assume we want to output data to a fictitious in-house database called HouseDB.
+
+Overview
+--------
+
+Kapacitor processes data through a pipeline.
+A pipeline is formally a directed acyclic graph ([DAG](https://en.wikipedia.org/wiki/Directed_acyclic_graph)).
+The basic idea is that each node in the graph represents some form of processing on the data and each edge passes the data between nodes.
+In order to add a new type of node, there are two components that need to be written:
+
+1. The API (TICKscript) for creating and configuring the node, and
+2. The implementation of the data processing step.
+
+In our example the data processing step is outputting the data to HouseDB.
+
+The code mirrors these requirements with two Go packages.
+
+1. `pipeline`: this package defines what types of nodes are available and how they are configured.
+2. `kapacitor`: this package provides implementations of each of the nodes defined in the `pipeline` package.
+
+To make the API (i.e., a TICKscript) clean and readable, defining the node is split out from the implementation of the node.
+
+### Updating TICKscript
+
+First things first, we need to update TICKscript so that users can define our new node.
+What should the TICKscript look like to send data to HouseDB?
+To connect to a HouseDB instance, we need both a URL and a database name, so we need a way to provide that information.
+How about this?
+
+```js
+    node
+        |houseDBOut()
+            .url('house://housedb.example.com')
+            .database('metrics')
+```
+
+In order to update TICKscript to support those new methods, we need to write a Go type that implements the `pipeline.Node` interface.
+The interface can be found [here](https://github.com/influxdb/kapacitor/blob/master/pipeline/node.go)
+as well as a complete implementation via the `pipeline.node` type.
+Since the implementation of the `Node` is done for us, we just need to use it.
+First we need a name. `HouseDBOutNode` follows the naming convention.
+Let's define a Go `struct` that will implement the interface via composition.
+Create a file in the `pipeline` directory called `housedb_out.go` with the following contents:
+
+```go
+package pipeline
+
+// A HouseDBOutNode will take the incoming data stream and store it in a
+// HouseDB instance.
+type HouseDBOutNode struct {
+    // Include the generic node implementation.
+    node
+}
+```
+
+Just like that we have a type in Go that implements the needed interface.
+In order to allow for the `.url` and `.database` methods we need, we simply define fields on the type with the same names.
+The first letter needs to be capitalized so that it is exported.
+It's important that the fields be exported since they will be consumed by the node in the `kapacitor` package.
+The rest of the name should have the same capitalization as the method name.
+TICKscript will take care of matching the case at runtime.
+Update the `housedb_out.go` file.
+
+```go
+package pipeline
+
+// A HouseDBOutNode will take the incoming data stream and store it in a
+// HouseDB instance.
+type HouseDBOutNode struct {
+    // Include the generic node implementation.
+    node
+
+    // URL for connecting to HouseDB
+    Url string
+
+    // Database name
+    Database string
+}
+```
+
+Next we need a consistent way to create a new instance of our node.
+But to do so, we need to think about how this node connects to other nodes.
+Since this is an output node, as far as Kapacitor is concerned it is the end of the pipeline.
+We will not provide any outbound edges; the graph ends on this node.
+Our imaginary HouseDB is flexible and can store data in batches or as single data points.
+As a result we do not care what type of data the HouseDBOutNode node receives.
+With these facts in mind, we can define a function to create a new HouseDBOutNode.
+Add this function to the end of the `housedb_out.go` file:
+
+```go
+// Create a new HouseDBOutNode that accepts any edge type.
+func newHouseDBOutNode(wants EdgeType) *HouseDBOutNode {
+    return &HouseDBOutNode{
+        node: node{
+            desc:     "housedb",
+            wants:    wants,
+            provides: NoEdge,
+        },
+    }
+}
+```
+
+By explicitly stating the types of edges the node `wants` and `provides`, Kapacitor will do the necessary type checking to prevent invalid pipelines.
+
+Finally, we need to add a new `chaining method` so that users can connect HouseDBOutNodes to their existing pipelines.
+A `chaining method` is one that creates a new node and adds it as a child of the calling node.
+In effect the method chains nodes together.
+The `pipeline.chainnode` type contains the set of all methods that can be used for chaining nodes.
+Once we add our method to that type, any other node can chain with a HouseDBOutNode.
+Add this function to the end of the `pipeline/node.go` file:
+
+```go
+// Create a new HouseDBOutNode as a child of the calling node.
+func (c *chainnode) HouseDBOut() *HouseDBOutNode {
+    h := newHouseDBOutNode(c.Provides())
+    c.linkChild(h)
+    return h
+}
+```
+
+We have now defined all the necessary pieces so that TICKscripts can define HouseDBOutNodes:
+
+```js
+    node
+        |houseDBOut()                            // added as a method to the 'chainnode' type
+            .url('house://housedb.example.com')  // added as a field to the HouseDBOutNode
+            .database('metrics')                 // added as a field to the HouseDBOutNode
+```
+
+### Implementing the HouseDB output
+
+Now that a TICKscript can define our new output node, we need to actually provide an implementation so that Kapacitor knows what to do with the node.
+Each node in the `pipeline` package has a node of the same name in the `kapacitor` package.
+Create a file called `housedb_out.go` and put it in the root of the repo.
+Put the contents below in the file.
+
+```go
+package kapacitor
+
+import (
+    "github.com/influxdb/kapacitor/pipeline"
+)
+
+type HouseDBOutNode struct {
+    // Include the generic node implementation
+    node
+    // Keep a reference to the pipeline node
+    h *pipeline.HouseDBOutNode
+}
+```
+
+The `kapacitor` package also defines an interface named `Node` and provides a default implementation via the `kapacitor.node` type.
+Again we use composition to implement the interface.
+Notice we also have a field that will contain an instance of the `pipeline.HouseDBOutNode` we just finished defining.
+This `pipeline.HouseDBOutNode` acts like a configuration struct telling the `kapacitor.HouseDBOutNode` what it needs to do its job.
+
+Now that we have a struct, let's define a function for creating an instance of our new struct.
+The `new*Node` methods in the `kapacitor` package follow a convention of:
+
+```go
+func newNodeName(et *ExecutingTask, n *pipeline.NodeName) (*NodeName, error) {}
+```
+
+In our case we want to define a function called `newHouseDBOutNode`.
+Add the following method to the `housedb_out.go` file:
+
+```go
+func newHouseDBOutNode(et *ExecutingTask, n *pipeline.HouseDBOutNode, d NodeDiagnostic) (*HouseDBOutNode, error) {
+    h := &HouseDBOutNode{
+        // pass in necessary fields to the 'node' struct
+        node: node{Node: n, et: et, diag: d},
+        // Keep a reference to the pipeline.HouseDBOutNode
+        h: n,
+    }
+    // Set the function to be called when running the node
+    // more on this in a bit.
+    h.node.runF = h.runOut
+    return h, nil
+}
+```
+
+In order for an instance of our node to be created, we need to associate it with the node from the `pipeline` package.
+This can be done via the switch statement in the `createNode` method in the `task.go` file.
+To continue our example:
+
+```go
+// Create a node from a given pipeline node.
+func (et *ExecutingTask) createNode(p pipeline.Node, d NodeDiagnostic) (n Node, err error) {
+    switch t := p.(type) {
+    ...
+    case *pipeline.HouseDBOutNode:
+        n, err = newHouseDBOutNode(et, t, d)
+    ...
+}
+```
+
+Now that we have associated our two types, let's get back to implementing the output code.
+Notice the line `h.node.runF = h.runOut` in the `newHouseDBOutNode` function.
+This line sets the method of the `kapacitor.HouseDBOutNode` that will be called when the node starts execution.
+Now we need to define the `runOut` method.
+In the file `housedb_out.go`, add this method:
+
+```go
+func (h *HouseDBOutNode) runOut(snapshot []byte) error {
+    return nil
+}
+```
+
+With that change, the `HouseDBOutNode` is syntactically complete but doesn't do anything yet.
+Let's give it something to do!
+
+As we learned earlier, nodes communicate via edges.
+There is a Go type `edge.Edge` that handles this communication.
+All we want to do is read data from the edge and send it to HouseDB.
+Data is represented in the form of an `edge.Message` type.
+A node reads messages using an `edge.Consumer`, and a node processes messages by implementing the `edge.Receiver` interface.
+Both the `Consumer` and `Receiver` interfaces can be found [here](https://github.com/influxdb/kapacitor/blob/master/edge/consumer.go).
+
+The `node` type we included via composition in the `HouseDBOutNode` provides a list of edges in the field named `ins`.
+Since `HouseDBOutNode` can have only one parent, the edge we are concerned with is the 0th edge.
+We can consume and process messages from an edge using the `NewConsumerWithReceiver` function.
+
+```go
+// NewConsumerWithReceiver creates a new consumer for the edge e and receiver r.
+func NewConsumerWithReceiver(e Edge, r Receiver) Consumer {
+    return &consumer{
+        edge: e,
+        r:    r,
+    }
+}
+```
+
+Let's update `runOut` to read and process messages using this function.
+
+```go
+func (h *HouseDBOutNode) runOut(snapshot []byte) error {
+    consumer := edge.NewConsumerWithReceiver(
+        h.ins[0],
+        h,
+    )
+    return consumer.Consume()
+}
+```
+
+All that's left is for `HouseDBOutNode` to implement the `Receiver` interface and to write a function that takes a batch of points and writes it to HouseDB.
+To make it easy on ourselves, we can use an `edge.BatchBuffer` for receiving batch messages.
+We can also convert single point messages into batch messages containing just one point.
+Doing so requires adding a `batchBuffer *edge.BatchBuffer` field to the `HouseDBOutNode` struct and initializing it in `newHouseDBOutNode`; the complete files in the summary below include it.
+
+```go
+func (h *HouseDBOutNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {
+    return nil, h.batchBuffer.BeginBatch(begin)
+}
+
+func (h *HouseDBOutNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {
+    return nil, h.batchBuffer.BatchPoint(bp)
+}
+
+func (h *HouseDBOutNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {
+    msg := h.batchBuffer.BufferedBatchMessage(end)
+    return msg, h.write(msg)
+}
+
+func (h *HouseDBOutNode) Point(p edge.PointMessage) (edge.Message, error) {
+    batch := edge.NewBufferedBatchMessage(
+        edge.NewBeginBatchMessage(
+            p.Name(),
+            p.Tags(),
+            p.Dimensions().ByName,
+            p.Time(),
+            1,
+        ),
+        []edge.BatchPointMessage{
+            edge.NewBatchPointMessage(
+                p.Fields(),
+                p.Tags(),
+                p.Time(),
+            ),
+        },
+        edge.NewEndBatchMessage(),
+    )
+    return p, h.write(batch)
+}
+
+func (h *HouseDBOutNode) Barrier(b edge.BarrierMessage) (edge.Message, error) {
+    return b, nil
+}
+func (h *HouseDBOutNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {
+    return d, nil
+}
+func (h *HouseDBOutNode) Done() {}
+
+// Write a batch of data to HouseDB
+func (h *HouseDBOutNode) write(batch edge.BufferedBatchMessage) error {
+    // Implement writing to HouseDB here...
+    return nil
+}
+```
+
+Once we have implemented the `write` method, we are done.
+As the data arrives at the `HouseDBOutNode`, it will be written to the specified HouseDB instance.
+
+### Summary
+
+We first wrote a node in the `pipeline` package (filepath: `pipeline/housedb_out.go`) to define the TICKscript API for sending data to a HouseDB instance.
+We then wrote the implementation of that node in the `kapacitor` package (filepath: `housedb_out.go`).
+We also updated `pipeline/node.go` to add a new chaining method and `task.go` to associate the two types.
+
+Here are the complete file contents:
+
+pipeline/housedb_out.go:
+
+```go
+package pipeline
+
+// A HouseDBOutNode will take the incoming data stream and store it in a
+// HouseDB instance.
+type HouseDBOutNode struct {
+    // Include the generic node implementation.
+    node
+
+    // URL for connecting to HouseDB
+    Url string
+
+    // Database name
+    Database string
+}
+
+// Create a new HouseDBOutNode that accepts any edge type.
+func newHouseDBOutNode(wants EdgeType) *HouseDBOutNode {
+    return &HouseDBOutNode{
+        node: node{
+            desc:     "housedb",
+            wants:    wants,
+            provides: NoEdge,
+        },
+    }
+}
+```
+
+housedb_out.go:
+
+```go
+package kapacitor
+
+import (
+    "github.com/influxdb/kapacitor/edge"
+    "github.com/influxdb/kapacitor/pipeline"
+)
+
+type HouseDBOutNode struct {
+    // Include the generic node implementation
+    node
+    // Keep a reference to the pipeline node
+    h *pipeline.HouseDBOutNode
+    // Buffer for a batch of points
+    batchBuffer *edge.BatchBuffer
+}
+
+func newHouseDBOutNode(et *ExecutingTask, n *pipeline.HouseDBOutNode, d NodeDiagnostic) (*HouseDBOutNode, error) {
+    h := &HouseDBOutNode{
+        // pass in necessary fields to the 'node' struct
+        node: node{Node: n, et: et, diag: d},
+        // Keep a reference to the pipeline.HouseDBOutNode
+        h: n,
+        // Buffer for a batch of points
+        batchBuffer: new(edge.BatchBuffer),
+    }
+    // Set the function to be called when running the node
+    h.node.runF = h.runOut
+    return h, nil
+}
+
+func (h *HouseDBOutNode) runOut(snapshot []byte) error {
+    consumer := edge.NewConsumerWithReceiver(
+        h.ins[0],
+        h,
+    )
+    return consumer.Consume()
+}
+
+func (h *HouseDBOutNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {
+    return nil, h.batchBuffer.BeginBatch(begin)
+}
+
+func (h *HouseDBOutNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {
+    return nil, h.batchBuffer.BatchPoint(bp)
+}
+
+func (h *HouseDBOutNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {
+    msg := h.batchBuffer.BufferedBatchMessage(end)
+    return msg, h.write(msg)
+}
+
+func (h *HouseDBOutNode) Point(p edge.PointMessage) (edge.Message, error) {
+    batch := edge.NewBufferedBatchMessage(
+        edge.NewBeginBatchMessage(
+            p.Name(),
+            p.Tags(),
+            p.Dimensions().ByName,
+            p.Time(),
+            1,
+        ),
+        []edge.BatchPointMessage{
+            edge.NewBatchPointMessage(
+                p.Fields(),
+                p.Tags(),
+                p.Time(),
+            ),
+        },
+        edge.NewEndBatchMessage(),
+    )
+    return p, h.write(batch)
+}
+
+func (h *HouseDBOutNode) Barrier(b edge.BarrierMessage) (edge.Message, error) {
+    return b, nil
+}
+func (h *HouseDBOutNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {
+    return d, nil
+}
+func (h *HouseDBOutNode) Done() {}
+
+// Write a batch of data to HouseDB
+func (h *HouseDBOutNode) write(batch edge.BufferedBatchMessage) error {
+    // Implement writing to HouseDB here...
+    return nil
+}
+```
+
+pipeline/node.go (only the new chaining method is shown):
+
+```go
+...
+// Create a new HouseDBOutNode as a child of the calling node.
+func (c *chainnode) HouseDBOut() *HouseDBOutNode {
+    h := newHouseDBOutNode(c.Provides())
+    c.linkChild(h)
+    return h
+}
+...
+```
+
+task.go (only the new case is shown):
+
+```go
+...
+// Create a node from a given pipeline node.
+func (et *ExecutingTask) createNode(p pipeline.Node, d NodeDiagnostic) (n Node, err error) {
+    switch t := p.(type) {
+    ...
+    case *pipeline.HouseDBOutNode:
+        n, err = newHouseDBOutNode(et, t, d)
+    ...
+}
+...
+```
+
+### Documenting your new node
+
+Since TICKscript is its own language, we have built a small utility similar to [godoc](https://godoc.org/golang.org/x/tools/cmd/godoc) named [tickdoc](https://github.com/influxdb/kapacitor/tree/master/tick/cmd/tickdoc).
+`tickdoc` generates documentation from comments in the code.
+The `tickdoc` utility understands two special comments to help it generate clean documentation.
+
+1. `tick:ignore`: can be added to any field, method, function, or struct. `tickdoc` will skip it and not
+   generate any documentation for it.
+   This is most useful to ignore fields that are set via property methods.
+2. `tick:property`: only added to methods. Informs `tickdoc` that the method is a `property method`, not a `chaining method`.
+
+Place one of these comments on a line all by itself and `tickdoc` will find it and behave accordingly. Otherwise, document your code normally and `tickdoc` will do the rest.
+
+### Contributing a non-output node
+
+Writing any node (not just an output node) is a very similar process and is left as an exercise to the reader.
+There are a few things that can differ:
+
+The first difference is that your new node will want to use the `pipeline.chainnode` implementation
+of the `pipeline.Node` interface in the `pipeline` package if it can send data on to child nodes.
+For example:
+
+```go
+package pipeline
+
+type MyCustomNode struct {
+    // Include pipeline.chainnode so we have all the chaining methods available
+    // to our new node
+    chainnode
+}
+
+func newMyCustomNode(e EdgeType, n Node) *MyCustomNode {
+    m := &MyCustomNode{
+        chainnode: newBasicChainNode("mycustom", e, e),
+    }
+    n.linkChild(m)
+    return m
+}
+```
+
+The second difference is that it is possible to define a method that sets fields on a pipeline Node and returns the same instance in order to create a `property method`.
+For example:
+
+```go
+package pipeline
+
+type MyCustomNode struct {
+    // Include pipeline.chainnode so we have all the chaining methods available
+    // to our new node
+    chainnode
+
+    // Mark this field as ignored for docs
+    // Since it is set via the Names method below
+    // tick:ignore
+    NameList []string `tick:"Names"`
+}
+
+func newMyCustomNode(e EdgeType, n Node) *MyCustomNode {
+    m := &MyCustomNode{
+        chainnode: newBasicChainNode("mycustom", e, e),
+    }
+    n.linkChild(m)
+    return m
+}
+
+// Set the NameList field on the node via this method.
+//
+// Example:
+//    node.names('name0', 'name1')
+//
+// Use the tickdoc comment 'tick:property' to mark this method
+// as a 'property method'
+// tick:property
+func (m *MyCustomNode) Names(name ...string) *MyCustomNode {
+    m.NameList = name
+    return m
+}
+```
diff --git a/content/kapacitor/v1.5/working/kapa-and-chrono.md b/content/kapacitor/v1.5/working/kapa-and-chrono.md
new file mode 100644
index 000000000..bebab9f22
--- /dev/null
+++ b/content/kapacitor/v1.5/working/kapa-and-chrono.md
@@ -0,0 +1,468 @@
+---
+title: Using Kapacitor in Chronograf
+
+menu:
+  kapacitor_1_5:
+    weight: 2
+    parent: work-w-kapacitor
+---
+
+## Contents
+- [Configuring Chronograf to work with Kapacitor](#configuring-chronograf-to-work-with-kapacitor)
+  - [Add a Kapacitor Instance](#add-a-kapacitor-instance)
+  - [Managing Kapacitor from Chronograf](#managing-kapacitor-from-chronograf)
+  - [Event Handlers](#event-handlers)
+  - [Creating Alerts in Chronograf](#creating-alerts-in-chronograf)
+  - [Managing tasks through Chronograf](#managing-tasks-through-chronograf)
+  - [Viewing Alert tasks in Chronograf](#viewing-alert-tasks-in-chronograf)
+- [Kapacitor Tasks and Chronograf](#kapacitor-tasks-and-chronograf)
+  - [Viewing Alerts from Tasks in the Alert History of Chronograf](#viewing-alerts-from-tasks-in-the-alert-history-of-chronograf)
+  - [Writing a Task to be editable in Chronograf](#writing-a-task-to-be-editable-in-chronograf)
+- [Summary](#summary)
+
+## Overview
+As a part of the [TICK stack](https://www.influxdata.com/products/),
+[Chronograf](/chronograf/v1.3/) is a graphical user interface designed to
+simplify monitoring infrastructure, visualizing data, administering databases,
+and managing alerts. It is through alert management that Chronograf gets
+integrated with Kapacitor.
+
+In the Chronograf documentation, working with Kapacitor is covered in the
+sections [Connecting Chronograf to Kapacitor](/chronograf/v1.3/introduction/getting-started/#4-connect-chronograf-to-kapacitor),
+[Create a Kapacitor Alert](/chronograf/v1.3/guides/create-a-kapacitor-alert/),
+[Configure Kapacitor Event Handlers](/chronograf/v1.3/guides/configure-kapacitor-event-handlers/),
+and [Advanced Kapacitor Usage](/chronograf/v1.3/guides/advanced-kapacitor/).
+
+This document summarizes many of the points presented there and
+provides tips for working with tasks and TICKscripts in Chronograf.
+
+## Configuring Chronograf to work with Kapacitor
+Kapacitor instances in Chronograf are associated with specific InfluxDB databases,
+which should already be bound to both Kapacitor and Chronograf. To define an
+InfluxDB database in Kapacitor, see the [Getting started with Kapacitor](/kapacitor/v1.5/introduction/getting-started/)
+or the [Configuring Kapacitor](/kapacitor/v1.5/administration/configuration/#influxdb)
+guides. To define an InfluxDB database in Chronograf, see [InfluxDB setup](/chronograf/v1.3/introduction/getting-started/#influxdb-setup)
+in the Chronograf documentation.
+
+### Add a Kapacitor instance
+To add a Kapacitor instance to Chronograf:
+
+1. In the left navigation bar, click the **Configuration** cog-wheel icon. A
+   list of InfluxDB sources is loaded.
+
+   conifguration-open
+
+2. Locate the InfluxDB source in the list and, in the rightmost column under the
+   "Active Kapacitor" heading, click **Add Config**. The Configure Kapacitor page
+   loads with default settings.
+
+   conifguration-new
+
In the grouping "Connection Details", set the values for the Kapacitor URL and a
+   name for this Kapacitor instance; also add username and password credentials if necessary.
+
+   conifguration-details
+
+4. Click the **Connect** button. If the "Connection Details" are correct, a
+   success message is displayed and a new section, "Configure Alert
+   Endpoints", appears.
+
+   conifguration-success
+
+5. If a third party alert service or SMTP is used, update the third party
+   settings in the "Configure Alert Endpoints" section.
+6. Return to the "Configuration" page by clicking on the **Configuration** icon
+   once more. The new Kapacitor instance should be listed under the "Active
+   Kapacitor" heading.
+
+   conifguration-review
+
+### Managing Kapacitor from Chronograf
+
+#### Event Handlers
+One key set of Kapacitor features that can be modified through Chronograf is
+the third party alert handlers.
+
+##### To modify a third party alert handler:
+
+1. In the Configuration table, locate the InfluxDB instance and its associated
+   Kapacitor instance, click the Kapacitor drop down menu and then the **edit icon**.
+
+   conifguration-open
+
+2. Click on the handler that needs to be changed. Its tab will become active.
+
+   conifguration-open
+
+3. Edit the relevant fields and click the **Update Config** button.
+
+   conifguration-open
+
+4. If the configuration properties are valid, a success message will appear.
+
+   conifguration-open
+
+5. The updated configuration can be verified over the Kapacitor HTTP API. For
+   example, to verify an updated SMTP configuration, check the JSON document at the
+   endpoint `/kapacitor/v1/config/smtp`
+   (e.g. **http://localhost:9092/kapacitor/v1/config/smtp**).
+
+For more information, see the section [Configuration with the HTTP API](/kapacitor/v1.5/administration/configuration/#configuring-with-the-http-api).
+
+### Creating Alerts in Chronograf
+Alerts in Chronograf correspond to Kapacitor tasks designed specifically to
+trigger alerts whenever the data stream values rise above or fall below
+designated thresholds. Please note that only the most common alerting use
+cases are manageable through Chronograf. These include:
+
+- Thresholds with static ceilings, floors and ranges.
+- Relative thresholds based on unit or percentage changes.
+- Deadman switches.
+
+More refined alerts and other tasks need to be defined directly in Kapacitor.
+
+#### To create a basic static threshold alert based on the CPU measurements provided by Telegraf:
+
+1. Open the Alert rules tab by clicking on the **Alerting** icon in the left
+   navigation bar and then on **Create** in the pop up menu. A table of alert
+   rules (Kapacitor tasks) will load. These are queried from Kapacitor.
+
+   conifguration-open
+
+2. Click on the **Create Rule** button. The Create/Edit rule page will load.
+
+   conifguration-open
+
+3. Notice in the top left the rule name edit box with the string **Untitled Rule**.
+   Change this name to something sensible for the alert to be created.
+
+   conifguration-open
+
+4. In the section **Select a Time Series**, select a database, a measurement
+   and a field to be monitored. Note that in the measurement one or more tags can
+   be selected. However, selecting specific tags is not required. Note as well that
+   alongside each tag it is possible to select the tag for a _group by_ clause.
+
+   conifguration-open
+
+5. In the section **Rule Conditions**, for this example, keep the tab
+   **Thresholds** selected. In the drop down list box for the **is** clause, select
+   _less than_.
And, in the edit box for the quantity enter the value _80_, which + for this field means percent. + + conifguration-open + +6. In the section **Alert Message** keep the tab **smtp**. Note that this + requires the SMTP handler to be correctly configured. Update the values for the + addressees and the message body. Note as well that the bottom or _template_ + text area accepts the template fields suggested just below it. Click on a + template field to add it to the template. + + conifguration-open + +7. When the three key sections are correctly configured click the **Save Rule** + button. The rule list will load once again. + + conifguration-open + +8. The new rule is visible in the list. It can be opened for editing by + clicking on its name. + + conifguration-open + + +The rule is also visible through the Kapacitor command line client. + +**Example 1 – Viewing a Chronograf Alert in Kapacitor** + +``` +$ kapacitor list tasks +ID Type Status Executing Databases and Retention Policies +batch_load_test batch enabled true ["telegraf"."autogen"] +chronograf-v1-b12b2554-cf38-4d7e-af24-5b0cd3cecc54 stream enabled true ["telegraf"."autogen"] +cpu_alert stream disabled false ["telegraf"."autogen"] +top_scores stream disabled false ["game"."autogen"] +``` + +Tasks (or Alert Rules) generated by Chronograf are listed with the `chronograf` +and version tokens (e.g. `v1`) followed by a UUID. + +### Managing Tasks through Chronograf +Through Chronograf Kapacitor tasks can be enabled, disabled and deleted. This +applies even to tasks that were not generated as Chronograf alerts. + +#### To enable a task through Chronograf: +1. Locate the task in the **Alert Rules** table. +2. In the column **Enabled** toggle the state of the task from _disabled_ to + _enabled_ . A message indicating the change of state will appear at the top of + the page. + + enable-disable screenshot + +The change of state can also be verified on the Kapacitor side by listing the +tasks with the command line client and checking the _Status_ column. + +**Example 2 – Viewing a task enabled through Chronograf in Kapacitor** + +``` +$ kapacitor list tasks +ID Type Status Executing Databases and Retention Policies +batch_load_test batch enabled true ["telegraf"."autogen"] +chronograf-v1-b12b2554-cf38-4d7e-af24-5b0cd3cecc54 stream enabled true ["telegraf"."autogen"] +chronograf-v1-fa28d99e-e875-4521-8bd2-463807522bbd stream enabled true ["co2accumulator"."autogen"] +cpu_alert stream disabled false ["telegraf"."autogen"] +top_scores stream disabled false ["game"."autogen"] +``` + +#### To disable a task through Chronograf: +1. Locate the task in the **Alert Rules** table. See the screenshot above. +2. In the column **Enabled** toggle the state of the task from _enabled_ to + _disabled_ . A message indicating the change of state will appear at the top of + the page. + +The change of state can also be verified on the Kapacitor side by listing the +tasks with the command line client and checking the _Status_ column. 
+
+**Example 3 – Viewing a task disabled through Chronograf in Kapacitor**
+
+```
+$ kapacitor list tasks
+ID Type Status Executing Databases and Retention Policies
+batch_load_test batch enabled true ["telegraf"."autogen"]
+chronograf-v1-b12b2554-cf38-4d7e-af24-5b0cd3cecc54 stream enabled true ["telegraf"."autogen"]
+chronograf-v1-fa28d99e-e875-4521-8bd2-463807522bbd stream disabled false ["co2accumulator"."autogen"]
+cpu_alert stream disabled false ["telegraf"."autogen"]
+top_scores stream disabled false ["game"."autogen"]
+```
+
+#### To delete a task through Chronograf:
+1. Locate the task in the **Alert Rules** table.
+2. Click on the **Delete** button in the final column of the table. A message
+   indicating that the task was deleted will appear at the top of the page.
+
+   delete screenshot
+
+The deletion can also be verified on the Kapacitor side by listing the tasks
+with the command line client.
+
+**Example 4 – Verification of a task deleted through Chronograf**
+
+```
+$ kapacitor list tasks
+ID Type Status Executing Databases and Retention Policies
+batch_load_test batch enabled true ["telegraf"."autogen"]
+chronograf-v1-b12b2554-cf38-4d7e-af24-5b0cd3cecc54 stream enabled true ["telegraf"."autogen"]
+cpu_alert stream disabled false ["telegraf"."autogen"]
+top_scores stream disabled false ["game"."autogen"]
+```
+
+> Note: Please remember that all Kapacitor tasks are accessible through
+Chronograf. When disabling, enabling and deleting tasks with Chronograf, take
+care not to inadvertently change the state or existence of a task not
+associated with Chronograf alerts.
+
+### Viewing Alert tasks in Chronograf
+Chronograf Alerts are made visible in the Alert History page.
+
+To view the page:
+
+1. Click on the **Alerts** icon in the left navigation bar. A menu will pop up.
+2. In the pop-up menu select the item **History**.
+
+   delete screenshot
+
+3. The _Alert History_ page will load with a table showing the alerts posted
+   within the time frame defined by the drop-down filter in the top right corner of
+   the page.
+
+   delete screenshot
+
+
+## Kapacitor Tasks and Chronograf
+When first writing TICKscripts and working with Kapacitor tasks, inspecting
+tasks generated by Chronograf can be instructive.
+
+To view a generated task, open it with the `kapacitor` client `show` command.
+
+**Example 5 – Showing a Chronograf Alert task in Kapacitor**
+
+```
+$ kapacitor show chronograf-v1-b12b2554-cf38-4d7e-af24-5b0cd3cecc54
+ID: chronograf-v1-b12b2554-cf38-4d7e-af24-5b0cd3cecc54
+Error:
+Template:
+Type: stream
+Status: enabled
+Executing: true
+...
+```
+
+The console output includes a TICKscript section showing the script used to
+create the task.
+
+
+**Example 6 – The TICKscript of a Chronograf Alert task**
+
+```
+...
+TICKscript:
+var db = 'telegraf'
+
+var rp = 'autogen'
+
+var measurement = 'cpu'
+
+var groupBy = []
+
+var whereFilter = lambda: TRUE
+
+var name = 'CPU USAGE'
+
+var idVar = name + ':{{.Group}}'
+
+var message = ' {{.ID}} is {{.Level}} for {{ index .Fields "value" }}'
+
+var idTag = 'alertID'
+
+var levelTag = 'level'
+
+var messageField = 'message'
+
+var durationField = 'duration'
+
+var outputDB = 'chronograf'
+
+var outputRP = 'autogen'
+
+var outputMeasurement = 'alerts'
+
+var triggerType = 'threshold'
+
+var details = 'CPU usage is up.
'
+
+var crit = 80
+
+var data = stream
+    |from()
+        .database(db)
+        .retentionPolicy(rp)
+        .measurement(measurement)
+        .groupBy(groupBy)
+        .where(whereFilter)
+    |eval(lambda: "usage_idle")
+        .as('value')
+
+var trigger = data
+    |alert()
+        .crit(lambda: "value" < crit)
+        .stateChangesOnly()
+        .message(message)
+        .id(idVar)
+        .idTag(idTag)
+        .levelTag(levelTag)
+        .messageField(messageField)
+        .durationField(durationField)
+        .details(details)
+        .email('heinrich.faust@1808.io', 'valentin.faust@1808.io')
+
+trigger
+    |influxDBOut()
+        .create()
+        .database(outputDB)
+        .retentionPolicy(outputRP)
+        .measurement(outputMeasurement)
+        .tag('alertName', name)
+        .tag('triggerType', triggerType)
+
+trigger
+    |httpOut('output')
+...
+```
+
+Note that all values not generated by method calls are defined as variables
+before they are used in method calls. Furthermore, the pipeline is broken up
+into four parts, two of which result in a variable assignment. The variable
+`trigger` is of special interest. It takes the alert stream and then first
+writes it to the `chronograf` database's `alerts` measurement before additionally
+caching it to an endpoint of Kapacitor's HTTP server.
+
+### Viewing Alerts from Tasks in the Alert History of Chronograf
+Kapacitor tasks that generate alerts do not have to be defined in Chronograf in
+order for those alerts to appear in Chronograf's **Alert History**. They
+simply need to write alert data to the `alerts` measurement of the `chronograf`
+database in InfluxDB.
+
+For example, an alert based on CPU usage in Telegraf can be written similarly to
+the Chronograf-generated alert shown above. The name can be changed, and the
+fields in the property methods of the `influxDBOut()` chaining method can be
+defined using literal values.
+
+**Example 7 – influxDBOut configured for Chronograf Alert History**
+
+```
+...
+var name = 'CPU ALERTS FROM FUBAR'
+...
+var trigger = data
+    |alert()
+...
+trigger
+    |influxDBOut()
+        .create()
+        .database('chronograf')
+        .retentionPolicy('autogen')
+        .measurement('alerts')
+        .tag('alertName', name)
+        .tag('triggerType', 'custom')
+...
+```
+
+A TICKscript with this `trigger` block can be defined as follows.
+
+```
+$ kapacitor define cpu_alert_fubar -tick cpu_chron_alert.tick -type stream -dbrp "telegraf"."autogen"
+```
+
+It will then be visible in Chronograf alerts.
+
+delete screenshot
+
+Enable it and then add an artificial load to the CPU.
+
+```
+while true; do i=0; done
+```
+
+After a few minutes the alert will appear in the **Alert History** table of
+Chronograf.
+
+delete screenshot
+
+In order for the alert to be visible in Chronograf, it is important to add the
+`tag` with the key value `alertName` to the datapoints written by
+`influxDBOut()`, as well as the `tag` with the key value `triggerType`.
+
+### Writing a Task to be editable in Chronograf
+For a task to be editable in Chronograf, it needs to contain the variables
+generated by a Chronograf alert task. These variables are visible in
+[Example 6](#example-6). The task should also be a simple alert task like those
+generated by Chronograf. By including the variables and the chaining method
+`influxDBOut()`, the task will then be accessible through a link in the
+Chronograf _Alert Rules_ table.
+
+delete screenshot
+
+By clicking the link, the _Edit Alert_ page will load with the task.
+
+delete screenshot
+
+### Summary
+This document presented the essentials of adding a Kapacitor instance to
+Chronograf and then using that instance to create, enable, edit, disable and
+delete alert tasks. The alerts generated by Chronograf are written as tasks
+directly to the Kapacitor instance, where they can be listed, reviewed and
+managed further using the `kapacitor` command line client application.
+
+Also presented was how to enable Kapacitor tasks to write alerts to the
+`chronograf` database in InfluxDB, so that they will be visible in the
+_Alert History_ of Chronograf. By adding Chronograf variables to a _simple_
+alert TICKscript, the task will be editable in Chronograf.
diff --git a/content/kapacitor/v1.5/working/scraping-and-discovery.md b/content/kapacitor/v1.5/working/scraping-and-discovery.md
new file mode 100644
index 000000000..75d2ff24a
--- /dev/null
+++ b/content/kapacitor/v1.5/working/scraping-and-discovery.md
@@ -0,0 +1,104 @@
+---
+title: Scraping and discovery
+aliases:
+  - /kapacitor/v1.5/pull_metrics/scraping-and-discovery/
+  - /kapacitor/v1.5/pull_metrics/
+menu:
+  kapacitor_1_5:
+    name: Dynamic Data Scraping
+    weight: 6
+    parent: work-w-kapacitor
+---
+
+Data can be pulled from a dynamic list of remote targets with the discovery and scraping features of Kapacitor.
+Use those features with [TICKscripts](/kapacitor/v1.5/tick/) to monitor targets, process the data, and write data to [InfluxDB](/influxdb/v1.3/).
+Currently, Kapacitor supports only Prometheus style targets.
+
+>**Note**: Scraping and discovery is currently under technical preview.
+There may be changes to the configuration and behavior in subsequent releases.
+
+### Content
+
+* [Overview](#overview)
+* [Configuring Scrapers and Discoverers](#configuring-scrapers-and-discoverers)
+
+## Overview
+
+The diagram below outlines the infrastructure for discovering and scraping data with Kapacitor.
+
+**Image 1 – Scraping and discovery workflow**
+
+conifguration-open
+
+1. First, Kapacitor implements the discovery process to identify the available targets in your infrastructure.
+It requests that information at regular intervals and receives that information from an [authority](#available-discoverers).
+In the diagram, the authority informs Kapacitor of three targets: `A`, `B`, and `C`.
+1. Next, Kapacitor implements the scraping process to pull metrics data from the existing targets.
+It runs the scraping process at regular intervals.
+Here, Kapacitor requests metrics from targets `A`, `B`, and `C`.
+The application running on `A`, `B`, and `C` exposes a `/metrics` endpoint on its HTTP API which returns application-specific statistics.
+1. Finally, Kapacitor processes the data according to configured [TICKscripts](/kapacitor/v1.5/tick/).
+Use TICKscripts to filter, transform, and perform other tasks on the metrics data.
+In addition, if the data should be stored, configure a TICKscript to send it to [InfluxDB](/influxdb/v1.3/).
+
+### Pushing vs. Pulling Metrics
+
+By combining discovery with scraping, Kapacitor enables a metrics gathering infrastructure to pull metrics off of targets instead of requiring them to push metrics out to InfluxDB.
+Pulling metrics has several advantages in dynamic environments where a target may have a short lifecycle.
+
+## Configuring Scrapers and Discoverers
+
+A single scraper scrapes the targets from a single discoverer.
+Configuring both scrapers and discoverers comes down to configuring each individually and then informing the scraper about the discoverer.
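+
+For example, a scraper can be paired with a static discoverer that simply lists its targets.
+The following is a minimal sketch, assuming a Prometheus-style exporter listening on `localhost:9100`;
+the scraper options mirror Example 1 below, the `static-discovery` block follows the example
+configuration file referenced at the end of this page, and the IDs and database names are placeholders:
+
+```
+# Hypothetical pairing of a static discoverer and a scraper
+[[static-discovery]]
+  enabled = true
+  id = "static-hosts"
+  targets = ["localhost:9100"]
+
+[[scraper]]
+  enabled = true
+  name = "node-metrics"
+  discoverer-id = "static-hosts"
+  discoverer-service = "static-discovery"
+  db = "prometheus_raw"
+  rp = "autogen"
+  type = "prometheus"
+```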
+
+Below are all the configuration options for a scraper.
+
+**Example 1 – Scraper Configuration**
+
+```
+[[scraper]]
+  enabled = false
+  name = "myscraper"
+  # ID of the discoverer to use
+  discoverer-id = ""
+  # The kind of discoverer to use
+  discoverer-service = ""
+  db = "mydb"
+  rp = "myrp"
+  type = "prometheus"
+  scheme = "http"
+  metrics-path = "/metrics"
+  scrape-interval = "1m0s"
+  scrape-timeout = "10s"
+  username = ""
+  password = ""
+  bearer-token = ""
+  ssl-ca = ""
+  ssl-cert = ""
+  ssl-key = ""
+  ssl-server-name = ""
+  insecure-skip-verify = false
+```
+
+
+### Available Discoverers
+
+Kapacitor supports the following services for discovery:
+
+| Name | Description |
+| ---- | ----------- |
+| azure | Discover targets hosted in [Azure](https://azure.microsoft.com/). |
+| consul | Discover targets using [Consul](https://www.consul.io/) service discovery. |
+| dns | Discover targets via DNS queries. |
+| ec2 | Discover targets hosted in [AWS EC2](https://aws.amazon.com/ec2/). |
+| file-discovery | Discover targets listed in files. |
+| gce | Discover targets hosted in [GCE](https://cloud.google.com/compute/). |
+| kubernetes | Discover targets hosted in [Kubernetes](https://kubernetes.io/). |
+| marathon | Discover targets using [Marathon](https://mesosphere.github.io/marathon/) service discovery. |
+| nerve | Discover targets using [Nerve](https://github.com/airbnb/nerve) service discovery. |
+| serverset | Discover targets using [Serversets](https://github.com/twitter/finagle/tree/master/finagle-serversets) service discovery. |
+| static-discovery | Statically list targets. |
+| triton | Discover targets using [Triton](https://github.com/joyent/triton) service discovery. |
+
+
+See the example [configuration file](https://github.com/influxdata/kapacitor/blob/master/etc/kapacitor/kapacitor.conf) for details on configuring each discoverer.
diff --git a/content/kapacitor/v1.5/working/template_tasks.md b/content/kapacitor/v1.5/working/template_tasks.md
new file mode 100644
index 000000000..3c287def2
--- /dev/null
+++ b/content/kapacitor/v1.5/working/template_tasks.md
@@ -0,0 +1,411 @@
+---
+title: Template tasks
+description: Create Kapacitor task and TICKscript templates that can be used to quickly create new tasks and TICKscripts.
+aliases:
+  - /kapacitor/v1.5/examples/template_tasks/
+  - /kapacitor/v1.5/guides/template_tasks/
+menu:
+  kapacitor_1_5:
+    name: Template tasks
+    identifier: template_tasks
+    weight: 9
+    parent: work-w-kapacitor
+---
+
+Use templates in the CLI and the [API](/kapacitor/v1.5/working/api) to define and reuse tasks.
+
+To create a task template, do the following:
+
+1. Create a task template script
+2. Run the `define-template` command
+
+Then, use the task template to define new tasks.
+
+>Note: Chronograf does **not** display template details, including variable values.
+
+## Create a task template script
+
+The following task template script computes the mean of a field and triggers an alert.
+ +_**Example: generic\_alert\_template.tick**_ +```js +// Which measurement to consume +var measurement string +// Optional where filter +var where_filter = lambda: TRUE +// Optional list of group by dimensions +var groups = [*] +// Which field to process +var field string +// Warning criteria, has access to 'mean' field +var warn lambda +// Critical criteria, has access to 'mean' field +var crit lambda +// How much data to window +var window = 5m +// The slack channel for alerts +var slack_channel = '#alerts' + +stream + |from() + .measurement(measurement) + .where(where_filter) + .groupBy(groups) + |window() + .period(window) + .every(window) + |mean(field) + |alert() + .warn(warn) + .crit(crit) + .slack() + .channel(slack_channel) +``` + +Notice all fields in the script are declared variables, which lets you customize variable values when using the template to define a task. + +### Define variables + +In a task template, use the following pattern to define variables: + +```js +// Required variable pattern +var varName dataType + +// Optional variable patterns +var varName = dataType: defaultValue +var varName = [*] +``` + +For information about available data types, see [literal value types](/kapacitor/v1.5/tick/syntax/#types). + +#### Optional variables + +In some cases, a template task may be used for tasks that do not require values for all template variables. +To ensure a variable is optional, provide a default value. In most cases, the default can simply be `TRUE`: + +```js +// Pattern +var varName = datatype: defaultValue + +// Examples +var where_filter = lambda: TRUE +var warn = lambda: TRUE +var groups = [*] +``` + +## Run the `define-template` command + +To define a new template, run the `define-template` command: + +```bash +kapacitor define-template generic_mean_alert -tick path/to/template_script.tick +``` + +Use `show-template` to see more information about the newly created template. + +```bash +kapacitor show-template generic_mean_alert +``` + +A list of variables declared for the template is returned in the group `vars` as part of the console output as shown in this example: + +_**Example: The Vars section of kapacitor show-template output**_ +```output +... + +Vars: +Name Type Default Value Description +crit lambda Critical criteria, has access to 'mean' field +field string Which field to process +groups list [*] Optional list of group by dimensions +measurement string Which measurement to consume +slack_channel string #alerts The slack channel for alerts +warn lambda Warning criteria, has access to 'mean' field +where_filter lambda TRUE Optional where filter +window duration 5m0s How much data to window + +... +``` + +Each task will acquire its type and TICKscript structure from the template. +Variable descriptions are derived from comments above each variable in the template. +The specific values of variables and of the database/retention policy are unique for each task +created using the template. + +## Define a new task + +Define a new task using the template to trigger an alert on CPU usage. + +1. Pass variable values into the template using a simple JSON file. 
+ +_**Example: A JSON variable file**_ +```json +{ + "measurement": {"type" : "string", "value" : "cpu" }, + "where_filter": {"type": "lambda", "value": "\"cpu\" == 'cpu-total'"}, + "groups": {"type": "list", "value": [{"type":"string", "value":"host"},{"type":"string", "value":"dc"}]}, + "field": {"type" : "string", "value" : "usage_idle" }, + "warn": {"type" : "lambda", "value" : "\"mean\" < 30.0" }, + "crit": {"type" : "lambda", "value" : "\"mean\" < 10.0" }, + "window": {"type" : "duration", "value" : "1m" }, + "slack_channel": {"type" : "string", "value" : "#alerts_testing" } +} +``` + +2. Pass in the template file and the JSON variable file by running a command with both `-template` and `-vars` arguments: + + ```bash + kapacitor define cpu_alert -template generic_mean_alert -vars cpu_vars.json -dbrp telegraf.autogen + ``` + +3. Use the `show` command to display the variable values associated with the newly created task. + +> **Note:** For Kapacitor instances with authentication enabled, use the following form: +`./kapacitor -url http://username:password@MYSERVER:9092 show TASKNAME` + +##### Example + +``` +kapacitor show cpu_alert +``` +``` +... + +Vars: +Name Type Value +crit lambda "mean" < 10.0 +field string usage_idle +groups list [host,dc] +measurement string cpu +slack_channel string #alerts_testing +warn lambda "mean" < 30.0 +where_filter lambda "cpu" == 'cpu-total' +window duration 1m0s + +... +``` + +A similar task for a memory based alert can also be created using the same template. +Create a `mem_vars.json` and use this snippet. + +_**Example: A JSON variables file for memory alerts**_ +```json +{ + "measurement": {"type" : "string", "value" : "mem" }, + "groups": {"type": "list", "value": [{"type":"star", "value":"*"}]}, + "field": {"type" : "string", "value" : "used_percent" }, + "warn": {"type" : "lambda", "value" : "\"mean\" > 80.0" }, + "crit": {"type" : "lambda", "value" : "\"mean\" > 90.0" }, + "window": {"type" : "duration", "value" : "10m" }, + "slack_channel": {"type" : "string", "value" : "#alerts_testing" } +} +``` + +The task can now be defined as before, but this time with the new variables file +and a different task identifier. + +``` +kapacitor define mem_alert -template generic_mean_alert -vars mem_vars.json -dbrp telegraf.autogen +``` + +Running `show` will display the `vars` associated with this task which are unique to the `mem_alert` task. + +``` +kapacitor show mem_alert +``` + +And again the `vars` output: + +_**Example: The Vars section of the mem\_alert task**_ +``` +... +Vars: +Name Type Value +crit lambda "mean" > 90.0 +field string used_percent +groups list [*] +measurement string mem +slack_channel string #alerts_testing +warn lambda "mean" > 80.0 +window duration 10m0s +... +``` + +Any number of tasks can be defined using the same template. + +> **Note:** Updates to the template will update all associated tasks and reload them if necessary. + +## Using Variables + +Variables work with normal tasks as well and can be used to overwrite any defaults in the script. +Since at any point a TICKscript could come in handy as a template, the recommended best practice is to always use `var` declarations in TICKscripts. +Normal tasks work and, if at a later date you decide another similar task is needed, you can easily create a template from the existing TICKscript and define additional tasks with variable files. + +## Using the `-file` flag + +Starting with Kapacitor 1.4, tasks may be generated from templates using a task definition file. 
+The task definition file is extended from the variables file of previous releases. +Three new fields are made available. + +* The `template-id` field is used to select the template. +* The `dbrps` field is used to define one or more database/retention policy sets that the task will use. +* The `vars` field groups together the variables, which were the core of the file in previous releases. + +This file may be in either JSON or YAML. + +A task for a memory-based alert can be created using the same template that was defined above. +Create a `mem_template_task.json` file using the snippet in Example 7. + + +_**Example: A task definition file in JSON**_ +```json +{ + "template-id": "generic_mean_alert", + "dbrps": [{"db": "telegraf", "rp": "autogen"}], + "vars": { + "measurement": {"type" : "string", "value" : "mem" }, + "groups": {"type": "list", "value": [{"type":"star", "value":"*"}]}, + "field": {"type" : "string", "value" : "used_percent" }, + "warn": {"type" : "lambda", "value" : "\"mean\" > 80.0" }, + "crit": {"type" : "lambda", "value" : "\"mean\" > 90.0" }, + "window": {"type" : "duration", "value" : "10m" }, + "slack_channel": {"type" : "string", "value" : "#alerts_testing" } + } +} +``` + +The task can then be defined with the `file` parameter which, with the new content of the +task definition file, replaces the command-line parameters `template`, `dbrp`, and `vars`. + +``` +kapacitor define mem_alert -file mem_template_task.json +``` + +Using YAML, the task definition file `mem_template_task.yaml` appears as follows: + +_**Example: A task definition file in YAML**_ +```yaml +template-id: generic_mean_alert +dbrps: +- db: telegraf + rp: autogen +vars: + measurement: + type: string + value: mem + groups: + type: list + value: + - type: star + value: "*" + field: + type: string + value: used_percent + warn: + type: lambda + value: '"mean" > 80.0' + crit: + type: lambda + value: '"mean" > 90.0' + window: + type: duration + value: 10m + slack_channel: + type: string + value: "#alerts_testing" +``` + +The task can then be defined with the `file` parameter as previously shown. + +``` +kapacitor define mem_alert -file mem_template_task.yaml +``` + +## Specifying `dbrp` implicitly + +The following is a simple example that defines a template that computes the mean of a field and triggers an alert, where the `dbrp` is specified in the template. 
+
+_**Example: Defining the database and retention policy in the template**_
+```js
+dbrp "telegraf"."autogen"
+
+// Which measurement to consume
+var measurement string
+// Optional where filter
+var where_filter = lambda: TRUE
+// Optional list of group by dimensions
+var groups = [*]
+// Which field to process
+var field string
+// Warning criteria, has access to 'mean' field
+var warn lambda
+// Critical criteria, has access to 'mean' field
+var crit lambda
+// How much data to window
+var window = 5m
+// The slack channel for alerts
+var slack_channel = '#alerts'
+
+stream
+    |from()
+        .measurement(measurement)
+        .where(where_filter)
+        .groupBy(groups)
+    |window()
+        .period(window)
+        .every(window)
+    |mean(field)
+    |alert()
+        .warn(warn)
+        .crit(crit)
+        .slack()
+        .channel(slack_channel)
+```
+
+Define a new template from this template script:
+
+```bash
+kapacitor define-template implicit_generic_mean_alert -tick path/to/script.tick
+```
+
+Then define a task in a YAML file, `implicit_mem_template_task.yaml`.
+
+_**Example: A YAML vars file leveraging a template with a predefined database and retention policy**_
+```yaml
+template-id: implicit_generic_mean_alert
+vars:
+  measurement:
+    type: string
+    value: mem
+  groups:
+    type: list
+    value:
+    - type: star
+      value: "*"
+  field:
+    type: string
+    value: used_percent
+  warn:
+    type: lambda
+    value: '"mean" > 80.0'
+  crit:
+    type: lambda
+    value: '"mean" > 90.0'
+  window:
+    type: duration
+    value: 10m
+  slack_channel:
+    type: string
+    value: "#alerts_testing"
+```
+
+Create the task:
+
+```bash
+kapacitor define mem_alert -file implicit_mem_template_task.yaml
+```
+
+> **NOTE:** When the `dbrp` value has already been declared in the template,
+the `dbrps` field must **not** appear in the task definition file, e.g. in `implicit_mem_template_task.yaml`.
+Doing so will cause an error.
diff --git a/content/kapacitor/v1.5/working/using_alert_topics.md b/content/kapacitor/v1.5/working/using_alert_topics.md
new file mode 100644
index 000000000..e36ed3758
--- /dev/null
+++ b/content/kapacitor/v1.5/working/using_alert_topics.md
@@ -0,0 +1,235 @@
+---
+title: Using alert topics
+aliases:
+  - /kapacitor/v1.5/examples/using_alert_topics/
+  - /kapacitor/v1.5/guides/using_alert_topics/
+menu:
+  kapacitor_1_5:
+    name: Alerts - Using Topics
+    identifier: using_alert_topics
+    weight: 4
+    parent: work-w-kapacitor
+---
+
+Kapacitor's alert system allows a publish-and-subscribe design pattern to be used.
+Alerts are published to a `topic` and `handlers` subscribe to it.
+
+This example walks the reader through setting up a simple CPU threshold alert that sends alerts to Slack.
+
+### Requirements
+
+It is expected that the reader is already familiar with the basics of Kapacitor
+presented in the [Getting Started](/kapacitor/v1.5/introduction/getting-started/)
+guide. The reader should also have a basic understanding of working with tasks
+and [TICKscripts](/kapacitor/v1.5/tick/introduction/).
+
+It is further expected that a working Telegraf and Kapacitor are installed to
+walk through this example. If these are not installed, please take a moment to
+set up both of them.
+
+## The task
+
+This walk-through demonstrates how to set up a `cpu` alert topic and send alerts to that topic.
+
+First, define a simple CPU alert.
+
+```go
+dbrp "telegraf"."autogen"
+
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy(*)
+    |alert()
+        .warn(lambda: "usage_idle" < 20)
+        .crit(lambda: "usage_idle" < 10)
+        // Send alerts to the `cpu` topic
+        .topic('cpu')
+```
+
+The above TICKscript creates a threshold alert for CPU usage and sends the alerts to the `cpu` topic.
+
+Save the above script as `cpu_alert.tick`.
+Create and start the task by running the following commands:
+
+```sh
+$ kapacitor define cpu_alert -tick cpu_alert.tick
+$ kapacitor enable cpu_alert
+```
+
+## The Slack handler
+
+At this point, a Kapacitor task is generating alerts and sending them to
+the `cpu` topic, but since the topic does not have any handlers, nothing happens
+with the alerts.
+
+Confirm that there are no handlers by checking the topic:
+
+```sh
+$ kapacitor show-topic cpu
+```
+
+The output should look something like:
+
+```
+ID: cpu
+Level: OK
+Collected: 27
+Handlers: []
+Events:
+Event Level Message Date
+cpu:cpu=cpu3,host=localhost OK cpu:cpu=cpu3,host=localhost is OK 23 Jan 17 14:04 MST
+```
+
+>NOTE: If the error message `unknown topic: "cpu"` is returned, be aware
+that topics are created only when needed; if the task has not yet triggered an alert, the topic will not exist.
+If this error is returned, try to cause an alert to be triggered:
+either change the thresholds on the task or create some CPU load.
+
+
+
+To configure a handler, first the handler binding must be defined.
+A handler binding has a few parts:
+
+* Topic - The topic ID.
+* ID - The unique ID of the handler.
+* Kind - The kind of handler, in this case a `slack` handler.
+* Match - A lambda expression to filter matching alerts. By default all alerts match.
+* Options - A map of values; differs by kind.
+
+The Slack handler binding can be defined in either YAML or JSON; YAML is used here:
+
+```yaml
+topic: cpu
+id: slack
+kind: slack
+options:
+  channel: '#alerts'
+```
+
+The above handler binding definition defines a handler that sends alerts to the Slack channel `#alerts`.
+
+Save the above text as `slack.yaml`.
+Now the new handler can be bound to the topic via the `kapacitor` client.
+To do this, the `define-topic-handler` command is used. It takes one argument.
+
+```
+$ kapacitor define-topic-handler
+Usage: kapacitor define-topic-handler <path to handler spec file>
+```
+
+```sh
+$ kapacitor define-topic-handler ./slack.yaml
+```
+
+Validate the handler was defined as expected:
+
+```sh
+$ kapacitor show-topic-handler cpu slack
+```
+
+Finally, confirm the topic is configured as expected:
+
+```sh
+$ kapacitor show-topic cpu
+```
+
+The output should look something like:
+
+```
+ID: cpu
+Level: OK
+Collected: 27
+Handlers: [slack]
+Events:
+Event Level Message Date
+cpu:cpu=cpu3,host=localhost OK cpu:cpu=cpu3,host=localhost is OK 23 Jan 17 14:04 MST
+```
+
+That is it! Future alerts triggered by the `cpu_alert` task will be sent to Slack via the `cpu` topic.
+
+## Summary
+
+While it is simple to define alert handlers directly in the TICKscript, tracking and maintenance can become burdensome once many tasks have been created.
+Using topics decouples the definition of the alert from the handling of the alert.
+With topic and handler bindings defined, changing the Slack channel is a single API call to update the Slack handler. More importantly, no TICKscripts have to change.
+
+## Going further
+
+### Chaining topics
+
+Topics can be chained together using the `publish` action handler.
+This allows alerts to be further grouped into various topics.
+
+For example, the above task could be modified to send alerts to the `system` topic instead of the `cpu` topic.
+This way, all system-related alerts can be handled in a consistent manner.
+
+The new TICKscript:
+
+```go
+stream
+    |from()
+        .measurement('cpu')
+        .groupBy(*)
+    |alert()
+        .warn(lambda: "usage_idle" < 20)
+        .crit(lambda: "usage_idle" < 10)
+        // Send alerts to the `system` topic
+        .topic('system')
+```
+
+To send all system alerts to a new topic `ops_team`, create a new handler for the `system` topic.
+
+```yaml
+topic: system
+id: publish-to-ops_team
+kind: publish
+options:
+  topics:
+    - ops_team
+```
+
+```sh
+kapacitor define-topic-handler ./publish-to-ops_team.yaml
+```
+
+Since the operations team has an on-call rotation, handling of alerts on the `ops_team` topic can be set up accordingly.
+
+
+```yaml
+topic: ops_team
+id: victorops
+kind: victorops
+options:
+  routing-key: ops_team
+```
+
+```sh
+kapacitor define-topic-handler ./victorops.yaml
+```
+
+Now all `system` related alerts are sent to the `ops_team` topic, which in turn is handled in VictorOps.
+
+### Match conditions
+
+Match conditions can be applied to handlers.
+Only alerts matching the conditions will be handled by that handler.
+
+For example, it is typical to send Slack messages only when alerts change state, instead of every time an alert is evaluated.
+Modifying the Slack handler definition from the first example results in the following:
+
+```yaml
+topic: cpu
+id: slack
+kind: slack
+match: changed() == TRUE
+options:
+  channel: '#alerts'
+```
+
+
+Now update the handler, and only alerts that change state will be sent to Slack.
+
+```
+kapacitor define-topic-handler ./slack.yaml
+```
diff --git a/content/telegraf/v1.15/_index.md b/content/telegraf/v1.15/_index.md
new file mode 100644
index 000000000..c8a99bcd5
--- /dev/null
+++ b/content/telegraf/v1.15/_index.md
@@ -0,0 +1,27 @@
+---
+title: Telegraf 1.15 documentation
+description: >
+  Documentation for Telegraf, the plugin-driven server agent of the InfluxData
+  time series platform, used to collect and report metrics.
+  Telegraf supports four categories of plugins -- input, output, aggregator, and processor.
+menu:
+  telegraf:
+    name: v1.15
+    identifier: telegraf_1_15
+    weight: 1
+---
+
+Telegraf is a plugin-driven server agent for collecting & reporting metrics,
+and is the first piece of the [TICK stack](https://influxdata.com/time-series-platform/).
+Telegraf has plugins to source a variety of metrics directly from the system it's running on, pull metrics from third party APIs, or even listen for metrics via statsd and Kafka consumer services.
+It also has output plugins to send metrics to a variety of other datastores, services, and message queues, including InfluxDB, Graphite, OpenTSDB, Datadog, Librato, Kafka, MQTT, NSQ, and many others.
+
+## Key features
+
+Here are some of the features that Telegraf currently supports that make it a great choice for metrics collection.
+
+* Written entirely in Go.
+It compiles into a single binary with no external dependencies.
+* Minimal memory footprint.
+* Plugin system allows new inputs and outputs to be easily added.
+* A wide range of plugins already exists for many popular services and APIs.
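+
+As an example of the plugin system in action, a configuration file can be generated with only the plugins you need enabled (a quick sketch; the `cpu`, `mem`, and `influxdb` plugins named here are just illustrative choices):
+
+```sh
+# Generate a telegraf.conf with only the cpu and mem inputs and the influxdb output
+telegraf --input-filter cpu:mem --output-filter influxdb config > telegraf.conf
+```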
diff --git a/content/telegraf/v1.15/about_the_project/_index.md b/content/telegraf/v1.15/about_the_project/_index.md new file mode 100644 index 000000000..fcd6cf763 --- /dev/null +++ b/content/telegraf/v1.15/about_the_project/_index.md @@ -0,0 +1,26 @@ +--- + title: About the Telegraf project + + menu: + telegraf_1_15: + name: About the project + weight: 10 +--- + +## [Telegraf release notes](/telegraf/v1.15/about_the_project/release-notes-changelog/) + +## [Contributing to Telegraf](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md) + +## [Contributor License Agreement (CLA)](https://influxdata.com/community/cla/) + +## [License](https://github.com/influxdata/telegraf/blob/master/LICENSE) + +## Third party software +InfluxData products contain third party software, which means the copyrighted, patented, or otherwise legally protected +software of third parties that is incorporated in InfluxData products. + +Third party suppliers make no representation nor warranty with respect to such third party software or any portion thereof. +Third party suppliers assume no liability for any claim that might arise with respect to such third party software, nor for a +customer’s use of or inability to use the third party software. + +The [list of third party software components, including references to associated licenses and other materials](https://github.com/influxdata/telegraf/blob/release-1.12/docs/LICENSE_OF_DEPENDENCIES.md), is maintained on a version by version basis. diff --git a/content/telegraf/v1.15/about_the_project/cla.md b/content/telegraf/v1.15/about_the_project/cla.md new file mode 100644 index 000000000..ee7146148 --- /dev/null +++ b/content/telegraf/v1.15/about_the_project/cla.md @@ -0,0 +1,10 @@ +--- +title: InfluxData Contributor License Agreement (CLA) + +menu: + telegraf_1_15: + name: Contributor License Agreement (CLA) + parent: About the project + weight: 30 + url: https://influxdata.com/community/cla/ +--- diff --git a/content/telegraf/v1.15/about_the_project/contributing.md b/content/telegraf/v1.15/about_the_project/contributing.md new file mode 100644 index 000000000..34d683f31 --- /dev/null +++ b/content/telegraf/v1.15/about_the_project/contributing.md @@ -0,0 +1,10 @@ +--- + title: Contributing to Telegraf + + menu: + telegraf_1_15: + name: Contributing + parent: About the project + weight: 20 + url: https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md +--- diff --git a/content/telegraf/v1.15/about_the_project/license.md b/content/telegraf/v1.15/about_the_project/license.md new file mode 100644 index 000000000..81b3d9969 --- /dev/null +++ b/content/telegraf/v1.15/about_the_project/license.md @@ -0,0 +1,10 @@ +--- + title: License + + menu: + telegraf_1_15: + name: License + parent: About the project + weight: 40 + url: https://github.com/influxdata/telegraf/blob/master/LICENSE +--- diff --git a/content/telegraf/v1.15/about_the_project/release-notes-changelog.md b/content/telegraf/v1.15/about_the_project/release-notes-changelog.md new file mode 100644 index 000000000..13efb9021 --- /dev/null +++ b/content/telegraf/v1.15/about_the_project/release-notes-changelog.md @@ -0,0 +1,2469 @@ +--- +title: Telegraf 1.15 release notes +description: See the new features, bug fixes, breaking changes, and enhancements in the latest and earlier Telegraf releases. +menu: + telegraf_1_15: + name: Release notes + weight: 10 + parent: About the project +--- + +## v1.15.1 [2020-07-22] + +### Bug fixes + +- Fix architecture in non-amd64 deb and rpm packages. 
+ +## v1.15.0 [2020-07-22] + +{{% warn %}} +Critical bug that impacted non-amd64 packages was introduced in 1.15.0. **Do not install this release.** Instead, install 1.15.1, which includes the features, new plugins, and bug fixes below. +{{% /warn %}} + +### Breaking changes + +Breaking changes are updates that may cause Telegraf plugins to fail or function incorrectly. If you have one of the following plugins installed, make sure to update your plugin as needed: + +- **Logparser** (`logparser`) input plugin: Deprecated. Use the `tail` input with `data_format = "grok"` as a replacement. +- **Cisco GNMI Telemetry** (`cisco_telemetry_gnmi`) input plugin: Renamed to `gnmi` to better reflect its general support for gNMI devices. +- **Splunkmetric** (`splunkmetric`) serializer: Several fields used primarily for debugging have been removed. If you are making use of these fields, they can be added back with the `tag` option. + +### New plugins + +#### Inputs + +- [NGINX Stream STS Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/nginx_sts/README.md)(`nginx_sts`) - Contributed by [@zdmytriv](https://github.com/zdmytriv) +- [Redfish Input Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redfish/README.md)(`redfish`) - Contributed by [@sarvanikonda](https://github.com/sarvanikonda) + +#### Outputs + +- [Execd Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/execd/README.md)(`execd`) - Contributed by [@influxdata](https://github.com/influxdata) +- [New Relic Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/newrelic/README.md)(`newrelic`) - Contributed by [@hsingkalsi](https://github.com/hsingkalsi) + +#### Processors + +- [Defaults Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/defaults/README.md)(`defaults`) - Contributed by [@jregistr](https://github.com/jregistr) +- [Execd Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/execd/README.md)(`execd`) - Contributed by [@influxdata](https://github.com/influxdata) +- [Filepath Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/filepath/README.md)(`filepath`) - Contributed by [@kir4h](https://github.com/kir4h) +- [Network Interface Name Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/ifname/README.md)(`ifname`) - Contributed by [@influxdata](https://github.com/influxdata) +- [Port Name Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/port_name/README.md)(`port_name`) - Contributed by [@influxdata](https://github.com/influxdata) +- [Reverse DNS Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/reverse_DNS/README.md)(`reverse_dns`) - Contributed by [@influxdata](https://github.com/influxdata) +- [Starlark Processor Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md)(`starlark`) - Contributed by [@influxdata](https://github.com/influxdata) + +### Features + +- Telegraf's `--test` mode runs processors and aggregators before printing metrics. +- Official packages built with Go 1.14.5. +- When updating the Debian package, you will no longer be prompted to merge the `telegraf.conf` file. Instead, the new version will be installed to `/etc/telegraf/telegraf.conf.sample`. The `tar` and `zip` packages now include the version in the top-level directory. 
+- Allow per-input overriding of `collection_jitter` and `precision`.
+- Deploy Telegraf configuration as `telegraf.conf.sample`.
+- Use Docker log timestamp as metric time.
+- Apply ping deadline to DNS lookup.
+- Support multiple templates for graphite serializers.
+- Add configurable separator to graphite serializer and output.
+- Add support for SIGUSR1 to trigger flush.
+- Add support for once mode that writes to outputs and exits.
+- Run processors and aggregators during test mode.
+- Add timezone configuration to CSV parser.
+
+
+#### Input plugin updates
+
+- **Ceph Storage** (`ceph`): Add support for MDS and RGW sockets.
+- **ECS** (`ecs`): Add v3 metadata support.
+- **Fibaro** (`fibaro`): Add support for battery-level monitoring.
+- **File** (`file`):
+  - Support UTF-16.
+  - Exclude `csv_timestamp_column` and `csv_measurement_column` from fields.
+- **HTTP** (`http`): Add reading bearer token.
+- **HTTP Listener v2** (`http_listener_v2`): Add ability to specify HTTP headers as tags.
+- **HTTP Response** (`http_response`):
+  - Add authentication support.
+  - Allow collection of HTTP headers.
+  - Add ability to collect response body as field.
+- **Icinga 2** (`icinga2`):
+  - Fix source field.
+  - Add tag for server hostname.
+- **InfluxDB Listener** (`influxdb_listener`): Add option to save retention policy as tag.
+- **IPtables** (`iptables`): Extract target as a tag for each rule.
+- **Kibana** (`kibana`): Fix `json unmarshal` error.
+- **Kubernetes Inventory** (`kube_inventory`): Add ability to add selectors as tags.
+- **Mem** (`mem`): Add laundry on FreeBSD.
+- **Microsoft SQL Server** (`sqlserver`):
+  - Add `VolumeSpace` query.
+  - Add `cpu` query.
+  - Add counter type to `perfmon` collector.
+  - Improve compatibility with older server versions.
+  - Fix typo in `total_elapsed_time_ms` field.
+- **Modbus** (`modbus`):
+  - Add support for 64-bit integer types.
+  - Add retry when replica is busy.
+  - Add ability to specify measurement per register.
+- **MongoDB** (`mongodb`):
+  - Add commands stats.
+  - Add additional fields.
+  - Add cluster state integer.
+  - Add option to disable cluster status.
+  - Add additional concurrent transaction information.
+- **NVIDIA SMI** (`nvidia_smi`): Add video codec stats.
+- **Procstat** (`procstat`):
+  - Improve performance.
+  - Fix memory leak.
+- **S.M.A.R.T.** (`smart`): Add missing `nvme` attributes.
+- **SNMP Trap** (`snmp_trap`): Add SNMPv3 trap support.
+- **System** (`system`): Fix incorrect uptime when clock is adjusted.
+- **Tail** (`tail`): Support UTF-16.
+
+#### Processor plugin updates
+
+- **Date** (`date`):
+  - Add field creation.
+  - Add integer unix time support.
+- **Enum** (`enum`): Add integer mapping support.
+- **Wavefront** (`wavefront`): Add `truncate_tags` setting.
+
+
+### Bug fixes
+- Fix ability to write metrics to CloudWatch with IMDSv1 disabled.
+- Fix vSphere 6.7 missing data issue.
+- Fix gzip support in `socket_listener` with tcp sockets.
+- Fix interval drift when `round_interval` is set in agent.
+- Fix incorrect uptime when clock is adjusted.
+- Remove trailing backslash from tag keys/values in `influx` serializer.
+- Fix incorrect Azure SQL DB server properties.
+- Send metrics in FIFO order.
+
+## v1.14.5 [2020-06-30]
+
+### Bug fixes
+
+- Improve the performance of the `procstat` input.
+- Fix ping exit code handling on non-Linux operating systems.
+- Fix errors in output of the `sensors` command.
+- Prevent startup when tags have incorrect type in configuration file.
+- Fix panic with GJSON multiselect query in JSON parser.
+- Allow any key usage type on x509 certificate.
+- Allow histograms and summary types without buckets or quantiles in `prometheus_client` output.
+
+## v1.14.4 [2020-06-09]
+
+### Bug fixes
+
+- Fix the `cannot insert the value NULL` error with the `PerformanceCounters` query in the `sqlserver` input plugin.
+- Fix a typo in the naming of the `gc_cpu_fraction` field in the `influxdb` input plugin.
+- Fix a numeric to bool conversion in the `converter` processor.
+- Fix an issue with the `influx` stream parser blocking when the data is in buffer.
+
+## v1.14.3 [2020-05-19]
+
+### Bug fixes
+
+- Use same timestamp for all objects in arrays in the `json` parser.
+- Handle multiple metrics with the same timestamp in `dedup` processor.
+- Fix reconnection of timed out HTTP2 connections in `influxdb` outputs.
+- Fix negative value parsing in `ipmi_sensor` input.
+
+## v1.14.2 [2020-04-28]
+
+### Bug fixes
+
+- Trim white space from instance tag in `sqlserver` input.
+- Use increased AWS CloudWatch GetMetricData limit of 500 metrics per call.
+- Fix limit on dimensions in `azure_monitor` output.
+- Fix 64-bit integer to string conversion in `snmp` input.
+- Fix shard indices reporting in `elasticsearch` input plugin.
+- Ignore fields with Not a Number or Infinity floats in the JSON serializer.
+- Fix typo in name of `gc_cpu_fraction` field of the `kapacitor` input.
+- Don't retry create database when using `database_tag` if forbidden by the server in `influxdb` output.
+- Allow CR and FF inside of string fields in InfluxDB line protocol parser.
+
+## v1.14.1 [2020-04-14]
+
+### Bug fixes
+
+- Fix `PerformanceCounter` query performance degradation in `sqlserver` input.
+- Fix error when using the `Name` field in template processor.
+- Fix export timestamp not working for Prometheus on v2.
+- Fix exclude database and retention policy tags.
+- Fix status path when using globs in `phpfpm`.
+
+## v1.14 [2020-03-26]
+
+### Breaking changes
+
+Breaking changes are updates that may cause Telegraf plugins to fail or function incorrectly. If you have one of the following plugins installed, make sure to update your plugin as needed:
+
+- **Microsoft SQL Server** (`sqlserver`) input plugin: Renamed the `sqlserver_azurestats` measurement to `sqlserver_azure_db_resource_stats` to resolve an issue where numeric metrics were previously being reported incorrectly as strings.
+- **Date** (`date`) processor plugin: Now uses the UTC timezone when creating its tag. Previously, the local time was used.
+
+{{% note %}}
+Support for SSL v3.0 is deprecated in this release.
+Telegraf now uses the [Go TLS library](https://golang.org/pkg/crypto/tls/).
+{{% /note %}} + +### New plugins + +#### Inputs + +- [Arista LANZ Consumer](`lanz`) - Contributed by [@timhughes](https://github.com/timhughes) +- [ClickHouse](https://github.com/influxdata/telegraf/blob/release-1.14/plugins/inputs/clickhouse/README.md)(`clickhouse`) - Contributed by [@kshvakov](https://github.com/kshvakov) +- [Execd](https://github.com/influxdata/telegraf/blob/release-1.14/plugins/inputs/execd/README.md)(`execd`) - Contributed by [@jgraichen](https://github.com/jgraichen) +- [Event Hub Consumer](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/eventhub_consumer/README.md)(`eventhub_consumer`) - Contributed by [@R290](https://github.com/R290) +- [InfiniBand](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/infiniband/README.md)(`infiniband`) - Contributed by [@willfurnell](https://github.com/willfurnell) +- [Modbus](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/modbus/README.md)(`modbus`) - Contributed by [@garciaolais](https://github.com/garciaolais) +- [Monit](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/monit/README.md)(`monit`) - Contributed by [@SirishaGopigiri](https://github.com/SirishaGopigiri) +- [SFlow](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/sflow/README.md)(`sflow`) - Contributed by [@influxdata](https://github.com/influxdata) +- [Wireguard](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/wireguard/README.md)(`wireguard`) - Contributed by [@LINKIWI](https://github.com/LINKIWI) + +#### Processors + +- [Dedup](`dedup`) - Contributed by [@igomura](https://github.com/igomura) +- [S2 Geo](`s2geo`) - Contributed by [@alespour](https://github.com/alespour) +- [Template](`template`) - Contributed by [@RobMalvern](https://github.com/RobMalvern) + +#### Outputs + +- [Warp10](`warp10`) - Contributed by [@aurrelhebert](https://github.com/aurrelhebert) + +### Features + +#### Input plugin updates + +- **Apache Kafka Consumer** (`kafka_consumer`): Add SASL version control to support Microsoft Azure Event Hub. +- **Apcupsd** (`apcupsd`): Add new tag `model` and new metrics: `battery_date`, `nominal_input_voltage`, `nominal_battery_voltage`, `nominal_power`, `firmware`. +- **Cisco Model-driven Telemetry (MDT)** (`cisco_telemetry_gnmi`) input plugin: + - Add support for GNMI DecimalVal type. + - Replace dash (`-`) with underscore (`_`) when handling embedded tags. +- **DiskIO** (`diskio`): Add counters for merged reads and writes. +- **IPMI Sensor** (`ipmi_sensor`): Add `use_sudo` option. +- **Jenkins** (`jenkins`): + - Add `source` and `port` tags to `jenkins_job` metrics. + - Add new fields `total_executors` and `busy_executors`. +- **Kubernetes** (`kubernetes`): Add ability to collect pod labels. +- **Microsoft SQL Server** (`sqlserver`): + - Add RBPEX IO statistics to DatabaseIO query. + - Add space on disk for each file to DatabaseIO query. + - Calculate DB Name instead of GUID in `physical_db_name`. + - Add `DatabaseIO` TempDB per Azure DB. + - Add `query_include` option for explicitly including queries. + - Add `volume_mount_point` to DatabaseIO query. +- **MongoDB** (`mongodb`): + - Add `page_faults` for WiredTiger storage engine. + - Add latency statistics. + - Add replica set tag (`rs_name`). +- **NATS Consumer** (`nats_consumer`): Add support for credentials file. +- **NGINX Plus API** (`nginx_plus_api`): Add support for new endpoints. +- **OpenLDAP** (`openldap`): Add support for MDB database information. 
+- **PHP-FPM** (`phpfpm`): Allow globs in FPM unix socket paths (`unixsocket`). +- **Procstat** (`procstat`): Add process `created_at` time. +- **Prometheus** (`prometheus`) input plugin: Add `label` and `field` selectors for Kubernetes service discovery. +- **RabbitMQ** (`rabbitmq`): Add `slave_nodes` and `synchronized_slave_nodes` metrics. +- **StatsD** (`statsd`): Add UDP internal metrics. +- **Unbound** (`unbound`): Expose [`-c cfgfile` option of `unbound-control`](https://linux.die.net/man/8/unbound-control) and set the default unbound configuration (`config_file= "/etc/unbound/unbound.conf`) in the Telegraf configuration file. +- **VMware vSphere** (`vsphere`): Add option to exclude resources by inventory path, including `vm_exclude`, `host_exclude`, `cluster_exclude` (for both clusters and datastores), and `datacenter_exclude`. +- **X.509 Certificate** (`x509_cert`): Add `server_name` override. + +#### Output plugin updates + +- **Apache Kafka** (`kafka`): Add `topic_tag` and `exclude_topic_tag` options. +- **Graylog** (`graylog`): Allow a user defined field (`short_message_field`) to be used as the `GELF short_message`. +- **InfluxDB v1.x** (`influxdb`): Add support for setting the retention policy using a tag (`retention_policy_tag`). +- **NATS Output** (`nats`): Add support for credentials file. + +#### Aggregator plugin updates + +- **Histogram** (`histogram`): Add non-cumulative histogram. + +#### Processor plugin updates + +- **Converter** (`converter`): Add support for converting `tag` or `field` to `measurement`. +- **Date** (`date`): Add date offset and timezone options. +- **Strings** (`strings`): Add support for titlecase transformation. + +### Bug fixes + +- Fix Telegraf log rotation to use actual file size instead of bytes written. +- Fix internal Telegraf metrics to prevent output split into multiple lines. +- **Chrony** (`chrony`) input plugin: When plugin is enabled, search for `chronyc` only. +- **Microsoft SQL Server** (`sqlserver`) input plugin: + - Fix conversion to floats in AzureDBResourceStats query. + - Fix case sensitive collation. + - Fix several issues with DatabaseIO query. + - Fix schedulers query compatibility with pre SQL-2016. +- **InfluxDB Listener** (`influxdb_listener`): + - Fix request failing with EOF. + - Continue parsing after error. + - Set headers on ping URL. + +## v1.13.4 [2020-02-25] + +### Release Notes +Official packages now built with Go 1.13.8. + +### Bug fixes +- Parse NaN values from summary types in Prometheus (`prometheus`) input plugin. +- Fix PgBouncer (`pgbouncer`) input plugin when used with newer PgBouncer versions. +- Support up to 8192 stats in the Ethtool (`ethtool`) input plugin. +- Fix performance counters collection on named instances in Microsoft SQL Server (`sqlserver`) input plugin. +- Use add time for Prometheus expiration calculation. +- Fix inconsistency with input error counting in Telegraf v1.x (`internal`) input plugin. +- Use the same timestamp per call if no time is provided in Prometheus (`prometheus`) input plugin. + +## v1.13.3 [2020-02-04] + +### Bug fixes + +- Update Kibana (`kibana`) input plugin to support Kibana 6.4 and later. 
+- Prevent duplicate `TrackingIDs` from being returned in the following queue consumer input plugins:
+  - Amazon Kinesis Consumer (`kinesis_consumer`)
+  - AMQP Consumer (`amqp_consumer`)
+  - Apache Kafka Consumer (`kafka_consumer`)
+  - MQTT Consumer (`mqtt_consumer`)
+  - NATS Consumer (`nats_consumer`)
+  - NSQ Consumer (`nsq_consumer`)
+- Support up to 4096 statistics in the Ethtool (`ethtool`) input plugin.
+- Remove expired metrics from the Prometheus Client (`prometheus_client`) output plugin. Previously, expired metrics were only removed when new metrics were added.
+
+## v1.13.2 [2020-01-21]
+
+### Bug fixes
+
+- Warn without error when Processes (`processes`) input is started on Windows.
+- Only parse certificate blocks in X.509 Certificate (`x509_cert`) input plugin.
+- Add custom attributes for all resource types in VMware vSphere (`vsphere`) input plugin.
+- Support URL agent address form with UDP in SNMP (`snmp`) input plugin.
+- Record device fields in the SMART (`smart`) input plugin when `attributes` is `false`.
+- Remove invalid timestamps from Kafka messages.
+- Update `json` parser to fix the `json_strict` option and set its default to `true`.
+
+## v1.13.1 [2020-01-08]
+
+### Bug fixes
+- Fix ServerProperty query that stopped working on Azure after failover.
+- Add leading period to OID in SNMP v1 generic traps.
+- Fix missing config fields in Prometheus serializer.
+- Fix panic on connection loss with undelivered messages in MQTT Consumer
+  (`mqtt_consumer`) input plugin.
+- Encode query hash fields as hex strings in SQL Server (`sqlserver`) input plugin.
+- Invalidate diskio cache if the metadata mtime has changed.
+- Show platform not supported warning only on plugin creation.
+- Fix RabbitMQ (`rabbitmq`) input plugin failing to complete gather after a request error.
+- Fix `/sbin/init --version` being executed on Telegraf startup.
+- Use last path element as field key if path fully specified in Cisco GNMI Telemetry
+  (`cisco_telemetry_gnmi`) input plugin.
+
+## v1.13 [2019-12-12]
+
+### Release Notes
+Official packages built with Go 1.13.5.
+The Prometheus Format (`prometheus`) input plugin and Prometheus Client (`prometheus_client`)
+output plugin have a new mapping to and from Telegraf metrics, which can be enabled by setting `metric_version = 2`.
+The original mapping is deprecated. When both plugins have the same setting,
+passthrough metrics are unchanged.
+Refer to the [Prometheus input plugin](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/prometheus/README.md)
+for details about the mapping.
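+
+A minimal sketch of enabling the new mapping on both sides; the `urls` and `listen` values here are placeholder examples, not defaults:
+
+```toml
+[[inputs.prometheus]]
+  urls = ["http://localhost:9100/metrics"]  # placeholder scrape target
+  metric_version = 2
+
+[[outputs.prometheus_client]]
+  listen = ":9273"  # placeholder listen address
+  metric_version = 2
+```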
+
+### New Inputs
+- [Azure Storage Queue](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/azure_storage_queue/README.md)
+  (`azure_storage_queue`) - Contributed by [@mjiderhamn](https://github.com/mjiderhamn)
+- [Ethtool](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/ethtool/README.md)
+  (`ethtool`) - Contributed by [@philippreston](https://github.com/philippreston)
+- [SNMP Trap](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/snmp_trap/README.md)
+  (`snmp_trap`) - Contributed by [@influxdata](https://github.com/influxdata)
+- [Suricata](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/suricata/README.md)
+  (`suricata`) - Contributed by [@satta](https://github.com/satta)
+- [Synproxy](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/synproxy/README.md)
+  (`synproxy`) - Contributed by [@rfrenayworldstream](https://github.com/rfrenayworldstream)
+- [Systemd Units](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/systemd_units/README.md)
+  (`systemd_units`) - Contributed by [@benschweizer](https://github.com/benschweizer)
+
+### New Processors
+- [Clone](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/processors/clone/README.md)
+  (`clone`) - Contributed by [@adrianlzt](https://github.com/adrianlzt)
+
+### New Aggregators
+- [Merge](https://github.com/influxdata/telegraf/blob/release-1.13/plugins/aggregators/merge/README.md)
+  (`merge`) - Contributed by [@influxdata](https://github.com/influxdata)
+
+### Features
+- Add per-node memory stats to RabbitMQ (`rabbitmq`) input plugin.
+- Add ability to read query from file to PostgreSQL (`postgresql_extensible`) input plugin.
+- Add replication metrics to the Redis (`redis`) input plugin.
+- Support NX-OS telemetry extensions in Cisco Model-driven Telemetry (`cisco_telemetry_mdt`)
+  input plugin.
+- Allow `graphite` parser to create `Inf` and `NaN` values.
+- Use prefix base detection for ints in `grok` parser.
+- Add more performance counter metrics to Microsoft SQL Server (`sqlserver`) input plugin.
+- Add millisecond unix time support to `grok` parser.
+- Add container ID as optional source tag to Docker (`docker`) and Docker Log
+  (`docker_log`) input plugins.
+- Add `lang` parameter to OpenWeatherMap (`openweathermap`) input plugin.
+- Log file open errors at debug level in Tail (`tail`) input plugin.
+- Add timeout option to Amazon CloudWatch (`cloudwatch`) input plugin.
+- Support custom success codes in HTTP (`http`) input plugin.
+- Improve IPVS (`ipvs`) input plugin error strings and logging.
+- Add strict mode to JSON parser that can be disabled to ignore invalid items.
+- Add support for Kubernetes 1.16 and remove deprecated API usage.
+- Add gathering of RabbitMQ federation link metrics.
+- Add bearer token defaults for Kubernetes plugins.
+- Add support for SNMP over TCP.
+- Add support for per-output flush jitter.
+- Add a nameable file tag to File (`file`) input plugin.
+- Add Splunk MultiMetric support.
+- Add support for sending HTTP Basic Auth in InfluxDB (`influxdb`) input plugin.
+- Add ability to configure the `url` tag in the Prometheus Format (`prometheus`) input plugin.
+- Add Prometheus `metric_version=2` mapping to internal metrics/line protocol.
+- Add Prometheus `metric_version=2` support to Prometheus Client (`prometheus_client`) output plugin.
+- Add `content_encoding` compression support to Socket Listener (`socket_listener`) input plugin.
+- Add high-resolution metrics support to Amazon CloudWatch (`cloudwatch`) output plugin.
+- Add `SReclaimable` and `SUnreclaim` to Memory (`mem`) input plugin.
+- Allow multiple certificates per file in X.509 Certificate (`x509_cert`) input plugin.
+- Add additional tags to the X.509 Certificate (`x509_cert`) input plugin.
+- Add batch data format support to File (`file`) output plugin.
+- Support partition assignment strategy configuration in Apache Kafka Consumer
+  (`kafka_consumer`) input plugin.
+- Add node type tag to MongoDB (`mongodb`) input plugin.
+- Add `uptime_ns` field to MongoDB (`mongodb`) input plugin.
+- Support resolution of symlinks in Filecount (`filecount`) input plugin.
+- Set message timestamp to the metric time in Apache Kafka (`kafka`) output plugin.
+- Add `base64decode` operation to Strings (`strings`) processor.
+- Add option to control collecting global variables to MySQL (`mysql`) input plugin.
+
+### Bug fixes
+- Show correct default settings in MySQL (`mysql`) sample configuration.
+- Use `1h` or `3h` rain values as appropriate in OpenWeatherMap (`openweathermap`) input plugin.
+- Fix `not a valid field` error in Windows with NVIDIA SMI (`nvidia_smi`) input plugin.
+- Fix InfluxDB (`influxdb`) output serialization when the connection is closed.
+- Fix Ping (`ping`) input plugin skipping remaining hosts after a DNS lookup error.
+- Log MongoDB oplog auth errors at debug level.
+- Remove trailing underscore trimming from json flattener.
+- Revert change causing CPU usage to be capped at 100 percent.
+- Accept any media type in the Prometheus Format (`prometheus`) input plugin.
+- Fix unix socket dial arguments in uWSGI (`uwsgi`) input plugin.
+- Replace colon characters in Prometheus (`prometheus_client`) output labels when using `metric_version=1`.
+- Set `TrimLeadingSpace` when `TrimSpace` is on in CSV (`csv`) parser.
+
+## v1.12.6 [2019-11-19]
+
+### Bug fixes
+- Fix many plugin errors being logged at debug level.
+- Use nanosecond precision in Docker Log (`docker_log`) input plugin.
+- Fix interface option with `method = native` in Ping (`ping`) input plugin.
+- Fix panic in MongoDB (`mongodb`) input plugin if shard connection pool stats are unreadable.
+
+## v1.12.5 [2019-11-12]
+
+### Bug fixes
+- Fix incorrect results in Ping (`ping`) input plugin.
+- Add missing character replacement to `sql_instance` tag.
+- Change `no metric` error message to `debug` level in CloudWatch (`cloudwatch`) input plugin.
+- Add missing `ServerProperties` query to SQLServer (`sqlserver`) input plugin documentation.
+- Fix MongoDB `connections_total_created` field loading.
+- Fix metric creation when node is offline in Jenkins (`jenkins`) input plugin.
+- Fix Docker `uptime_ns` calculation when container has been restarted.
+- Fix MySQL field type conflict in conversion of `gtid_mode` to an integer.
+- Fix MySQL field type conflict with `ssl_verify_depth` and `ssl_ctx_verify_depth`.
+
+## v1.12.4 [2019-10-23]
+
+- Build official packages with Go 1.12.12.
+
+### Bug fixes
+- Fix metric generation with Ping (`ping`) input plugin `native` method.
+- Exclude alias tag if unset from plugin internal stats.
+- Fix `socket_mode` option in PowerDNS Recursor (`powerdns_recursor`) input plugin.
+
+## v1.12.3 [2019-10-07]
+
+- Build official packages with Go 1.12.10.
+
+### Bug fixes
+- Use batch serialization format in Exec (`exec`) output plugin.
+- Use case-insensitive serial number match in S.M.A.R.T. (`smart`) input plugin.
+- Add authorization header only when environment variable is set.
+- Fix issue when running multiple MySQL and SQL Server plugin instances.
+- Fix database routing on retry with `exclude_database_tag`.
+- Fix logging panic in Exec (`exec`) input plugin with Nagios data format.
+
+## v1.12.2 [2019-09-24]
+
+### Bug fixes
+- Fix timestamp format detection in `csv` and `json` parsers.
+- Apcupsd input (`apcupsd`)
+  - Fix parsing of `BATTDATE`.
+- Keep boolean values listed in `json_string_fields`.
+- Disable Go plugin support in official builds.
+- Cisco GNMI Telemetry input (`cisco_telemetry_gnmi`)
+  - Fix path handling issues.
+
+## v1.12.1 [2019-09-10]
+
+### Bug fixes
+- Fix dependencies on GLIBC_2.14 symbol version.
+- Filecount input (`filecount`)
+  - Fix filecount for paths with trailing slash.
+- Icinga2 input (`icinga2`)
+  - Convert check state to an integer.
+- Apache Kafka Consumer input (`kafka_consumer`)
+  - Fix `could not mark message delivered` error.
+- MongoDB input (`mongodb`)
+  - Skip collection stats when disabled.
+- HTTP Response input (`http_response`)
+  - Fix error reading closed response body.
+- Apcupsd input (`apcupsd`)
+  - Fix documentation to reflect plugin behavior.
+- InfluxDB v2 output (`influxdb_v2`)
+  - Display retry log message only when a retry-after value is received.
+
+
+## v1.12 [2019-09-03]
+
+### Release Notes
+- The cluster-health-related fields in the Elasticsearch input have been split out
+  from the `elasticsearch_indices` measurement into the new `elasticsearch_cluster_health_indices`
+  measurement, as they were originally combined in error.
+
+### New Inputs
+- [Apcupsd](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/apcupsd/README.md) (`apcupsd`) - Contributed by @jonaz
+- [Docker Log](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker_log/README.md) (`docker_log`) - Contributed by @prashanthjbabu
+- [Fireboard](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/fireboard/README.md) (`fireboard`) - Contributed by @ronnocol
+- [Logstash](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logstash/README.md) (`logstash`) - Contributed by @lkmcs, @dmitryilyin, and @arkady-emelyanov
+- [MarkLogic](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/marklogic/README.md) (`marklogic`) - Contributed by @influxdata
+- [OpenNTPD](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/openntpd/README.md) (`openntpd`) - Contributed by @aromeyer
+- [uWSGI](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/uwsgi) (`uwsgi`) - Contributed by @blaggacao
+
+### New Parsers
+- [Form Urlencoded](https://github.com/influxdata/telegraf/blob/master/plugins/parsers/form_urlencoded) (`form_urlencoded`) - Contributed by @byonchev
+
+### New Processors
+- [Date](https://github.com/influxdata/telegraf/blob/master/plugins/processors/date/README.md) (`date`) - Contributed by @influxdata
+- [Pivot](https://github.com/influxdata/telegraf/blob/master/plugins/processors/pivot/README.md) (`pivot`) - Contributed by @influxdata
+- [Tag Limit](https://github.com/influxdata/telegraf/blob/master/plugins/processors/tag_limit/README.md) (`tag_limit`) - Contributed by @memory
+- [Unpivot](https://github.com/influxdata/telegraf/blob/master/plugins/processors/unpivot/README.md) (`unpivot`) - Contributed by @influxdata
+
+### New Outputs
+- [Exec](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/exec/README.md) (`exec`) - Contributed by @Jaeyo
+
+### Features
+- Improve performance of `wavefront` serializer.
+- Allow `regex` processor to append tag values.
+- Add `starttime` field to `phpfpm` input.
+- Add cluster name tag to elasticsearch indices.
+- Add support for interface field in `http_response` input plugin.
+- Add container `uptime_ns` in `docker` input plugin.
+- Add better user-facing errors for API timeouts in `docker` input.
+- Add TLS mutual auth support to `jti_openconfig_telemetry` input.
+- Add support for ES 7.x to `elasticsearch` output.
+- Add basic auth to `prometheus` input plugin.
+- Add node roles tag to `elasticsearch` input.
+- Support floats in `statsd` percentiles.
+- Add native Go ping method to `ping` input plugin.
+- Resume from last known offset in `tail` input when reloading Telegraf.
+- Add improved support for Azure SQL Database to `sqlserver` input.
+- Add extra attributes for NVMe devices to `smart` input.
+- Add `docker_devicemapper` measurement to `docker` input plugin.
+- Add basic auth support to `elasticsearch` input.
+- Support string field glob matching in `json` parser.
+- Update gjson to allow multipath syntax in `json` parser.
+- Add support for collecting SQL Requests to identify waits and blocking to `sqlserver` input.
+- Collect k8s endpoints, ingress, and services in `kube_inventory` plugin.
+- Add support for field/tag keys to `strings` processor.
+- Add certificate verification status to `x509_cert` input.
+- Support percentage value parsing in `redis` input.
+- Load external Go plugins from `--plugin-directory`.
+- Add ability to exclude db/bucket tag from `influxdb` outputs.
+- Gather per-collection stats in `mongodb` input plugin.
+- Add TLS & credentials configuration for `nats_consumer` input plugin.
+- Add support for enterprise repos to `github` plugin.
+- Add Indices stats to `elasticsearch` input.
+- Add `left` function to `strings` processor.
+- Add grace period for metrics late for aggregation.
+- Add `diff` and `non_negative_diff` to `basicstats` aggregator.
+- Add device tags to `smart_attributes`.
+- Collect `framework_offers` and `allocator` metrics in `mesos` input.
+- Add Telegraf and Go version to the `internal` input plugin.
+- Update the number of logical CPUs dynamically in `system` plugin.
+- Add darwin (macOS) builds to the release.
+- Add configurable timeout setting to `smart` input.
+- Add `memory_usage` field to `procstat` input plugin.
+- Add support for custom attributes to `vsphere` input.
+- Add `cmdstat` metrics to `redis` input.
+- Add `content_length` metric to `http_response` input plugin.
+- Add `database_tag` option to `influxdb_listener` to add database from query string.
+- Add capability to limit TLS versions and cipher suites.
+- Add `topic_tag` option to `mqtt_consumer` (see the sketch at the end of this section).
+- Add ability to label inputs for logging.
+- Add TLS support to `nginx_plus`, `nginx_plus_api` and `nginx_vts`.
+
+### Bug fixes
+- Fix sensor read error stopping reporting of all sensors in `temp` input.
+- Fix double pct replacement in `sysstat` input.
+- Fix race in master node detection in `elasticsearch` input.
+- Fix SSPI authentication not working in `sqlserver` input.
+- Fix memory error panic in `mqtt` input.
+- Support Kafka 2.3.0 consumer groups.
+- Fix persistent session in `mqtt_consumer`.
+- Fix finder inconsistencies in `vsphere` input.
+- Fix parsing multiple metrics on the first line of a tailed file.
+- Send TERM to `exec` processes before sending KILL signal.
+- Query oplog only when connected to a replica set.
+- Use environment variables to locate Program Files on Windows.
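+
+A minimal sketch of the new `topic_tag` option on the MQTT Consumer input; the broker address, topic filter, and tag name are placeholders:
+
+```toml
+[[inputs.mqtt_consumer]]
+  servers = ["tcp://localhost:1883"]  # placeholder broker address
+  topics = ["sensors/#"]              # placeholder topic filter
+  topic_tag = "topic"                 # tag each metric with the topic it arrived on
+  data_format = "influx"
+```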
+
+## v1.11.5 [2019-08-27]
+
+### Bug fixes
+- Update `go-sql-driver/mysql` driver to 1.4.1 to address auth issues.
+- Return error status from `--test` if input plugins produce an error.
+- Fix only the last configuration being used when multiple instances of the `smart` input are configured.
+- Build official packages with Go 1.12.9.
+- Split out `-w` argument in `iptables` input plugin.
+- Add support for parked process state on Linux.
+- Remove leading slash from rcon command.
+- Allow jobs with dashes in the name in `lustre2` input plugin.
+
+## v1.11.4 [2019-08-06]
+
+### Bug fixes
+
+#### Plugins
+- Kubernetes input (`kubernetes`)
+  - Correct typo in `logsfs_available_bytes` field.
+- Datadog output (`datadog`)
+  - Skip floats that are `NaN` or `Inf`.
+- Socket Listener input (`socket_listener`)
+  - Fix reload panic.
+
+## v1.11.3 [2019-07-23]
+
+### Bug fixes
+
+#### Agent
+
+- Treat empty array as successful parse in JSON parser.
+- Fix template pattern partial wildcard matching.
+
+#### Plugins
+
+- Bind input (`bind`)
+  - Add missing `rcode` and `zonestat`.
+- GitHub input (`github`)
+  - Fix panic.
+- Lustre2 input (`lustre2`)
+  - Fix config parse regression.
+- NVIDIA SMI input (`nvidia_smi`)
+  - Handle unknown error.
+- StatsD input (`statsd`)
+  - Fix panic when processing Datadog events.
+- VMware vSphere input (`vsphere`)
+  - Fix inability to reconnect after vCenter reboot.
+
+## v1.11.2 [2019-07-09]
+
+### Bug fixes
+
+#### Plugins
+
+- Bind input (`bind`)
+  - Fix `value out of range` error on 32-bit systems.
+- Burrow input (`burrow`)
+  - Apply topic filter to partition metrics.
+- Filecount input (`filecount`)
+  - Fix path separator handling in Windows.
+- Logparser input (`logparser`)
+  - Fix plugin no longer working after reload.
+- Ping input (`ping`)
+  - Fix source address ping flag on BSD.
+- StatsD input (`statsd`)
+  - Fix panic with empty Datadog tag string.
+- Tail input (`tail`)
+  - Fix plugin no longer working after reload.
+
+## v1.11.1 [2019-06-25]
+
+### Bug fixes
+
+#### Agent
+
+- Fix panic if `pool_mode` column does not exist.
+- Add missing `container_id` field to `docker_container_status` metrics.
+- Add `device`, `serial_no`, and `wwn` tags to synthetic attributes.
+
+#### Plugins
+
+- Cisco GNMI Telemetry input (`cisco_telemetry_gnmi`)
+  - Omit keys when creating measurement names for GNMI telemetry.
+- Disk input (`disk`)
+  - Fix inability to set `mount_points` option.
+- NGINX Plus API input (`nginx_plus_api`)
+  - Skip 404 error reporting.
+- Procstat input (`procstat`)
+  - Don't consider `pid` of `0` when using systemd lookup.
+- StatsD input (`statsd`)
+  - Fix parsing of remote TCP address.
+- System input (`system`)
+  - Ignore error when `utmp` is missing.
+
+## v1.11.0 [2019-06-11]
+
+### Release notes
+
+- System (`system`) input plugin
+  - The `uptime_format` field has been deprecated; use the `uptime` field instead.
+- Amazon Cloudwatch Statistics (`cloudwatch`) input plugin
+  - Updated to use a more efficient API and now requires `GetMetricData` permissions
+    instead of `GetMetricStatistics`. The `units` tag is not
+    available from this API and is no longer collected.
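+
+A minimal sketch of a Cloudwatch (`cloudwatch`) input configuration under the new API; the region, namespace, and timing values are placeholders, and the credentials Telegraf uses must now be granted `GetMetricData`:
+
+```toml
+[[inputs.cloudwatch]]
+  region = "us-east-1"   # placeholder AWS region
+  namespace = "AWS/EC2"  # placeholder metric namespace
+  period = "5m"          # granularity of requested metrics
+  delay = "5m"           # allow CloudWatch time to make data available
+  interval = "5m"
+```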
+
+### New input plugins
+
+- [BIND 9 Nameserver Statistics (`bind`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek
+- [Cisco GNMI Telemetry (`cisco_telemetry_gnmi`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/cisco_telemetry_gnmi/README.md) - Contributed by @sbyx
+- [Cisco Model-driven Telemetry (`cisco_telemetry_mdt`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/cisco_telemetry_mdt/README.md) - Contributed by @sbyx
+- [ECS (`ecs`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/ecs/README.md) - Contributed by @rbtr
+- [GitHub (`github`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/github/README.md) - Contributed by @influxdata
+- [OpenWeatherMap (`openweathermap`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/openweathermap/README.md) - Contributed by @regel
+- [PowerDNS Recursor (`powerdns_recursor`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje
+
+### New aggregator plugins
+
+- [Final (`final`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/aggregators/final/README.md) - Contributed by @oplehto
+
+### New output plugins
+
+- [Syslog (`syslog`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/outputs/syslog/README.md) - Contributed by @javicrespo
+- [Health (`health`)](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/outputs/health/README.md) - Contributed by @influxdata
+
+### New output data formats (serializers)
+
+- [wavefront](https://github.com/influxdata/telegraf/blob/release-1.11/plugins/serializers/wavefront/README.md) - Contributed by @puckpuck
+
+### Features
+
+#### Agent
+
+- Add CLI support for outputting sections of the configuration.
+- Add `service-display-name` option for use with Windows service.
+- Add support for log rotation.
+- Allow env vars `${}` expansion syntax in configuration file.
+- Allow `devices` option to match against devlinks.
+
+#### Input data formats
+
+- Nagios
+  - Add support for multi-line text and perfdata.
+
+#### Input plugins
+
+- AMQP Consumer (`amqp_consumer`)
+  - Support passive queue declaration.
+  - Add support for gzip compression.
+- Amazon Cloudwatch Statistics (`cloudwatch`)
+  - Use more efficient `GetMetricData` API to collect Cloudwatch metrics.
+  - Allow selection of collected statistic types in cloudwatch input.
+- Apache Solr (`solr`)
+  - Add support for HTTP basic auth.
+- Hddtemp (`hddtemp`)
+  - Add `source` tag.
+- InfluxDB Listener (`influxdb_listener`)
+  - Support verbose query parameter in ping endpoint.
+- NVIDIA SMI (`nvidia_smi`)
+  - Extend metrics collected from NVIDIA GPUs.
+- Net (`net`)
+  - Speed up interface stat collection.
+- PHP-FPM (`phpfpm`)
+  - Enhance HTTP connection options.
+- Ping (`ping`)
+  - Add TTL field.
+- Procstat (`procstat`)
+  - Add `cmdline` tag.
+  - Add pagefault data.
+- Prometheus (`prometheus`)
+  - Add namespace restriction.
+- SMART (`smart`)
+  - Support more drive types.
+- Socket Listener (`socket_listener`)
+  - Add option to set permissions for UNIX domain sockets.
+- StatsD (`statsd`)
+  - Add support for Datadog events.
+
+#### Output plugins
+
+- AMQP (`amqp`)
+  - Add support for gzip compression.
+- File (`file`)
+  - Add file rotation support.
+- Stackdriver (`stackdriver`)
+  - Set user agent.
+- VMware Wavefront (`wavefront`)
+  - Add option to use strict sanitization rules.
+
+#### Aggregator plugins
+
+- Histogram aggregator
+  - Add option to reset buckets on flush.
+
+#### Processor plugins
+
+- Converter (`converter`)
+  - Add hexadecimal string to integer conversion.
+- Enum (`enum`)
+  - Support tags.
+
+### Bug fixes
+
+#### Agent
+
+- Create Windows service only when specified or in service manager.
+- Don't start Telegraf when stale pid file found.
+- Fix inline table support in configuration file.
+- Fix multi-line basic strings support in configuration file.
+- Fix multiple SIGHUP causing Telegraf to shut down.
+- Fix batch failing when a single metric is unserializable.
+- Log a warning on write if the metric buffer has overflowed.
+
+#### Plugins
+
+- AMQP (`amqp`) output
+  - Fix direct exchange routing key.
+- Neptune Apex (`neptune_apex`) input
+  - Skip invalid power times.
+- Docker (`docker`) input
+  - Fix image name not being parsed correctly.
+- Fibaro (`fibaro`) input
+  - Set default timeout of `5s`.
+- InfluxDB v1.x (`influxdb`) output
+  - Fix connection leak on reload.
+- InfluxDB v2 (`influxdb_v2`) output
+  - Fix connection leak on reload.
+- Lustre 2 (`lustre2`) input
+  - Fix only one job per storage target being reported.
+- Microsoft Azure Monitor (`azure_monitor`) output
+  - Fix scale set resource ID.
+- Microsoft SQL Server (`sqlserver`) input
+  - Fix connection closing on error.
+- Minecraft (`minecraft`) input
+  - Support Minecraft server 1.13 and newer.
+- NGINX Upstream Check (`nginx_upstream_check`) input
+  - Fix TOML option name.
+- PgBouncer (`pgbouncer`) input
+  - Fix unsupported pkt type error.
+- Procstat (`procstat`) input
+  - Verify a process passed by `pid_file` exists.
+- VMware vSphere (`vsphere`) input
+  - Fix datastore name mapping.
+
+## v1.10.4 [2019-05-14]
+
+### Bug fixes
+
+#### Agent
+
+- Create telegraf user in pre-install RPM scriptlet.
+- Fix parse of unix timestamp with more than ns precision.
+- Fix race condition in the Wavefront parser.
+
+#### Plugins
+
+- HTTP (`http`) output plugin
+  - Fix inability to set the `Host` header.
+- IPMI Sensor input (`ipmi_sensor`)
+  - Add support for hex values.
+- InfluxDB v2 output (`influxdb_v2`)
+  - Don't discard metrics on forbidden error.
+- Interrupts input (`interrupts`)
+  - Restore field name case.
+- NTPQ input (`ntpq`)
+  - Skip lines with missing `refid`.
+- VMware vSphere input (`vsphere`)
+  - Fix interval estimation.
+
+## v1.10.3 [2019-04-16]
+
+### Bug fixes
+
+#### Agent
+
+- Set log directory attributes in RPM specification.
+
+#### Plugins
+
+- Prometheus Client (`prometheus_client`) output plugin
+  - Allow colons in metric names.
+
+## v1.10.2 [2019-04-02]
+
+### Breaking changes
+
+Grok input data format (parser): string fields no longer have leading and trailing quotation marks removed.
+If you are capturing quoted strings, the patterns might need to be updated.
+
+### Bug fixes
+
+#### Agent
+
+- Fix deadlock when Telegraf is aligning aggregators.
+- Add owned directories to RPM package specification.
+- Fix drop tracking of metrics removed with aggregator `drop_original`.
+- Fix aggregator window alignment.
+- Fix panic during shutdown of multiple aggregators.
+- Fix tags applied to wrong metric on parse error.
+
+#### Plugins
+
+- Ceph (`ceph`) input
+  - Fix missing cluster stats.
+- DiskIO (`diskio`) input
+  - Fix reading major and minor block device identifiers.
+- File (`file`) output
+  - Fix open file error handling.
+- Filecount (`filecount`) input
+  - Fix basedir check and parent dir extraction.
+- Grok (`grok`) parser
+  - Fix last character being removed from string field.
+- InfluxDB v2 (`influxdb_v2`) output
+  - Fix plugin name in output logging.
+- Prometheus (`prometheus`) input
+  - Fix parsing of kube config `certificate-authority-data`.
+- Prometheus (`prometheus`) output
+  - Remove tags that would create invalid label names.
+- StatsD (`statsd`) input
+  - Listen before leaving start.
+
+## v1.10.1 [2019-03-19]
+
+#### Bug fixes
+
+- Show error when TLS configuration cannot be loaded.
+- Add base64-encoding/decoding for Google Cloud PubSub (`pubsub`) plugins.
+- Fix type compatibility in VMware vSphere (`vsphere`) input plugin with `use_int_samples` option.
+- Fix VMware vSphere (`vsphere`) input plugin showing failed task in vCenter.
+- Fix invalid measurement name and skip column in the CSV input data format parser.
+- Fix System (`system`) input plugin causing high CPU usage on Raspbian.
+
+## v1.10 [2019-03-05]
+
+#### New input plugins
+
+- [Google Cloud PubSub (`cloud_pubsub`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye
+- [Kubernetes Inventory (`kube_inventory`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/kube_inventory/README.md) - Contributed by @influxdata
+- [Neptune Apex (`neptune_apex`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud
+- [NGINX Upstream Check (`nginx_upstream_check`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/nginx_upstream_check/README.md) - Contributed by @dmitryilyin
+- [Multifile (`multifile`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/inputs/multifile/README.md) - Contributed by @martin2250
+
+#### New output plugins
+
+- [Google Cloud PubSub (`cloud_pubsub`)](https://github.com/influxdata/telegraf/blob/release-1.10/plugins/outputs/cloud_pubsub/README.md) - Contributed by @emilymye
+
+#### New output data formats (serializers)
+
+- [ServiceNow Metrics](/telegraf/v1.12/data_formats/output/nowmetric) - Contributed by @JefMuller
+- [Carbon2](/telegraf/v1.12/data_formats/output/carbon2) - Contributed by @frankreno
+
+#### Features
+
+- **General**
+  - Allow forced gathering of ES cluster stats.
+  - Add Linux `mipsle` packages.
+- **Input plugins**
+  - Ceph (`ceph`)
+    - Add read and write op per second fields.
+  - CouchDB (`couchdb`)
+    - Add support for basic auth.
+  - DNS Query (`dns_query`)
+    - Add `rcode` tag and field.
+  - DiskIO (`diskio`)
+    - Include `DEVLINKS` in available `udev` properties.
+  - HTTP (`http`)
+    - Add support for sending a request body to `http` input.
+  - InfluxDB Listener (`influxdb_listener`)
+    - Add internal metric for line too long.
+  - Interrupts (`interrupts`)
+    - Add option to store `cpu` as a tag.
+  - Kafka Consumer (`kafka_consumer`)
+    - Add ability to tag metrics with topic.
+  - Kubernetes (`kubernetes`)
+    - Support passing bearer token directly.
+  - Microsoft SQL Server (`sqlserver`)
+    - Add log send and redo queue fields.
+  - MongoDB (`mongodb`)
+    - Add `flush_total_time_ns` and additional WiredTiger fields.
+  - Procstat (`procstat_lookup`)
+    - Add `running` field.
+  - Prometheus (`prometheus`)
+    - Support passing bearer token directly.
+    - Add option to report input timestamp.
+  - VMware vSphere (`vsphere`)
+    - Improve scalability.
+    - Add resource path-based filtering.
+  - Varnish (`varnish`)
+    - Add configurable timeout.
+- **Output plugins**
+  - MQTT (`mqtt`)
+    - Add option to set retain flag on messages.
+  - Stackdriver (`stackdriver`)
+    - Add resource type and resource label support.
+  - VMware Wavefront (`wavefront`)
+    - Add support for the Wavefront Direct Ingestion API.
+- **Aggregator plugins**
+  - Value Counter (`valuecounter`)
+    - Allow counting float values.
+- **Data formats**
+  - **Input data formats**
+    - CSV
+      - Support `unix_us` and `unix_ns` timestamp format.
+      - Add support for `unix` and `unix_ms` timestamps.
+    - Grok (`grok`)
+      - Allow parser to produce metrics with no fields.
+    - JSON
+      - Add micro and nanosecond unix timestamp support.
+  - **Output data formats**
+    - ServiceNow Metrics
+
+#### Bug fixes
+
+- **General**
+  - Use `systemd` in Amazon Linux 2 rpm.
+  - Fix `initscript` removes `pidfile` of restarted Telegraf process.
+- **Input plugins**
+  - Consul (`consul`)
+    - Use `datacenter` option spelling.
+  - InfluxDB Listener (`influxdb_listener`)
+    - Remove auth from `/ping` route.
+  - Microsoft SQL Server (`sqlserver`)
+    - Set deadlock priority.
+  - Nstat (`nstat`)
+    - Remove error log when `snmp6` directory does not exist.
+  - Ping (`ping`)
+    - Fix host not being added when using custom arguments.
+  - X.509 Certificate (`x509_cert`)
+    - Fix input that stopped checking certificates after the first error.
+- **Output plugins**
+  - Prometheus (`prometheus`)
+    - Sort metrics by timestamp.
+  - Stackdriver (`stackdriver`)
+    - Skip string fields when writing.
+    - Send metrics in ascending time order.
+
+## v1.9.5 [2019-02-26]
+
+### Bug fixes
+
+* General
+  * Use `systemd` in Amazon Linux 2 rpm.
+* Ceph Storage (`ceph`) input plugin
+  * Add backwards compatibility fields in usage and pool statistics.
+* InfluxDB (`influxdb`) output plugin
+  * Fix UDP line splitting.
+* Microsoft SQL Server (`sqlserver`) input plugin
+  * Set deadlock priority to low.
+  * Disable results by row in AzureDB query.
+* Nstat (`nstat`) input plugin
+  * Remove error log when `snmp6` directory does not exist.
+* Ping (`ping`) input plugin
+  * Fix host not being added when using custom arguments.
+* Stackdriver (`stackdriver`) output plugin
+  * Skip string fields when writing to stackdriver output.
+  * Send metrics in ascending time order.
+
+## v1.9.4 [2019-02-05]
+
+### Bug fixes
+
+* General
+  * Fix `skip_rows` and `skip_columns` options in csv parser.
+  * Build official packages with Go 1.11.5.
+* Jenkins (`jenkins`) input plugin
+  * Always send basic auth in jenkins input.
+* Syslog (`syslog`) input plugin
+  * Fix definition of multiple syslog plugins.
+
+## v1.9.3 [2019-01-22]
+
+#### Bug fixes
+
+* General
+  * Fix latest metrics not sent first when output fails.
+  * Fix `internal_write buffer_size` not reset on timed writes.
+* AMQP Consumer (`amqp_consumer`) input plugin
+  * Fix `amqp_consumer` input that stopped consuming when it received
+    unparsable messages.
+* Couchbase (`couchbase`) input plugin
+  * Remove `userinfo` from cluster tag in `couchbase` input.
+* Microsoft SQL Server (`sqlserver`) input plugin
+  * Fix arithmetic overflow in `sqlserver` input.
+* Prometheus (`prometheus`) input plugin
+  * Fix `prometheus` input not detecting added and removed pods.
+
+## v1.9.2 [2019-01-08]
+
+### Bug fixes
+
+- Increase `varnishstat` timeout.
+- Remove storage calculation for non-Azure-managed instances and add server version.
+- Fix error sending empty tag value in `azure_monitor` output.
+- Fix panic with Prometheus input plugin on shutdown.
+- Support non-transparent framing of syslog messages.
+- Apply global- and plugin-level metric modifications before filtering.
+- Fix `num_remapped_pgs` field in `ceph` plugin.
+- Add `PDH_NO_DATA` to known counter error codes in `win_perf_counters`.
+- Fix `amqp_consumer` halting consumption on an empty message.
+- Fix multiple replace tables not working in strings processor.
+- Allow non-local UDP connections in `net_response`.
+- Fix TOML option names in parser processor.
+- Fix panic in Docker input with bad endpoint.
+- Fix original metric being modified by aggregator filters.
+
+## v1.9.1 [2018-12-11]
+
+### Bug fixes
+
+- Fix boolean handling in splunkmetric serializer.
+- Set default config values in Jenkins input.
+- Fix server connection and document stats in MongoDB input.
+- Add `X-Requested-By` header to Graylog input.
+- Fix metric memory not freed from the metric buffer on write.
+- Add support for client TLS certificates in PostgreSQL inputs.
+- Prevent panic when marking the offset in `kafka_consumer`.
+- Add early metrics to aggregator and honor `drop_original` setting.
+- Use `-W` flag on BSD variants in ping input.
+- Allow delta metrics in Wavefront parser.
+
+## v1.9.0 [2018-11-20]
+
+#### Release Notes
+
+- The HTTP Listener (`http_listener`) input plugin has been renamed to
+  InfluxDB Listener (`influxdb_listener`) input plugin and
+  use of the original name is deprecated. The new name better describes the
+  intended use of the plugin as an InfluxDB relay. For general-purpose
+  transfer of metrics in any format using HTTP, InfluxData recommends using
+  HTTP Listener v2 (`http_listener_v2`) input plugin.
+
+- Input plugins are no longer limited from adding metrics when the output is
+  writing, and new metrics will move into the metric buffer as needed. This
+  provides more robust degradation and recovery when writing to a slow
+  output at high throughput.
+
+  To avoid overconsumption when reading from queue consumers, the following
+  input plugins use the new option `max_undelivered_messages` to limit the number
+  of outstanding unwritten metrics:
+
+  * Apache Kafka Consumer (`kafka_consumer`)
+  * AMQP Consumer (`amqp_consumer`)
+  * MQTT Consumer (`mqtt_consumer`)
+  * NATS Consumer (`nats_consumer`)
+  * NSQ Consumer (`nsq_consumer`)
+
+#### New input plugins
+
+- [HTTP Listener v2 (`http_listener_v2`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5
+- [IPVS (`ipvs`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/ipvs/README.md) - Contributed by @amoghe
+- [Jenkins (`jenkins`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/jenkins/README.md) - Contributed by @influxdata & @lpic10
+- [NGINX Plus API (`nginx_plus_api`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx_plus_api/README.md) - Contributed by @Bugagazavr
+- [NGINX VTS (`nginx_vts`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/nginx_vts/README.md) - Contributed by @monder
+- [Wireless (`wireless`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment
+
+#### New output plugins
+
+- [Stackdriver (`stackdriver`)](https://github.com/influxdata/telegraf/blob/release-1.9/plugins/outputs/stackdriver/README.md) - Contributed by @jamesmaidment
+
+#### Features
+
+- General
+  - Add ability to define a custom service name when installing as a Windows service.
+  - Add new configuration for CSV column explicit type conversion.
+
+  - Add Telegraf version to `User-Agent` header.
+  - Add ability to specify bytes options as strings with units.
+  - Add per output `flush_interval`, `metric_buffer_limit`, and `metric_batch_size`.
+- Amazon Kinesis (`kinesis`) output plugin
+  - Use `DescribeStreamSummary` in place of `ListStreams`.
+- DNS Query (`dns_query`) input plugin
+  - Query servers in parallel.
+- Datadog (`datadog`) output plugin
+  - Add an option to specify a custom URL.
+  - Use non-allocating field and tag accessors.
+- Filecount (`filecount`) input plugin
+  - Add per-directory file count.
+- HTTP (`http`) output plugin
+  - Add entity-body compression.
+- Memcached (`memcached`) input plugin
+  - Collect additional statistics.
+- NSQ (`nsq`) input plugin
+  - Add TLS configuration support.
+- Ping (`ping`) input plugin
+  - Add support for IPv6.
+- Procstat (`procstat`) input plugin
+  - Add Windows service name lookup.
+- Prometheus (`prometheus`) input plugin
+  - Add scraping for Prometheus annotation in Kubernetes.
+  - Allow connecting to Prometheus using UNIX socket.
+- Strings (`strings`) processor plugin
+  - Add `replace` function.
+- VMware vSphere (`vsphere`) input plugin
+  - Add LUN to data source translation.
+
+#### Bug fixes
+
+- Remove `time_key` from the field values in JSON parser.
+- Fix input time rounding when using a custom interval.
+- Fix potential deadlock or leaked resources on restart or reload.
+- Fix outputs blocking inputs when batch size is reached.
+- Fix potential missing datastore metrics in VMware vSphere (`vsphere`) input plugin.
+
+## v1.8.3 [2018-10-30]
+
+### Bug fixes
+
+- Add DN attributes as tags in X.509 Certificate (`x509_cert`) input plugin to avoid series overwrite.
+- Prevent connection leak by closing unused connections in AMQP (`amqp`) output plugin.
+- Use default partition key when tag does not exist in Amazon Kinesis (`kinesis`) output plugin.
+- Log the correct error in JTI OpenConfig Telemetry (`jti_openconfig_telemetry`) input plugin.
+- Handle panic when IPMI Sensor (`ipmi_sensor`) input plugin gets bad input.
+- Don't add unserializable fields to Jolokia2 (`jolokia2`) input plugin.
+- Fix version check in PostgreSQL Extensible (`postgresql_extensible`) plugin.
+
+## v1.8.2 [2018-10-17]
+
+### Bug fixes
+
+* Aerospike (`aerospike`) input plugin
+  * Support uint fields.
+* Docker (`docker`) input plugin
+  * Use container name from list if no name in container stats.
+* Filecount (`filecount`) input plugin
+  * Prevent panic on error in file stat.
+* InfluxDB v2 (`influxdb_v2`) output plugin
+  * Update write path to match updated v2 API.
+* Logparser (`logparser`) input plugin
+  * Fix panic.
+* MongoDB (`mongodb`) input plugin
+  * Lower authorization errors to debug level.
+* MQTT Consumer (`mqtt_consumer`) input plugin
+  * Fix connect and reconnect.
+* Ping (`ping`) input plugin
+  * Return correct response code.
+* VMware vSphere (`vsphere`) input plugin
+  * Fix missing timeouts.
+* X.509 Certificate (`x509_cert`) input plugin
+  * Fix segfault.
+
+## v1.8.1 [2018-10-03]
+
+### Bug fixes
+
+- Fix `hardware_type` being truncated in Microsoft SQL Server (`sqlserver`) input plugin.
+- Improve performance in Basicstats (`basicstats`) aggregator plugin.
+- Add `hostname` to TLS config for SNI support in X.509 Certificate (`x509_cert`) input plugin.
+- Don't add tags with empty values to OpenTSDB (`opentsdb`) output plugin.
+- Fix panic during network error in VMware vSphere (`vsphere`) input plugin.
+- Unify error response in HTTP Listener (`http_listener`) input plugin with InfluxDB (`influxdb`) output plugin.
+- Add `UUID` to VMs in VMware vSphere (`vsphere`) input plugin.
+- Skip tags with empty values in Amazon Cloudwatch (`cloudwatch`) output plugin.
+- Fix missing non-realtime samples in VMware vSphere (`vsphere`) input plugin.
+- Fix case of `timezone`/`grok_timezone` options in grok parser and logparser input plugin.
+
+## v1.8 [2018-09-21]
+
+### New input plugins
+
+- [ActiveMQ (`activemq`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/activemq/README.md) - Contributed by @mlabouardy
+- [Beanstalkd (`beanstalkd`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/beanstalkd/README.md) - Contributed by @44px
+- [File (`file`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/file/README.md) - Contributed by @maxunt
+- [Filecount (`filecount`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/filecount/README.md) - Contributed by @sometimesfood
+- [Icinga2 (`icinga2`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/icinga2/README.md) - Contributed by @mlabouardy
+- [Kibana (`kibana`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/kibana/README.md) - Contributed by @lpic10
+- [PgBouncer (`pgbouncer`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul
+- [Temp (`temp`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/temp/README.md) - Contributed by @pytimer
+- [Tengine (`tengine`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/tengine/README.md) - Contributed by @ertaoxu
+- [VMware vSphere (`vsphere`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/vsphere/README.md) - Contributed by @prydin
+- [X.509 Certificate (`x509_cert`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/x509_cert/README.md) - Contributed by @jtyr
+
+### New processor plugins
+
+- [Enum (`enum`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter
+- [Parser (`parser`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/parser/README.md) - Contributed by @Ayrdrie & @maxunt
+- [Rename (`rename`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/rename/README.md) - Contributed by @goldibex
+- [Strings (`strings`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/processors/strings/README.md) - Contributed by @bsmaldon
+
+### New aggregator plugins
+
+- [ValueCounter (`valuecounter`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212
+
+### New output plugins
+
+- [Azure Monitor (`azure_monitor`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/outputs/azure_monitor/README.md) - Contributed by @influxdata
+- [InfluxDB v2 (`influxdb_v2`)](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/outputs/influxdb_v2/README.md) - Contributed by @influxdata
+
+### New input data formats (parsers)
+
+- [csv](/telegraf/v1.8/data_formats/input/csv) - Contributed by @maxunt
+- [grok](/telegraf/v1.8/data_formats/input/grok/) - Contributed by @maxunt
+- [logfmt](/telegraf/v1.8/data_formats/input/logfmt/) - Contributed by @Ayrdrie & @maxunt
+- [wavefront](/telegraf/v1.8/data_formats/input/wavefront/) - Contributed by @puckpuck
+
+### New output data formats (serializers)
+
+- [splunkmetric](/telegraf/v1.8/data_formats/output/splunkmetric/) - Contributed by @ronnocol
+
+### Features
+
+- Add SSL/TLS support to Redis (`redis`) input plugin.
+- Add tengine input plugin.
+- Add power draw field to the NVIDIA SMI (`nvidia_smi`) input plugin.
+- Add support for Solr 7 to the Solr (`solr`) input plugin.
+- Add owner tag on partitions in Burrow (`burrow`) input plugin.
+- Add container status tag to Docker (`docker`) input plugin.
+- Add ValueCounter (`valuecounter`) aggregator plugin.
+- Add new measurement with results of `pgrep` lookup to Procstat (`procstat`) input plugin.
+- Add support for comma in logparser timestamp format.
+- Add path tag to Tail (`tail`) input plugin.
+- Add log message when tail is added or removed from a file.
+- Add option to use counter time in win perf counters.
+- Add energy and power field and device id tag to Fibaro (`fibaro`) input plugin.
+- Add HTTP path configuration for OpenTSDB output.
+- Gather IPMI metrics concurrently.
+- Add mongo document and connection metrics.
+- Add enum processor plugin.
+- Add user tag to procstat input.
+- Add support for multivalue metrics to collectd parser.
+- Add support for setting kafka client id.
+- Add file input plugin and grok parser.
+- Improve cloudwatch output performance.
+- Add x509_cert input plugin.
+- Add IPSIpAddress syntax to ipaddr conversion in snmp plugin.
+- Add Filecount (`filecount`) input plugin.
+- Add support for configuring an AWS `endpoint_url`.
+- Send all messages before waiting for results in Kafka output plugin.
+- Add support for lz4 compression to Kafka output plugin.
+- Split multiple sensor keys in ipmi input.
+- Support StatisticValues in cloudwatch output plugin.
+- Add ip restriction for the prometheus_client output.
+- Add PgBouncer (`pgbouncer`) input plugin.
+- Add ActiveMQ input plugin.
+- Add wavefront parser plugin.
+- Add rename processor plugin.
+- Add message `max_bytes` configuration to kafka input.
+- Add gopsutil meminfo fields to Mem (`mem`) input plugin.
+- Document how to parse Telegraf logs.
+- Use dep v0.5.0.
+- Add ability to set measurement from matched text in grok parser.
+- Drop message batches in Kafka (`kafka`) output plugin if too large.
+- Add support for static and random routing keys in Kafka (`kafka`) output plugin.
+- Add logfmt parser plugin.
+- Add parser processor plugin.
+- Add Icinga2 input plugin.
+- Add name, time, path and string field options to JSON parser.
+- Add forwarded records to sqlserver input.
+- Add Kibana input plugin.
+- Add csv parser plugin.
+- Add read_buffer_size option to statsd input.
+- Add azure_monitor output plugin.
+- Add queue_durability parameter to amqp_consumer input.
+- Add strings processor.
+- Add OAuth 2.0 support to HTTP output plugin.
+- Add Unix epoch timestamp support for JSON parser.
+- Add options for basic auth to haproxy input.
+- Add temp input plugin.
+- Add Beanstalkd input plugin.
+- Add means to specify server password for redis input.
+- Add Splunk Metrics serializer.
+- Add input plugin for VMware vSphere.
+- Align metrics window to interval in cloudwatch input.
+- Improve Azure Managed Instance support and more in sqlserver input.
+- Allow alternate binaries for iptables input plugin.
+- Add influxdb_v2 output plugin.
+
+### Bug fixes
+
+- Fix divide by zero in logparser input.
+- Fix instance and object name in performance counters with backslashes.
+- Reset/flush saved contents from bad metric.
+- Document all supported cli arguments.
+- Log access denied opening a service at debug level in win_services.
+- Add support for Kafka 2.0.
+- Fix Nagios parser not supporting ranges in performance data.
+- Fix Nagios parser not stripping quotes from performance data.
+- Fix null value crash in postgresql_extensible input.
+- Remove the startup authentication check from the cloudwatch output.
+- Support tailing files created after startup in tail input.
+- Fix CSV format configuration loading.
+
+
+## v1.7.4 [2018-08-29]
+
+### Bug fixes
+
+* Continue sending write batch in UDP if a metric is unserializable in InfluxDB (`influxdb`) output plugin.
+* Fix PowerDNS (`powerdns`) input plugin tests.
+* Fix `burrow_group` offset calculation for Burrow (`burrow`) input plugin.
+* Add `result_code` value for errors running ping command.
+* Remove timeout deadline for UDP in Syslog (`syslog`) input plugin.
+* Ensure channel is closed if an error occurs in CGroup (`cgroup`) input plugin.
+* Fix sending of basic authentication credentials in HTTP (`http`) output plugin.
+* Use the correct `GOARM` value in the Linux armel package.
+
+## v1.7.3 [2018-08-07]
+
+### Bug fixes
+
+* Reduce required Docker API version.
+* Keep leading whitespace for messages in syslog input.
+* Skip bad entries on interrupt input.
+* Preserve metric type when using filters in output plugins.
+* Fix error message if URL is unparseable in InfluxDB output.
+* Use explicit `zpool` properties to fix parse error on FreeBSD 11.2.
+* Lock buffer when adding metrics.
+
+## v1.7.2 [2018-07-18]
+
+### Bug fixes
+
+* Use localhost as default server tag in Zookeeper (`zookeeper`) input plugin.
+* Don't set values when pattern doesn't match in Regex (`regex`) processor plugin.
+* Fix output format of Printer (`printer`) processor plugin.
+* Fix metrics being able to contain duplicate fields.
+* Return error if `NewRequest` fails in HTTP (`http`) output plugin.
+* Reset read deadline for Syslog (`syslog`) input plugin.
+* Exclude cached memory on Docker (`docker`) input plugin.
+
+## v1.7.1 [2018-07-03]
+
+### Bug fixes
+
+* Treat `sigterm` as a clean shutdown signal.
+* Fix selection of tags under nested objects in the JSON parser.
+* Fix Postfix (`postfix`) input plugin handling of multilevel queues.
+* Fix Syslog (`syslog`) input plugin timestamp parsing with single-digit day of month.
+* Handle MySQL (`mysql`) input plugin variations in the `user_statistics` collecting.
+* Fix Minmax (`minmax`) and Basicstats (`basicstats`) aggregator plugins to use `uint64`.
+* Document Swap (`swap`) input plugin.
+* Fix incorrect precision being applied to metric in HTTP Listener (`http_listener`) input plugin.
+
+## v1.7 [2018-06-12]
+
+### Release notes
+
+- The Cassandra (`cassandra`) input plugin has been deprecated in favor of the Jolokia2 (`jolokia2`)
+  input plugin, which is much more configurable and performant. There is
+  an [example configuration](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/jolokia2/examples) to help you
+  get started.
+
+- For plugins supporting TLS, you can now specify the certificate and keys
+  using `tls_ca`, `tls_cert`, `tls_key`. These options behave the same as
+  the now-deprecated `ssl` forms.
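+
+A minimal sketch of the new TLS options on a plugin that supports them; the server URL and file paths below are placeholders:
+
+```toml
+[[outputs.influxdb]]
+  urls = ["https://influxdb.example.com:8086"]  # placeholder server URL
+  tls_ca = "/etc/telegraf/ca.pem"     # CA used to verify the server certificate
+  tls_cert = "/etc/telegraf/cert.pem" # client certificate
+  tls_key = "/etc/telegraf/key.pem"   # client private key
+```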
+
+### New input plugins
+
+- [Aurora (`aurora`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/aurora/README.md) - Contributed by @influxdata
+- [Burrow (`burrow`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/burrow/README.md) - Contributed by @arkady-emelyanov
+- [Fibaro (`fibaro`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/fibaro/README.md) - Contributed by @dynek
+- [JTI OpenConfig Telemetry (`jti_openconfig_telemetry`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/jti_openconfig_telemetry/README.md) - Contributed by @ajhai
+- [Mcrouter (`mcrouter`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mcrouter/README.md) - Contributed by @cthayer
+- [NVIDIA SMI (`nvidia_smi`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin
+- [Syslog (`syslog`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/syslog/README.md) - Contributed by @influxdata
+
+### New processor plugins
+
+- [Converter (`converter`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/converter/README.md) - Contributed by @influxdata
+- [Regex (`regex`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/regex/README.md) - Contributed by @44px
+- [TopK (`topk`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/topk/README.md) - Contributed by @mirath
+
+### New output plugins
+
+- [HTTP (`http`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/http/README.md) - Contributed by @Dark0096
+- [Application Insights (`application_insights`)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/application_insights/README.md) - Contributed by @karolz-ms
+
+### Features
+
+- Add `repl_oplog_window_sec` metric to MongoDB (`mongodb`) input plugin.
+- Add per-host shard metrics in MongoDB (`mongodb`) input plugin.
+- Skip files with leading `..` in config directory.
+- Add TLS support to `socket_writer` and `socket_listener` plugins.
+- Add `snmp` input option to strip non-fixed length index suffixes.
+- Add server version tag to the Docker (`docker`) input plugin.
+- Add support for LeoFS 1.4 to `leofs` input.
+- Add parameter to force the interval of gather for Sysstat (`sysstat`).
+- Support BusyBox ping in the Ping (`ping`) input plugin.
+- Add Mcrouter (`mcrouter`) input plugin.
+- Add TopK (`topk`) processor plugin.
+- Add cursor metrics to MongoDB (`mongodb`) input plugin.
+- Add tag/integer pair for result to Network Response (`net_response`) input plugin.
+- Add Application Insights (`application_insights`) output plugin.
+- Add several important Elasticsearch cluster health metrics.
+- Add batch mode to `mqtt` output.
+- Add Aurora (`aurora`) input plugin.
+- Add Regex (`regex`) processor plugin.
+- Add support for Graphite 1.1 tags.
+- Add timeout option to Sensors (`sensors`) input plugin.
+- Add Burrow (`burrow`) input plugin.
+- Add option to Unbound (`unbound`) input plugin to use threads as tags.
+- Add support for TLS and username/password auth to Aerospike (`aerospike`) input plugin.
+- Add special syslog timestamp parser to grok parser that uses current year.
+- Add Syslog (`syslog`) input plugin.
+- Print the enabled aggregator and processor plugins on startup.
+- Add static `routing_key` option to `amqp` output.
+- Add passive mode exchange declaration option to AMQP Consumer (`amqp_consumer`) input plugin.
+- Add counter fields to PF (`pf`) input plugin.
+
+### Bug fixes
+
+- Write to working file outputs if any files are not writeable.
+- Add all win_perf_counters fields for a series in a single metric.
+- Report results of `dns_query` instead of `0ms` on timeout.
+- Add consul service tags to metric.
+- Fix wildcards and multi-instance processes in win_perf_counters.
+- Fix crash on 32-bit Windows in `win_perf_counters`.
+- Fix `win_perf_counters` not collecting at every interval.
+- Use same flags for all BSD-family ping variants.
+
+
+## v1.6.4 [2018-06-05]
+
+### Bug fixes
+
+* Fix SNMP overriding of auto-configured table fields.
+* Fix uint support in CloudWatch output.
+* Fix documentation of `instance_name` option in Varnish input.
+* Revert to previous Aerospike library version due to memory leak.
+
+## v1.6.3 [2018-05-21]
+
+### Bug fixes
+
+* Fix intermittent panic in Aerospike input plugin.
+* Fix connection leak in the Jolokia agent (`jolokia2_agent`) input plugin.
+* Fix Jolokia agent (`jolokia2_agent`) input plugin timeout parsing.
+* Fix error parsing Dropwizard metrics.
+* Fix Librato (`librato`) output plugin support for unsigned integer (`uint`) and Boolean (`bool`).
+* Fix WaitGroup deadlock in Apache input plugin if URL is incorrect.
+
+## v1.6.2 [2018-05-08]
+
+### Bug fixes
+
+* Use same timestamp for fields in system input.
+* Fix handling of uint64 in Datadog (`datadog`) output.
+* Ignore UTF-8 BOM in JSON parser.
+* Fix case for slave metrics in MySQL (`mysql`) input.
+* Fix uint support in CrateDB (`cratedb`) output.
+
+
+## v1.6.1 [2018-04-23]
+
+### Bug fixes
+
+* Report mem input fields as gauges instead of counters.
+* Fix Graphite output writing unsigned integers in the wrong format.
+* Report available fields if `utmp` is unreadable.
+* Fix potential `no fields` error writing to outputs.
+* Fix uptime reporting in system input when run inside Docker.
+* Fix mem input `cannot allocate memory` error on FreeBSD-based systems.
+* Fix duplicate tags when overriding an existing tag.
+* Add server argument as first argument in the Unbound (`unbound`) input plugin.
+* Fix handling of floats with multiple leading zeroes.
+* Return errors in SSL/TLS configuration of MongoDB (`mongodb`) input plugin.
+
+
+## v1.6 [2018-04-16]
+
+### Release notes
+
+- The MySQL (`mysql`) input plugin has been updated to fix a number of type conversion
+  issues. This may cause a `field type error` when inserting into InfluxDB due
+  to the change of types.
+
+  To address this, we have introduced a new `metric_version` option to control
+  enabling the new format.
+  For in-depth recommendations on upgrading, see [Metric version](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mysql#metric-version) in the MySQL input plugin documentation.
+
+  You are encouraged to migrate to the new model when possible, as the old version
+  is deprecated and will be removed in a future version.
+
+- The PostgreSQL (`postgresql`) input plugin now defaults to using a persistent connection to the database.
+  In environments where TCP connections are terminated, the `max_lifetime`
+  setting should be set to less than the collection `interval` to prevent errors.
+
+- The SQL Server (`sqlserver`) input plugin has a new query and data model that can be enabled
+  by setting `query_version = 2`.
+  Migrate to the new model, if possible, since the old version is deprecated and will be removed in a future version.
+
+- The OpenLDAP (`openldap`) input plugin has a new option, `reverse_metric_names = true`, that reverses metric
+  names to improve grouping.
+  Enable this option, when possible, as the old ordering is deprecated.
+
+- The new HTTP (`http`) input plugin, when configured with `data_format = "json"`, can perform the
+  same task as the now-deprecated HTTP JSON (`httpjson`) input plugin.
+
+
+### New input plugins
+
+- [HTTP (`http`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/http/README.md) - Thanks to @grange74
+- [Ipset (`ipset`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ipset/README.md) - Thanks to @sajoupa
+- [NATS Server Monitoring (`nats`) input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/nats/README.md) - Thanks to @mjs and @levex
+
+### New processor plugins
+
+- [Override (`override`) processor plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/processors/override/README.md) - Thanks to @KarstenSchnitter
+
+### New parsers
+
+- [Dropwizard input data format](https://github.com/influxdata/telegraf/blob/release-1.8/docs/DATA_FORMATS_INPUT.md#dropwizard) - Thanks to @atzoum
+
+### Features
+
+* Add health status mapping from `string` to `int` in Elasticsearch (`elasticsearch`) input plugin.
+* Add control over which stats to gather in BasicStats (`basicstats`) aggregator plugin.
+* Add `messages_delivered_get` to RabbitMQ (`rabbitmq`) input plugin.
+* Add `wired` field to mem input plugin.
+* Add support for gathering exchange metrics to the RabbitMQ (`rabbitmq`) input plugin.
+* Add support for additional metrics on Linux in Zfs (`zfs`) input plugin.
+* Add `available_entropy` field to Kernel (`kernel`) input plugin.
+* Add user privilege level setting to IPMI sensors.
+* Use persistent connection to PostgreSQL database.
+* Add support for dropwizard input data format.
+* Add container health metrics to Docker (`docker`) input plugin.
+* Add support for using globs in devices list of DiskIO (`diskio`) input plugin.
+* Allow running as console application on Windows.
+* Add listener counts and node running status to RabbitMQ (`rabbitmq`) input plugin.
+* Add NATS Server Monitoring (`nats`) input plugin.
+* Add ability to select which queues will be gathered in RabbitMQ (`rabbitmq`) input plugin.
+* Add support for setting BSD source address to the ping (`ping`) input plugin.
+* Add Ipset (`ipset`) input plugin.
+* Add TLS and HTTP basic auth to Prometheus Client (`prometheus_client`) output plugin.
+* Add new sqlserver output data model.
+* Add native Go method for finding `pid` to the Procstat (`procstat`) input plugin.
+* Add additional metrics and reverse metric names option to OpenLDAP (`openldap`) input plugin.
+* Add TLS support to the Mesos (`mesos`) input plugin.
+* Add HTTP (`http`) input plugin.
+* Add keep-alive support to the TCP mode of StatsD (`statsd`) input plugin.
+* Support deadline in Ping (`ping`) input plugin.
+* Add option to disable labels in the Prometheus Client (`prometheus`) output plugin for string fields.
+* Add shard server stats to the MongoDB (`mongodb`) input plugin.
+* Add server option to Unbound (`unbound`) input plugin.
+* Convert boolean metric values to float in Datadog (`datadog`) output plugin.
+* Add Solr 3 compatibility.
+* Add sum stat to BasicStats (`basicstats`) aggregator plugin.
+* Add ability to override proxy from environment in HTTP Response (`http_response`) input plugin.
+* Add host to ping timeout log message.
+* Add override processor plugin.
+* Add `status_code` and result tags and `result_type` field to HTTP Response (`http_response`) input plugin.
+* Add config flag to skip collection of network protocol metrics.
+* Add TLS support to Kapacitor (`kapacitor`) input plugin.
+* Add HTTP basic auth support to the HTTP Listener (`http_listener`) input plugin.
+* Tags in output InfluxDB Line Protocol are now sorted.
+* InfluxDB Line Protocol parser now accepts DOS line endings.
+* An option has been added to skip database creation in the InfluxDB (`influxdb`) output plugin.
+* Add support for connecting to InfluxDB over a UNIX domain socket.
+* Add optional unsigned integer support to the influx data format.
+* Add TLS support to Zookeeper (`zookeeper`) input plugin.
+* Add filters for container state to Docker (`docker`) input plugin.
+
+### Bug fixes
+
+* Fix various MySQL data type conversions.
+* Fix metric buffer limit in internal plugin after reload.
+* Fix panic in HTTP Response (`http_response`) input plugin on invalid regex.
+* Fix socket_listener setting ReadBufferSize on TCP sockets.
+* Add tag for target URL to `phpfpm` input plugin.
+* Fix cannot unmarshal object error in Mesosphere DC/OS (`dcos`) input plugin.
+* Fix InfluxDB output not able to reconnect when server address changes.
+* Fix parsing of DOS line endings in the SMART (`smart`) input plugin.
+* Fix precision truncation when no timestamp included.
+* Fix SNMPv3 connection with Cisco ASA 5515 in SNMP (`snmp`) input plugin.
+
+
+## v1.5.3 [2018-03-14]
+
+### Bug fixes
+
+* Set path to `/` if `HOST_MOUNT_PREFIX` matches full path.
+* Remove `userinfo` from `url` tag in Prometheus input plugin.
+* Fix Ping input plugin not reporting zero durations.
+* Disable `keepalive` in MQTT output plugin to prevent deadlock.
+* Fix collation difference in SQL Server (`sqlserver`) input plugin.
+* Fix uptime metric in Passenger (`passenger`) input plugin.
+* Add output of stderr in case of error to exec log message.
+
+## v1.5.2 [2018-01-30]
+
+### Bug fixes
+
+- Ignore empty lines in Graphite plaintext.
+- Fix `index out of bounds` error in Solr input plugin.
+- Reconnect before sending Graphite metrics if disconnected.
+- Align aggregator period with internal ticker to avoid skipping metrics.
+- Fix a potential deadlock when using aggregators.
+- Limit wait time for writes in MQTT (`mqtt`) output plugin.
+- Revert change in Graphite (`graphite`) output plugin where dot (`.`) in field key was replaced by underscore (`_`).
+- Add `timeout` to Wavefront output write.
+- Exclude `master_replid` fields from Redis input.
+
+## v1.5.1 [2018-01-10]
+
+### Bug fixes
+
+- Fix name error in jolokia2_agent sample config.
+- Fix DC/OS input login expiration time.
+- Set Content-Type charset parameter in InfluxDB (`influxdb`) output plugin and allow it to be overridden.
+- Document permissions setup for Postfix (`postfix`) input plugin.
+- Fix `deliver_get` field in RabbitMQ (`rabbitmq`) input plugin.
+- Escape environment variables during config TOML parsing.
+
+## v1.5 [2017-12-14]
+
+### New plugins
+
+#### Input plugins
+- [Bond (bond)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/bond/README.md) - Thanks to @ildarsv
+- [DC/OS (dcos)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/dcos/README.md) - Thanks to @influxdata
+- [Jolokia2 (jolokia2)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei
+- [NGINX Plus (nginx_plus)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
+- [OpenSMTPD (opensmtpd)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
+- [Particle.io Webhooks (particle)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
+- [PF (pf)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/pf/README.md) - Thanks to @nferch
+- [Postfix (postfix)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/postfix/README.md) - Thanks to @phemmer
+- [SMART (smart)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
+- [Solr (solr)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/solr/README.md) - Thanks to @ljagiello
+- [Teamspeak (teamspeak)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/teamspeak/README.md) - Thanks to @p4ddy1
+- [Unbound (unbound)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/inputs/unbound/README.md) - Thanks to @aromeyer
+
+#### Aggregator plugins
+- [BasicStats (basicstats)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno
+
+#### Output plugins
+- [CrateDB (cratedb)](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/cratedb) - Thanks to @felixge
+- [Wavefront (wavefront)](https://github.com/influxdata/telegraf/tree/release-1.5/plugins/outputs/wavefront/README.md) - Thanks to @puckpuck
+
+
+### Release notes
+
+- In the Kinesis (`kinesis`) output plugin, use of the `partition_key` and
+  `use_random_partitionkey` options has been deprecated in favor of the
+  `partition` subtable. This allows for more flexible methods to set the
+  partition key such as by metric name or by tag.
+
+- With the release of the new, improved Jolokia2 (`jolokia2`) input plugin, the legacy `jolokia`
+  plugin is deprecated and will be removed in a future release. Users of this
+  plugin are encouraged to update to the new `jolokia2` plugin.
+
+### Features
+
+- Add support for sharding based on metric name.
+- Add Kafka output plugin `topic_suffix` option.
+- Include mount mode option in disk metrics.
+- TLS and MTLS enhancements to HTTP Listener input plugin.
+- Add polling method to logparser and tail inputs.
+- Add timeout option for Kubernetes (`kubernetes`) input plugin.
+- Add support for timing sums in statsd input plugin.
+- Add resource limit monitoring to Procstat (`procstat`) input plugin.
+- Add support for k8s service DNS discovery to Prometheus Client (`prometheus`) input plugin.
+- Add configurable metrics endpoint to the Prometheus Client (`prometheus`) output plugin.
+- Add support for NSQLookupd to `nsq_consumer`.
+- Add configurable separator for metrics and fields in OpenTSDB (`opentsdb`) output plugin.
+- Add support for the rollbar occurrence webhook event.
+- Add extra wired tiger cache metrics to `mongodb` input.
+- Collect Docker Swarm service metrics in Docker (`docker`) input plugin.
+- Add cluster health level configuration to Elasticsearch (`elasticsearch`) input plugin.
+- Add ability to limit node stats in Elasticsearch (`elasticsearch`) input plugin.
+- Add UDP IPv6 support to StatsD (`statsd`) input plugin.
+- Use labels in Prometheus Client (`prometheus`) output plugin for string fields.
+- Add support for decimal timestamps to ts-epoch modifier.
+- Add histogram and summary types and use in Prometheus (`prometheus`) plugins.
+- Gather concurrently from snmp agents.
+- Perform DNS lookup before ping and report result.
+- Add instance name option to Varnish (`varnish`) plugin.
+- Add support for SSL settings to Elasticsearch (`elasticsearch`) output plugin.
+- Add modification_time field to Filestat (`filestat`) input plugin.
+- Add systemd unit pid and cgroup matching to Procstat (`procstat`).
+- Use MAX() instead of SUM() for latency measurements in SQL Server (`sqlserver`) input plugin.
+- Add index by week number to Elasticsearch (`elasticsearch`) output plugin.
+- Add support for tags in the index name in Elasticsearch (`elasticsearch`) output plugin.
+- Add slab to mem plugin.
+- Add support for glob patterns in net input plugin.
+- Add option to AMQP (`amqp`) output plugin to publish persistent messages.
+- Support I (idle) process state on procfs+Linux.
+
+### Bug fixes
+
+- Fix webhooks input address in use during reload.
+- Unlock Statsd when stopping to prevent deadlock.
+- Fix cloudwatch output requiring unneeded permissions.
+- Fix prometheus passthrough for existing value types.
+- Always ignore autofs filesystems in disk input.
+- Fail metrics parsing on unescaped quotes.
+- Whitelist allowed char classes for graphite output.
+- Use hexadecimal ids and lowercase names in zipkin input.
+- Fix snmp-tools output parsing with Windows EOLs.
+- Add shadow-utils dependency to rpm package.
+- Use deb-systemd-invoke to restart service.
+- Fix kafka_consumer outside range of offsets error.
+- Fix separation of multiple prometheus_client outputs.
+- Don't add system input uptime_format as a counter.
+
+## v1.4.5 [2017-12-01]
+
+### Bug fixes
+
+- Fix global variable collection when using interval_slow option in MySQL input.
+- Fix error getting net connections info in netstat input.
+- Fix HOST_MOUNT_PREFIX in Docker with disk input.
+
+## v1.4.4 [2017-11-08]
+
+### Bug fixes
+
+- Use schema specified in mqtt_consumer input.
+- Redact Datadog API key in log output.
+- Fix error getting PIDs in netstat input.
+- Support HOST_VAR envvar to locate /var in system input.
+- Use current time if Docker container read time is zero value.
+
+## v1.4.3 [2017-10-25]
+
+### Bug fixes
+
+- Fix container name filters in Docker input.
+- Fix snmpwalk address format in leofs input.
+- Fix case sensitivity issue in SQL Server query.
+- Fix CPU input plugin stuck after suspend on Linux.
+- Fix MongoDB input panic when restarting MongoDB.
+- Preserve URL path prefix in InfluxDB output.
+- Fix TELEGRAF_OPTS expansion in systemd service unit.
+- Remove warning when JSON contains null value.
+- Fix ACL token usage in consul input plugin.
+- Fix unquoting error with Tomcat 6.
+- Fix syscall panic in diskio on some Linux systems.
+
+## v1.4.2 [2017-10-10]
+
+### Bug fixes
+
+- Fix error if int larger than 32-bit in `/proc/vmstat`.
+- Fix parsing of JSON with a UTF8 BOM in `httpjson`.
+- Allow JSON data format to contain zero metrics.
+- Fix format of connection_timeout in `mqtt_consumer`.
+- Fix case sensitivity error in SQL Server input.
+- Add support for proxy environment variables to `http_response`.
+- Add support for standard proxy env vars in outputs.
+- Fix panic in CPU input if number of CPUs changes.
+- Use chunked transfer encoding in InfluxDB output.
+
+## v1.4.1 [2017-09-26]
+
+### Bug fixes
+
+- Fix MQTT input exiting if the broker is not available on startup.
+- Fix optional field value conversions in fluentd input.
+- Whitelist allowed char classes for opentsdb output.
+- Fix counter and gauge metric types.
+- Fix skipped line with empty target in iptables.
+- Fix duplicate keys in perf counters sqlserver query.
+- Fix panic in statsd p100 calculation.
+- Fix arm64 packages containing a 32-bit executable.
+
+## v1.4.0 [2017-09-05]
+
+### Release notes
+
+- The `kafka_consumer` input has been updated to support Kafka 0.9 and
+  above style consumer offset handling. The previous version of this plugin
+  supporting Kafka 0.8 and below is available as the `kafka_consumer_legacy`
+  plugin.
+- In the `aerospike` input the `node_name` field has been changed to be a tag
+  for both the `aerospike_node` and `aerospike_namespace` measurements.
+- The default prometheus_client port has been changed to 9273.
+
+### New plugins
+
+- fail2ban
+- fluentd
+- histogram
+- minecraft
+- openldap
+- salesforce
+- tomcat
+- win_services
+- zipkin
+
+### Features
+
+- Add Kafka 0.9+ consumer support.
+- Add support for self-signed certs to InfluxDB input plugin.
+- Add TCP listener for statsd input.
+- Add whitelisted Docker container environment variables as tags.
+- Add timeout option to IPMI sensor plugin.
+- Add support for an optional SSL/TLS configuration to Nginx input plugin.
+- Add timezone support for logparser timestamps.
+- Add result_type field for http_response input.
+- Add include/exclude filters for docker containers.
+- Add secure connection support to graphite output.
+- Add min/max response time on linux/darwin to ping.
+- Add HTTP Proxy support to influxdb output.
+- Add standard SSL options to mysql input.
+- Add input plugin for fail2ban.
+- Support HOST_PROC in processes and linux_sysctl_fs inputs.
+- Add Minecraft input plugin.
+- Add support for RethinkDB 1.0 handshake protocol.
+- Add optional usage_active and time_active CPU metrics.
+- Change default prometheus_client port.
+- Add fluentd input plugin.
+- Add result_type field to net_response input plugin.
+- Add read timeout to socket_listener.
+- Add input plugin for OpenLDAP.
+- Add network option to dns_query.
+- Add redis_version field to redis input.
+- Add tls options to docker input.
+- Add histogram aggregator plugin.
+- Add Zipkin input plugin.
+- Add Windows Services input plugin.
+- Add path tag to logparser containing path of logfile.
+- Add Salesforce input plugin.
+- Add option to run varnish under sudo.
+- Add weighted_io_time to diskio input.
+- Add gzip content-encoding support to influxdb output.
+- Allow using system plugin in Windows.
+- Add Tomcat input plugin.
+- HTTP headers can be added to InfluxDB output.
+
+### Bug fixes
+
+- Improve logging of errors in Cassandra input.
+- Set db_version to 0 if the version query fails.
+- Fix SQL Server input to work with case-sensitive server collation.
+- Systemd does not see all shutdowns as failures.
+- Reuse transports in input plugins.
+- Fix `no such process` failures in the processes input.
+- Fix multiple plugin loading in win_perf_counters.
+- MySQL input: log and continue on field parse error.
+- Fix timeout option in Windows ping input sample configuration.
+- Fix Kinesis output plugin in govcloud.
+- Fix Aerospike input adding all nodes to a single series.
+- Improve Prometheus Client output documentation.
+- Display error message if prometheus output fails to listen.
+- Fix elasticsearch output content type detection warning.
+- Prevent possible deadlock when using aggregators.
+- Fix combined tagdrop/tagpass filtering.
+- Fix filtering when both pass and drop match an item.
+- Only report cpu usage for online cpus in docker input.
+- Start first aggregator period at startup time.
+- Fix panic in logparser if file cannot be opened.
+- Default to localhost if zookeeper has no servers set.
+- Fix docker memory and cpu reporting in Windows.
+- Allow iptable entries with trailing text.
+- Sanitize password from couchbase metric.
+- Converge to typed value in prometheus output.
+- Skip compilation of logparser and tail on solaris.
+- Discard logging from tail library.
+- Remove log message on ping timeout.
+- Don't retry points beyond retention policy.
+- Don't start Telegraf on install in Amazon Linux.
+- Enable hddtemp input on all platforms.
+- Escape backslash within string fields.
+- Fix parsing of SHM remotes in ntpq input.
+- Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
+- Fix NSQ input plugin when used with version 1.0.0-compat.
+- Add CloudWatch metric constraint validation.
+- Skip non-numerical values in graphite format.
+- Fix panic when handling string fields with escapes.
+
+## v1.3.5 [2017-07-26]
+
+### Bug fixes
+
+- Fix prometheus output cannot be reloaded.
+- Fix filestat reporting exists when it cannot list the directory.
+- Fix ntpq parse issue when using dns_lookup.
+- Fix panic when agent.interval = "0s".
+
+## v1.3.4 [2017-07-12]
+
+### Bug fixes
+
+- Fix handling of escape characters within fields.
+- Fix chrony plugin not tracking system time offset.
+- Do not allow metrics with trailing slashes.
+- Prevent Write from being called concurrently.
+
+## v1.3.3 [2017-06-28]
+
+### Bug fixes
+
+- Allow DOS line endings in tail and logparser.
+- Remove label value sanitization in prometheus output.
+- Fix bug parsing default timestamps with modified precision.
+- Fix panic in elasticsearch input if cannot determine master.
+
+## v1.3.2 [2017-06-14]
+
+### Bug fixes
+
+- Fix InfluxDB UDP metric splitting.
+- Fix mongodb/leofs urls without scheme.
+- Fix inconsistent label dimensions in prometheus output.
+
+## v1.3.1 [2017-05-31]
+
+### Bug fixes
+
+- Fix sqlserver input to work with case-sensitive server collation.
+- Reuse transports in input plugins.
+- Fix `no such process` failures in the processes input.
+- Fix InfluxDB output database quoting.
+- Fix net input on older Linux kernels.
+- Fix panic in mongo input.
+- Fix length calculation of split metric buffer.
+
+## v1.3.0 [2017-05-09]
+
+#### Changes to the Windows ping plugin
+
+Users of the Windows [ping plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ping) will need to drop or migrate their measurements to continue using the plugin.
+The reason for this is that the Windows plugin was outputting a different type than the Linux plugin.
+This made it impossible to use the `ping` plugin for both Windows and Linux machines.
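+
+One possible migration path (a sketch, not part of the original release notes;
+the measurement name below is hypothetical) is to route new-format points on
+Windows hosts into a separate measurement using the standard `name_override`
+input option, so they never collide with pre-1.3 `ping` points:
+
+```toml
+[[inputs.ping]]
+  ## Hosts to ping
+  urls = ["example.org"]
+  ## Hypothetical measurement name; keeps new-format points out of the old "ping" series
+  name_override = "ping_v13"
+```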
+
+#### Changes to the Ceph plugin
+
+For the [Ceph plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ceph), the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag.
+
+Telegraf < 1.3:
+
+```
+# field_name value
+active+clean 123
+active+clean+scrubbing 3
+```
+
+Telegraf >= 1.3:
+
+```
+# field_name value tag
+count 123 state=active+clean
+count 3 state=active+clean+scrubbing
+```
+
+#### Rewritten Riemann plugin
+
+The [Riemann output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/riemann) has been rewritten
+and the [previous riemann plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/riemann_legacy) is _incompatible_ with the new one.
+The reasons for this are outlined in issue [#1878](https://github.com/influxdata/telegraf/issues/1878).
+The previous Riemann output will still be available using `outputs.riemann_legacy` if needed, but it will eventually be deprecated.
+It is highly recommended that all users migrate to the new Riemann output plugin.
+
+#### New Socket Listener and Socket Writer plugins
+
+Generic [Socket Listener](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/socket_listener) and [Socket Writer](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/socket_writer) plugins have been implemented for receiving and sending UDP, TCP, unix, and unix-datagram data.
+These plugins will replace [udp_listener](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/udp_listener) and [tcp_listener](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/tcp_listener), which are still available but will be deprecated eventually.
+
+### Features
+
+- Add SASL options for the [Kafka output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/kafka).
+- Add SSL configuration for [HAproxy input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/haproxy).
+- Add the [Interrupts input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/interrupts).
+- Add generic [Socket Listener input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/socket_listener) and [Socket Writer output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/socket_writer).
+- Extend the [HTTP Response input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/http_response) to support searching for a substring in the response, returning 1 if found, else 0.
+- Add userstats to the [MySQL input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mysql).
+- Add more InnoDB metrics to the [MySQL input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mysql).
+- For the [Ceph input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ceph), the `ceph_pgmap_state` metric now uses a single field `count`, with PG state published as a `state` tag.
+- Use own client for improved throughput and fewer allocations in the [InfluxDB output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/influxdb).
+- Keep -config-directory when running as Windows service.
+- Rewrite the [Riemann output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/riemann).
+- Add support for name templates and udev tags to the [DiskIO input plugin](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/system/DISK_README.md#diskio-input-plugin). +- Add integer metrics for [Consul](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/consul) check health state. +- Add lock option to the [IPtables input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/iptables). +- Support [ipmi_sensor input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/ipmi_sensor) querying local ipmi sensors. +- Increment gather_errors for all errors emitted by inputs. +- Use the official docker SDK. +- Add [AMQP consumer input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/amqp_consumer). +- Add pprof tool. +- Support DEAD(X) state in the [system input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/system). +- Add support for [MongoDB](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/mongodb) client certificates. +- Support adding [SNMP](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/snmp) table indexes as tags. +- Add [Elasticsearch 5.x output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/elasticsearch). +- Add json timestamp units configurability. +- Add support for Linux sysctl-fs metrics. +- Support to include/exclude docker container labels as tags. +- Add [DMCache input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/dmcache). +- Add support for precision in [HTTP Listener input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/http_listener). +- Add `message_len_max` option to the [Kafka consumer input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/kafka_consumer). +- Add [collectd parser](/telegraf/v1.3/concepts/data_formats_input/#collectd). +- Simplify plugin testing without outputs. +- Check signature in the [GitHub webhook input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/webhooks/github). +- Add [papertrail](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/webhooks/papertrail) support to webhooks. +- Change [jolokia input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/jolokia) to use bulk requests. +- Add [DiskIO input plugin](https://github.com/influxdata/telegraf/blob/release-1.8/plugins/inputs/system/DISK_README.md#diskio-input-plugin) for Darwin. +- Add use_random_partitionkey option to the [Kinesis output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/kinesis). +- Add tcp keep-alive to [Socket Listener input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/socket_listener) and [Socket Writer output plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/outputs/socket_writer). +- Add [Kapacitor input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/kapacitor). +- Use Go (golang) 1.8.1. +- Add documentation for the [RabbitMQ input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/rabbitmq). +- Make the [Logparser input plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/logparser) check for newly-created files. + +### Bug fixes + +- Allow `@` symbol in password for the ipmi_sensor plugin. 
+- Fix arithmetic overflow error converting numeric to data type int in SQL Server input.
+- Fix flush jitter inhibiting metric collection.
+- Add missing fields for HAproxy input.
+- Handle null startTime for stopped pods for the Kubernetes input.
+- Fix cpu input panic when /proc/stat is empty.
+- Fix telegraf swallowing panics in --test mode.
+- Create pidfile with 644 permissions & defer file deletion.
+- Fix install/remove of telegraf on non-systemd Debian/Ubuntu systems.
+- Fix reloading telegraf freezing prometheus output.
+- Fix error on InfluxDB output when a tag value is empty.
+- Fix negative `buffer_size` field values from the "internal" plugin.
+- Fix missing error handling in the MySQL plugin leading to segmentation violation.
+- Fix type conflict in windows ping plugin.
+- logparser: regexp with lookahead.
+- Fix Telegraf crashing in LoadDirectory on 0600 files.
+- Iptables input: document that rules without a comment are ignored.
+- Fix win_perf_counters capping values at 100.
+- Allow Ipmi.Path to be set by config.
+- Remove warning if parse empty content.
+- Update default value for Cloudwatch rate limit.
+- Create /etc/telegraf/telegraf.d directory in tarball.
+- Return error on unsupported serializer data format.
+- Fix Windows Performance Counters multi instance identifier.
+- Add write timeout to Riemann output.
+- Fix timestamp parsing on prometheus plugin.
+- Fix deadlock when output cannot write.
+- Fix connection leak in postgresql.
+- Set default measurement name for snmp input.
+- Improve performance of diskio with many disks.
+- Fix wrong units for `heap_objects` in the internal input plugin.
+- Fix ipmi_sensor config being shared between all plugin instances.
+- Fix network statistics not being collected when the system has alias interfaces.
+- Sysstat plugin needs LANG=C or similar locale.
+- Fix file output closing standard streams on reload.
+- Fix AMQP output disconnect blocking all outputs.
+- Improve documentation for redis input plugin.
+
+## v1.2.1 [2017-02-01]
+
+### Bug fixes
+
+- Fix segfault on nil metrics with InfluxDB output.
+- Fix negative number handling.
+
+### Features
+
+- Go (golang) version update 1.7.4 -> 1.7.5.
+
+## v1.2 [2017-01-24]
+
+### Release notes
+
+- The StatsD plugin will now default all "delete_" config options to "true". This
+will change the default behavior for users who were not specifying these parameters
+in their config file.
+
+- The StatsD plugin will also no longer save its state on a service reload.
+Essentially we have reverted PR [#887](https://github.com/influxdata/telegraf/pull/887).
+The reason for this is that saving the state in a global variable is not
+thread-safe (see [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102)),
+and this creates issues if users want to define multiple instances
+of the statsd plugin. Saving state on reload may be considered in the future,
+but this would need to be implemented at a higher level and applied to all
+plugins, not just statsd.
+
+### Features
+
+- Fix improper calculation of CPU percentages.
+- Use RFC3339 timestamps in log output.
+- Non-default HTTP timeouts for RabbitMQ plugin.
+- "Discard" output plugin added, primarily for testing purposes.
+- The JSON parser can now parse an array of objects using the same configuration.
+- Option to use device name rather than path for reporting disk stats.
+- Telegraf "internal" plugin for collecting stats on itself.
+- Update GoLang version to 1.7.4.
+- Support a metric.Split function.
+- Elasticsearch "shield" (basic auth) support doc.
+- Fix over-querying of cloudwatch metrics.
+- OpenTSDB basic auth support.
+- RabbitMQ Connection metrics.
+- HAProxy session limit metric.
+- Accept strings for StatsD sets.
+- Change StatsD default "reset" behavior.
+- Enable setting ClientID in MQTT output.
+- MongoDB input plugin: Improve state data.
+- Ping input: add standard deviation field.
+- Add GC pause metric to InfluxDB input plugin.
+- Add response_timeout property to prometheus input plugin.
+- Pull github.com/lxn/win's pdh wrapper into Telegraf.
+- Support negative statsd counters.
+- Elasticsearch cluster stats support.
+- Change Amazon Kinesis output plugin to use the built-in serializer plugins.
+- Hide username/password from elasticsearch error log messages.
+- Configurable HTTP timeouts in Jolokia plugin.
+- Allow changing jolokia attribute delimiter.
+
+### Bug fixes
+
+- Fix the Value data format not trimming null characters from input.
+- Fix windows `.net` plugin.
+- Cache & expire metrics for delivery to prometheus.
+- Fix potential panic in aggregator plugin metric maker.
+- Add optional ability to define PID as a tag.
+- Fix win_perf_counters not gathering non-English counters.
+- Fix panic when file stat info cannot be collected due to permissions or other issue(s).
+- Fix Graylog output not setting the short_message field.
+- Hddtemp: always put the value in the `temperature` field.
+- Properly collect nested jolokia struct data.
+- Fix puppetagent inputs plugin to support string for config variable.
+- Fix docker input plugin tags when registry has port.
+- Fix tail input when reading from a pipe.
+- Fix MongoDB plugin always showing 0 replication lag.
+- Consul plugin: add check_id as a tag in metrics to avoid overwrites.
+- Partial fix: logparser CLF pattern with IPv6 addresses.
+- Fix thread-safety when using multiple instances of the statsd input plugin.
+- Docker input: fix interface conversion panic.
+- SNMP: ensure proper context is present on error messages.
+- OpenTSDB: add tcp:// prefix if no scheme provided.
+- Influx parser: parse line-protocol without newlines.
+- InfluxDB output: fix field type conflict blocking output buffer.
+
+## v1.1.2 [2016-12-12]
+
+### Bug fixes
+
+- Make snmptranslate not required when using numeric OID.
+- Add a global snmp translation cache.
+
+## v1.1.1 [2016-11-14]
+
+### Bug fixes
+
+- Fix issue parsing toml durations with single quotes.
+
+## v1.1.0 [2016-11-07]
+
+### Release notes
+
+- Telegraf now supports two new types of plugins: processors & aggregators.
+
+- On systemd Telegraf will no longer redirect its stdout to /var/log/telegraf/telegraf.log.
+On most systems, the logs will be directed to the systemd journal and can be
+accessed by `journalctl -u telegraf.service`. Consult the systemd journal
+documentation for configuring journald. There is also a [`logfile` config option](https://github.com/influxdata/telegraf/blob/release-1.8/etc/telegraf.conf#L70)
+available in 1.1, which will allow users to easily configure telegraf to
+continue sending logs to /var/log/telegraf/telegraf.log.
+
+### Features
+
+- Processor & Aggregator plugin support.
+- Add tags in the graylog output plugin.
+- Telegraf systemd service, log to journal.
+- Allow numeric and non-string values for tag_keys.
+- Add Gauge and Counter metric types.
+- Remove carriage returns from exec plugin output on Windows.
+- Elasticsearch input: configurable timeout.
+- Massage metric names in Instrumental output plugin.
+- Apache Mesos improvements.
+- Add Ceph cluster performance statistics.
+- Ability to configure response_timeout in httpjson input.
+- Add additional redis metrics.
+- Add capability to send metrics through HTTP API for OpenTSDB.
+- Add iptables input plugin.
+- Add filestack webhook plugin.
+- Add server hostname for each Docker measurement.
+- Add NATS output plugin.
+- Add HTTP service listener input plugin.
+- Add database blacklist option for PostgreSQL.
+- Add Docker container state metrics to Docker input plugin output.
+- Add support to SNMP for IP & MAC address conversion.
+- Add support to SNMP for OID index suffixes.
+- Change default arguments for SNMP plugin.
+- Apache Mesos input plugin: very high-cardinality mesos-task metrics removed.
+- Logging overhaul to centralize the logger & log levels, & provide a logfile config option.
+- HAProxy plugin socket glob matching.
+- Add Kubernetes plugin for retrieving pod metrics.
+
+### Bug fixes
+
+- Fix NATS plugin reconnection logic.
+- Set required default values in udp_listener & tcp_listener.
+- Fix toml unmarshal panic in Duration objects.
+- Fix handling of non-string values for JSON keys listed in tag_keys.
+- Fix mongodb input panic on version 2.2.
+- Fix statsd scientific notation parsing.
+- Fix Sensors plugin `strconv.ParseFloat: parsing "": invalid syntax` error.
+- Fix prometheus_client reload panic.
+- Fix Apache Kafka consumer panic when nil error is returned down errs channel.
+- Speed up statsd parsing.
+- Fix powerdns integer parse error handling.
+- Fix varnish plugin defaults not being used.
+- Fix Windows glob paths.
+- Fix issue loading config directory on Windows.
+- Fix Windows remote management interactive service.
+- SQLServer: fix issue when case-sensitive collation is activated.
+- Fix huge allocations in http_listener when dealing with huge payloads.
+- Fix translating SNMP fields not in MIB.
+- Fix SNMP emitting empty fields.
+- Fix SQL Server waitstats truncation bug.
+- Fix logparser common log format: numbers in ident.
+- Fix JSON serialization in OpenTSDB output.
+- Fix Graphite template ordering, use most specific.
+- Fix snmp table field initialization for non-automatic table.
+- Fix cgroups path being parsed as metric.
+- Fix phpfpm fcgi client panic when URL does not exist.
+- Fix config file parse error logging.
+- Delete nil fields in the metric maker.
+- Fix MySQL special characters in DSN parsing.
+- Fix Ping input odd timeout behavior.
+- Switch to github.com/kballard/go-shellquote.
+
+## v1.0.1 [2016-09-26]
+
+### Bug fixes
+
+- Prometheus output: fix bug with multi-batch writes.
+- Fix unmarshal of influxdb metrics with null tags.
+- Add configurable timeout to influxdb input plugin.
+- Fix statsd no default value panic.
+
+## v1.0 [2016-09-08]
+
+### Release notes
+
+**Breaking Change** The SNMP plugin is being deprecated in its current form.
+There is a [new SNMP plugin](https://github.com/influxdata/telegraf/tree/release-1.8/plugins/inputs/snmp)
+which fixes many of the issues and confusions
+of its predecessor. For users wanting to continue to use the deprecated SNMP
+plugin, you will need to change your config file from `[[inputs.snmp]]` to
+`[[inputs.snmp_legacy]]`. The configuration of the new SNMP plugin is _not_
+backwards-compatible.
+
+**Breaking Change**: Aerospike main server node measurements have been renamed
+aerospike_node. Aerospike namespace measurements have been renamed to
+aerospike_namespace.
They will also now be tagged with the node_name
+that they correspond to. This has been done to differentiate measurements
+that pertain to node vs. namespace statistics.
+
+**Breaking Change**: Users of github_webhooks must change to the new
+`[[inputs.webhooks]]` plugin.
+
+This means that the default github_webhooks config:
+
+```
+# A Github Webhook Event collector
+[[inputs.github_webhooks]]
+  ## Address and port to host Webhook listener on
+  service_address = ":1618"
+```
+
+should now look like:
+
+```
+# A Webhooks Event collector
+[[inputs.webhooks]]
+  ## Address and port to host Webhook listener on
+  service_address = ":1618"
+
+  [inputs.webhooks.github]
+    path = "/"
+```
+
+- Telegraf now supports being installed as an official Windows service,
+which can be installed via
+`> C:\Program Files\Telegraf\telegraf.exe --service install`.
+
+- `flush_jitter` behavior has been changed. The random jitter will now be
+evaluated at every flush interval, rather than once at startup. This makes it
+consistent with the behavior of `collection_jitter`.
+
+- PostgreSQL plugins now handle oid and name typed columns seamlessly; previously they were ignored/skipped.
+
+### Features
+
+- postgresql_extensible now handles name and oid types correctly.
+- Separate container_version from container_image tag.
+- Support setting per-device and total metrics for Docker network and blockio.
+- MongoDB input plugin: add per-DB stats from db.stats().
+- Add tls support for certs to RabbitMQ input plugin.
+- Add Webhooks input plugin.
+- Add Rollbar webhook plugin.
+- Add Mandrill webhook plugin.
+- docker-machine/boot2docker no longer required for unit tests.
+- Add cgroup input plugin.
+- Add input plugin for consuming metrics from NSQD.
+- Add ability to read Redis from a socket.
+- **Breaking Change** - Redis `role` tag renamed to `replication_role` to avoid global_tags override.
+- Fetch Galera status metrics in MySQL.
+- Aerospike plugin refactored to use official client library.
+- Add measurement name arg to logparser plugin.
+- logparser: change resp_code from a field to a tag.
+- Implement support for fetching hddtemp data.
+- statsd: do not log every dropped metric.
+- Add precision rounding to all metrics on collection.
+- Add support for Tengine.
+- Logparser input plugin for parsing grok-style log patterns.
+- Elasticsearch: now supports connecting to Elasticsearch via SSL.
+- Add graylog input plugin.
+- Add Consul input plugin.
+- Add conntrack input plugin.
+- Add vmstat input plugin.
+- Standardized AWS credentials evaluation & wildcard CloudWatch dimensions.
+- Add SSL config options to http_response plugin.
+- Graphite parser: add ability to specify multiple tag keys, for consistency with influxdb parser.
+- Make DNS lookups for chrony configurable.
+- Allow wildcard filtering of varnish stats.
+- Support for glob patterns in exec plugin commands configuration.
+- RabbitMQ input: made url parameter optional by using DefaultURL (`http://localhost:15672`) if not specified.
+- Limit AWS GetMetricStatistics requests to 10 per second.
+- RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified.
+- Refactor of flush_jitter argument.
+- Add inactive & active memory to mem plugin.
+- Official Windows service.
+- Forking sensors command to remove C package dependency.
+- Add a new SNMP plugin.
+
+### Bug fixes
+
+- Fix `make windows` build target.
+- Fix error race conditions and partial failures.
+- nstat: fix inaccurate config panic.
+- jolokia: fix handling multiple multi-dimensional attributes. +- Fix prometheus character sanitizing. Sanitize more win_perf_counters characters. +- Add diskio io_time to FreeBSD & report timing metrics as ms (as linux does). +- Fix covering Amazon Linux for post remove flow. +- procstat missing fields: read/write bytes & count. +- diskio input plugin: set 'skip_serial_number = true' by default to avoid high cardinality. +- nil metrics panic fix. +- Fix datarace in apache input plugin. +- Add `read_repairs` statistics to riak plugin. +- Fix memory/connection leak in Prometheus input plugin. +- Trim BOM from config file for Windows support. +- Prometheus client output panic on service reload. +- Prometheus parser, protobuf format header fix. +- Prometheus output, metric refresh and caching fixes. +- Panic fix for multiple graphite outputs under very high load. +- Instrumental output has better reconnect behavior. +- Remove PID from procstat plugin to fix cardinality issues. +- Cassandra input: version 2.x "column family" fix. +- Shared WaitGroup in Exec plugin. +- logparser: honor modifiers in "pattern" config. +- logparser: error and exit on file permissions/missing errors. +- Make the user able to specify full path for HAproxy stats. +- Fix Redis url, an extra "tcp://" was added. +- Fix exec plugin panic when using single binary. +- Fixed incorrect prometheus metrics source selection. +- Set default Zookeeper chroot to empty string. +- Fix overall ping timeout to be calculated based on per-ping timeout. +- Change "default" retention policy to "". +- Graphite output mangling '%' character. +- Prometheus input plugin now supports x509 certs authentication. +- Fix systemd service. +- Fix influxdb n_shards counter. +- Fix potential kernel plugin integer parse error. +- Fix potential influxdb input type assertion panic. +- Still send processes metrics if a process exited during metric collection. +- disk plugin panic when usage grab fails. +- Removed leaked "database" tag on redis metrics. +- Processes plugin: fix potential error with /proc/net/stat directory. +- Fix rare RHEL 5.2 panic in gopsutil diskio gathering function. +- Remove IF NOT EXISTS from influxdb output database creation. +- Fix quoting with text values in postgresql_extensible plugin. +- Fix win_perf_counter "index out of range" panic. +- Fix ntpq panic when field is missing. +- Sanitize graphite output field names. +- Fix MySQL plugin not sending 0 value fields. diff --git a/content/telegraf/v1.15/administration/_index.md b/content/telegraf/v1.15/administration/_index.md new file mode 100644 index 000000000..8b821f02f --- /dev/null +++ b/content/telegraf/v1.15/administration/_index.md @@ -0,0 +1,21 @@ +--- + title: Administering Telegraf + + menu: + telegraf_1_15: + name: Administration + weight: 60 + +--- + +## [Configuring Telegraf](/telegraf/v1.15/administration/configuration/) + +[Configuring Telegraf](/telegraf/v1.15/administration/configuration/) discusses the Telegraf configuration file, enabling plugins, and setting environment variables. + +## [Running Telegraf as a Windows service](/telegraf/v1.15/administration/windows_service/) + +[Running Telegraf as a Windows service](/telegraf/v1.15/administration/windows_service/) describes how to use Telegraf as a Windows service. 
+
+## [Troubleshooting Telegraf](/telegraf/v1.15/administration/troubleshooting/)
+
+[Troubleshooting Telegraf](/telegraf/v1.15/administration/troubleshooting/) shows you how to capture Telegraf output, submit sample metrics, and see how Telegraf formats and emits points to its output plugins.
diff --git a/content/telegraf/v1.15/administration/configuration.md b/content/telegraf/v1.15/administration/configuration.md
new file mode 100644
index 000000000..69b67661d
--- /dev/null
+++ b/content/telegraf/v1.15/administration/configuration.md
@@ -0,0 +1,423 @@
+---
+title: Configuring Telegraf
+
+menu:
+  telegraf_1_15:
+    name: Configuring
+    weight: 20
+    parent: Administration
+---
+
+The Telegraf configuration file (`telegraf.conf`) lists all available Telegraf plugins. See the current version here: [telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf).
+
+## Generate a configuration file
+
+A default Telegraf configuration file can be auto-generated by Telegraf:
+
+```
+telegraf config > telegraf.conf
+```
+
+To generate a configuration file with specific inputs and outputs, you can use the
+`--input-filter` and `--output-filter` flags:
+
+```
+telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config
+```
+
+## Configuration file locations
+
+Use the `--config` flag to specify the configuration file location:
+
+- Filename and path, for example: `--config /etc/default/telegraf`
+- Remote URL endpoint, for example: `--config "http://remote-URL-endpoint"`
+
+Use the `--config-directory` flag to include files ending with `.conf` in the specified directory in the Telegraf
+configuration.
+
+On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
+the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
+configuration files.
+
+## Set environment variables
+
+Add environment variables anywhere in the configuration file by prepending them with `$`.
+For strings, variables must be in quotes (for example, `"$STR_VAR"`).
+For numbers and Booleans, variables must be unquoted (for example, `$INT_VAR`, `$BOOL_VAR`).
+
+You can also set environment variables using the Linux `export` command: `export password=mypassword`
+
+> **Note:** We recommend using environment variables for sensitive information.
+
+### Example: Telegraf environment variables
+
+In the Telegraf environment variables file (`/etc/default/telegraf`):
+
+```sh
+USER="alice"
+INFLUX_URL="http://localhost:8086"
+INFLUX_SKIP_DATABASE_CREATION="true"
+INFLUX_PASSWORD="monkey123"
+```
+
+In the Telegraf configuration file (`/etc/telegraf/telegraf.conf`):
+
+```toml
+[global_tags]
+  user = "${USER}"
+
+[[inputs.mem]]
+
+[[outputs.influxdb]]
+  urls = ["${INFLUX_URL}"]
+  skip_database_creation = ${INFLUX_SKIP_DATABASE_CREATION}
+  password = "${INFLUX_PASSWORD}"
+```
+
+The environment variables above add the following configuration settings to Telegraf:
+
+```toml
+[global_tags]
+  user = "alice"
+
+[[inputs.mem]]
+
+[[outputs.influxdb]]
+  urls = ["http://localhost:8086"]
+  skip_database_creation = true
+  password = "monkey123"
+```
+
+## Global tags
+
+Global tags can be specified in the `[global_tags]` section of the config file
+in `key="value"` format. All metrics being gathered on this host will be tagged
+with the tags specified here.
+
+## Agent configuration
+
+Telegraf has a few options you can configure under the `[agent]` section of the
+config.
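+
+For reference, a minimal `[agent]` section might look like the following sketch
+(the values shown are illustrative, not recommendations):
+
+```toml
+[agent]
+  interval = "10s"          # collect from inputs every 10s
+  round_interval = true     # collect on :00, :10, :20, ...
+  metric_batch_size = 1000
+  metric_buffer_limit = 10000
+  collection_jitter = "0s"
+  flush_interval = "10s"
+  flush_jitter = "0s"
+  omit_hostname = false
+```
+
+Each option is described below.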
+
+* **interval**: Default data collection interval for all inputs.
+* **round_interval**: Rounds the collection interval to `interval`.
+For example, if `interval` is set to 10s, then inputs always collect on :00, :10, :20, etc.
+* **metric_batch_size**: Telegraf will send metrics to outputs in batches of at
+most `metric_batch_size` metrics.
+* **metric_buffer_limit**: Telegraf will cache `metric_buffer_limit` metrics
+for each output, and will flush this buffer on a successful write.
+This should be a multiple of `metric_batch_size` and must not be less
+than 2 times `metric_batch_size`.
+* **collection_jitter**: Collection jitter is used to jitter
+the collection by a random amount.
+Each plugin will sleep for a random time within the jitter before collecting.
+This can be used to avoid many plugins querying things like sysfs at the
+same time, which can have a measurable effect on the system.
+* **flush_interval**: Default data flushing interval for all outputs.
+You should not set this below `interval`.
+The maximum flush time will be `flush_interval` + `flush_jitter`.
+* **flush_jitter**: Jitter the flush interval by a random amount.
+This is primarily to avoid
+large write spikes for users running a large number of Telegraf instances.
+For example, a `flush_jitter` of 5s and `flush_interval` of 10s means flushes will happen every 10-15s.
+* **precision**: By default, precision will be set to the same timestamp order
+as the collection interval, with the maximum being 1s. Precision will NOT
+be used for service inputs, such as `logparser` and `statsd`. Valid values are
+`ns`, `us` (or `µs`), `ms`, and `s`.
+* **logfile**: Specify the log file name. The empty string means to log to `stderr`.
+* **debug**: Run Telegraf in debug mode.
+* **quiet**: Run Telegraf in quiet mode (error messages only).
+* **hostname**: Override the default hostname; if empty, use `os.Hostname()`.
+* **omit_hostname**: If true, do not set the `host` tag in the Telegraf agent.
+
+## Input configuration
+
+The following config parameters are available for all inputs:
+
+* **interval**: How often to gather this metric. Normal plugins use a single
+global interval, but if one particular input should be run less or more often,
+you can configure that here.
+* **name_override**: Override the base name of the measurement.
+(Default is the name of the input.)
+* **name_prefix**: Specifies a prefix to attach to the measurement name.
+* **name_suffix**: Specifies a suffix to attach to the measurement name.
+* **tags**: A map of tags to apply to a specific input's measurements.
+
+## Output configuration
+
+There are no generic configuration options available for all outputs.
+
+## Aggregator configuration
+
+The following config parameters are available for all aggregators:
+
+* **period**: The period on which to flush & clear each aggregator. All metrics
+that are sent with timestamps outside of this period will be ignored by the
+aggregator.
+* **delay**: The delay before each aggregator is flushed. This controls
+how long aggregators wait for metrics to arrive from input plugins,
+in the case that aggregators are flushing and inputs are gathering on the
+same interval.
+* **drop_original**: If true, the original metric will be dropped by the
+aggregator and will not get sent to the output plugins.
+* **name_override**: Override the base name of the measurement.
+(Default is the name of the input.)
+* **name_prefix**: Specifies a prefix to attach to the measurement name.
+* **name_suffix**: Specifies a suffix to attach to the measurement name.
+* **tags**: A map of tags to apply to a specific input's measurements.
+
+## Processor configuration
+
+The following config parameters are available for all processors:
+
+* **order**: This is the order in which processors are executed. If this
+is not specified, then processor execution order will be random.
+
+#### Measurement filtering
+
+Filters can be configured per input, output, processor, or aggregator;
+see below for examples.
+
+* **namepass**:
+An array of glob pattern strings. Only points whose measurement name matches
+a pattern in this list are emitted.
+* **namedrop**:
+The inverse of `namepass`. If a match is found the point is discarded. This
+is tested on points after they have passed the `namepass` test.
+* **fieldpass**:
+An array of glob pattern strings. Only fields whose field key matches a
+pattern in this list are emitted.
+* **fielddrop**:
+The inverse of `fieldpass`. Fields with a field key matching one of the
+patterns will be discarded from the point.
+* **tagpass**:
+A table mapping tag keys to arrays of glob pattern strings. Only points
+that contain a tag key in the table and a tag value matching one of its
+patterns are emitted.
+* **tagdrop**:
+The inverse of `tagpass`. If a match is found the point is discarded. This
+is tested on points after they have passed the `tagpass` test.
+* **taginclude**:
+An array of glob pattern strings. Only tags with a tag key matching one of
+the patterns are emitted. In contrast to `tagpass`, which will pass an entire
+point based on its tag, `taginclude` removes all non-matching tags from the
+point. This filter can be used on both inputs & outputs, but it is
+_recommended_ to be used on inputs, as it is more efficient to filter out tags
+at the ingestion point.
+* **tagexclude**:
+The inverse of `taginclude`. Tags with a tag key matching one of the patterns
+will be discarded from the point.
+
+**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
+must be defined at the _end_ of the plugin definition, otherwise subsequent
+plugin config options will be interpreted as part of the tagpass/tagdrop
+tables.
+
+#### Input configuration examples
+
+This is a full working config that will output CPU data to an InfluxDB instance
+at `192.168.59.103:8086`, tagging measurements with `dc="denver-1"`. It will output
+measurements at a 10s interval and will collect per-cpu data, dropping any
+fields which begin with `time_`.
+
+```toml
+[global_tags]
+  dc = "denver-1"
+
+[agent]
+  interval = "10s"
+
+# OUTPUTS
+[[outputs.influxdb]]
+  urls = ["http://192.168.59.103:8086"] # required
+  database = "telegraf" # required
+  precision = "s"
+
+# INPUTS
+[[inputs.cpu]]
+  percpu = true
+  totalcpu = false
+  # filter all fields beginning with 'time_'
+  fielddrop = ["time_*"]
+```
+
+#### Input config: `tagpass` and `tagdrop`
+
+**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
+the plugin definition, otherwise subsequent plugin config options will be
+interpreted as part of the tagpass/tagdrop map.
+
+```toml
+[[inputs.cpu]]
+  percpu = true
+  totalcpu = false
+  fielddrop = ["cpu_time"]
+  # Don't collect CPU data for cpu6 & cpu7
+  [inputs.cpu.tagdrop]
+    cpu = [ "cpu6", "cpu7" ]
+
+[[inputs.disk]]
+  [inputs.disk.tagpass]
+    # tagpass conditions are OR, not AND.
+    # If the (filesystem is ext4 or xfs) OR (the path is /opt or /home)
+    # then the metric passes
+    fstype = [ "ext4", "xfs" ]
+    # Globs can also be used on the tag values
+    path = [ "/opt", "/home*" ]
+```
+
+#### Input config: `fieldpass` and `fielddrop`
+
+```toml
+# Drop all metrics for guest & steal CPU usage
+[[inputs.cpu]]
+  percpu = false
+  totalcpu = true
+  fielddrop = ["usage_guest", "usage_steal"]
+
+# Only store inode related metrics for disks
+[[inputs.disk]]
+  fieldpass = ["inodes*"]
+```
+
+#### Input config: `namepass` and `namedrop`
+
+```toml
+# Drop all metrics about containers for kubelet
+[[inputs.prometheus]]
+  urls = ["http://kube-node-1:4194/metrics"]
+  namedrop = ["container_*"]
+
+# Only store rest client related metrics for kubelet
+[[inputs.prometheus]]
+  urls = ["http://kube-node-1:4194/metrics"]
+  namepass = ["rest_client_*"]
+```
+
+#### Input config: `taginclude` and `tagexclude`
+
+```toml
+# Only include the "cpu" tag in the measurements for the cpu plugin.
+[[inputs.cpu]]
+  percpu = true
+  totalcpu = true
+  taginclude = ["cpu"]
+
+# Exclude the `fstype` tag from the measurements for the disk plugin.
+[[inputs.disk]]
+  tagexclude = ["fstype"]
+```
+
+#### Input config: `prefix`, `suffix`, and `override`
+
+This plugin will emit measurements with the name `cpu_total`.
+
+```toml
+[[inputs.cpu]]
+  name_suffix = "_total"
+  percpu = false
+  totalcpu = true
+```
+
+This will emit measurements with the name `foobar`.
+
+```toml
+[[inputs.cpu]]
+  name_override = "foobar"
+  percpu = false
+  totalcpu = true
+```
+
+#### Input config: tags
+
+This plugin will emit measurements with two additional tags: `tag1=foo` and
+`tag2=bar`.
+
+**NOTE** Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the
+plugin definition.
+
+```toml
+[[inputs.cpu]]
+  percpu = false
+  totalcpu = true
+  [inputs.cpu.tags]
+    tag1 = "foo"
+    tag2 = "bar"
+```
+
+#### Multiple inputs of the same type
+
+Additional inputs (or outputs) of the same type can be specified by defining these instances in the configuration file. To avoid measurement collisions, use the `name_override`, `name_prefix`, or `name_suffix` config options:
+
+```toml
+[[inputs.cpu]]
+  percpu = false
+  totalcpu = true
+
+[[inputs.cpu]]
+  percpu = true
+  totalcpu = false
+  name_override = "percpu_usage"
+  fielddrop = ["cpu_time*"]
+```
+
+#### Output configuration examples
+
+```toml
+[[outputs.influxdb]]
+  urls = [ "http://localhost:8086" ]
+  database = "telegraf"
+  precision = "s"
+  # Drop all measurements that start with "aerospike"
+  namedrop = ["aerospike*"]
+
+[[outputs.influxdb]]
+  urls = [ "http://localhost:8086" ]
+  database = "telegraf-aerospike-data"
+  precision = "s"
+  # Only accept aerospike data:
+  namepass = ["aerospike*"]
+
+[[outputs.influxdb]]
+  urls = [ "http://localhost:8086" ]
+  database = "telegraf-cpu0-data"
+  precision = "s"
+  # Only store measurements where the tag "cpu" matches the value "cpu0"
+  [outputs.influxdb.tagpass]
+    cpu = ["cpu0"]
+```
+
+#### Aggregator configuration examples
+
+This will collect and emit the min/max of the system load1 metric every
+30s, dropping the originals.
+
+```toml
+[[inputs.system]]
+  fieldpass = ["load1"] # collects system load1 metric.
+
+[[aggregators.minmax]]
+  period = "30s" # send & clear the aggregate every 30s.
+  drop_original = true # drop the original metrics.
+
+[[outputs.file]]
+  files = ["stdout"]
+```
+
+This will collect and emit the min/max of the swap metrics every
+30s, dropping the originals.
The aggregator will not be applied +to the system load metrics due to the `namepass` parameter. + +```toml +[[inputs.swap]] + +[[inputs.system]] + fieldpass = ["load1"] # collects system load1 metric. + +[[aggregators.minmax]] + period = "30s" # send & clear the aggregate every 30s. + drop_original = true # drop the original metrics. + namepass = ["swap"] # only "pass" swap metrics through the aggregator. + +[[outputs.file]] + files = ["stdout"] +``` diff --git a/content/telegraf/v1.15/administration/enterprise-plugins.md b/content/telegraf/v1.15/administration/enterprise-plugins.md new file mode 100644 index 000000000..cc582c5d6 --- /dev/null +++ b/content/telegraf/v1.15/administration/enterprise-plugins.md @@ -0,0 +1,18 @@ +--- +title: Recommended Telegraf plugins for Enterprise users + +menu: + telegraf_1_15: + name: Recommended plugins for Enterprise users + weight: 20 + parent: Administration +draft: true +--- + +The Telegraf configuration file (`telegraf.conf`) lists all of the available plugins. The current version is available here: +[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf) + +## Core Telegraf plugins for Enterprise users + + +## Optional Telegraf plugins for Enterprise users diff --git a/content/telegraf/v1.15/administration/troubleshooting.md b/content/telegraf/v1.15/administration/troubleshooting.md new file mode 100644 index 000000000..3088ef4cd --- /dev/null +++ b/content/telegraf/v1.15/administration/troubleshooting.md @@ -0,0 +1,89 @@ +--- +title: Troubleshooting Telegraf + +menu: + telegraf_1_15: + name: Troubleshooting + weight: 30 + parent: Administration +--- + +This guide will show you how to capture Telegraf output, submit sample metrics, and see how Telegraf formats and emits points to its output plugins. + +## Capture output + +A quick way to view Telegraf output is by enabling a new UDP output plugin to run in parallel with the existing output plugins. Since each output plugin creates its own stream, the already existing outputs will not be affected. Traffic will be replicated to all active outputs. + +> **NOTE:** This approach requires Telegraf to be restarted, which will cause a brief interruption to your metrics collection. + +The minimal Telegraf configuration required to enable a UDP output is: + +``` +[[outputs.influxdb]] + urls = ["udp://localhost:8089"] +``` + +This setup utilizes the UDP format of the [InfluxDB output plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb) and emits points formatted in InfluxDB's [line protocol](/influxdb/latest/concepts/glossary/#line-protocol). +You will need to append this section to the Telegraf configuration file and restart Telegraf for the change to take effect. + +Now you are ready to start listening on the destination port (`8089` in this example) using a simple tool like `netcat`: + +``` +nc -lup 8089 +``` + +`nc` will print the exact Telegraf output on stdout. +You can also direct the output to a file for further inspection: + +``` +nc -lup 8089 > telegraf_dump.txt +``` + +## Submit test inputs + +Once you have Telegraf's output arriving to your `nc` socket, you can enable the [inputs.socket_listener](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener) plugins to submit some sample metrics. + +Append the TCP or UDP input section to Telegraf's config file and restart Telegraf for the change to take effect. 
+
+```
+[[inputs.socket_listener]]
+  service_address = "tcp://:8094"
+  data_format = "influx"
+```
+
+Submit sample data to the Telegraf socket listener:
+
+```
+echo 'mymeasurement,my_tag_key=mytagvalue my_field="my field value"' | nc localhost 8094
+```
+
+The output from your `netcat` listener will look like the following:
+
+```
+mymeasurement,host=kubuntu,my_tag_key=mytagvalue my_field="my field value" 1478106104713745634
+```
+
+## Testing other plugins
+
+The same approach can be used to test other plugins, like the [inputs.statsd](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd) plugin.
+
+Here is a basic configuration example of how to set up the Telegraf statsd input plugin:
+
+```
+[[inputs.statsd]]
+  service_address = ":8125"
+  metric_separator = "_"
+  allowed_pending_messages = 10000
+```
+
+Send a sample metric to the Telegraf statsd port:
+
+```
+echo "a.b.c:1|g" | nc -u localhost 8125
+```
+
+The output from `nc` will look like the following:
+
+```
+a_b_c,host=myserver,metric_type=gauge value=1 1478106500000000000
+```
diff --git a/content/telegraf/v1.15/administration/windows_service.md b/content/telegraf/v1.15/administration/windows_service.md
new file mode 100644
index 000000000..e72777798
--- /dev/null
+++ b/content/telegraf/v1.15/administration/windows_service.md
@@ -0,0 +1,48 @@
+---
+title: Running Telegraf as a Windows service
+description: How to configure Telegraf as a Windows service.
+menu:
+  telegraf_1_15:
+    name: Running as Windows service
+    weight: 20
+    parent: Administration
+---
+
+# Running Telegraf as a Windows service
+
+Telegraf natively supports running as a Windows service. Outlined below are
+the general steps to set it up.
+
+1. Obtain the Telegraf distribution for Windows.
+2. Create the directory `C:\Program Files\Telegraf` (if you install in a different location, specify that location with the `--config` parameter).
+3. Place the `telegraf.exe` and the `telegraf.conf` files into `C:\Program Files\Telegraf`.
+4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator. If necessary, wrap any file paths containing spaces in double quotes (`""`):
+
+   ```
+   > C:\"Program Files"\Telegraf\telegraf.exe --service install
+   ```
+
+5. Edit the configuration file to meet your requirements.
+
+6. To verify that it works, run:
+
+   ```
+   > C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test
+   ```
+
+7. To start collecting data, run:
+
+   ```
+   > net start telegraf
+   ```
+
+## Other supported operations
+
+Telegraf can manage its own service through the `--service` flag:
+
+| Command                            | Effect                        |
+|------------------------------------|-------------------------------|
+| `telegraf.exe --service install`   | Install telegraf as a service |
+| `telegraf.exe --service uninstall` | Remove the telegraf service   |
+| `telegraf.exe --service start`     | Start the telegraf service    |
+| `telegraf.exe --service stop`      | Stop the telegraf service     |
diff --git a/content/telegraf/v1.15/concepts/_index.md b/content/telegraf/v1.15/concepts/_index.md
new file mode 100644
index 000000000..dc860c471
--- /dev/null
+++ b/content/telegraf/v1.15/concepts/_index.md
@@ -0,0 +1,21 @@
+---
+title: Key Telegraf concepts
+description: This section discusses key concepts about Telegraf, including supported input and output data formats, aggregator and processor plugins, and a glossary of important terms.
+menu:
+  telegraf_1_15:
+    name: Concepts
+    weight: 30
+---
+This section discusses key concepts about Telegraf, the plugin-driven server agent component of the InfluxData time series platform. Topics covered include metrics, aggregator and processor plugins, and a glossary of important terms.
+
+## [Telegraf metrics](/telegraf/v1.15/concepts/metrics/)
+
+[Telegraf metrics](/telegraf/v1.15/concepts/metrics/) are internal representations used to model data during processing.
+
+## [Telegraf aggregator and processor plugins](/telegraf/v1.15/concepts/aggregator_processor_plugins/)
+
+[Telegraf aggregator and processor plugins](/telegraf/v1.15/concepts/aggregator_processor_plugins/) work between the input plugins and output plugins to aggregate and process metrics in Telegraf.
+
+## [Glossary of terms (for Telegraf)](/telegraf/v1.15/concepts/glossary/)
+
+This section includes definitions of important terms related to Telegraf.
diff --git a/content/telegraf/v1.15/concepts/aggregator_processor_plugins.md b/content/telegraf/v1.15/concepts/aggregator_processor_plugins.md
new file mode 100644
index 000000000..e5a8d6971
--- /dev/null
+++ b/content/telegraf/v1.15/concepts/aggregator_processor_plugins.md
@@ -0,0 +1,62 @@
+---
+title: Telegraf aggregator and processor plugins
+description: Use Telegraf aggregator and processor plugins to aggregate and process data between the input plugins and output plugins.
+menu:
+  telegraf_1_15:
+    name: Aggregator and processor plugins
+    weight: 20
+    parent: Concepts
+---
+
+Besides the input plugins and output plugins, Telegraf includes aggregator and processor plugins, which are used to aggregate and process metrics as they pass through Telegraf.
+
+```
+┌───────────┐
+│           │
+│    CPU    │───┐
+│           │   │
+└───────────┘   │
+                │
+┌───────────┐   │                                             ┌───────────┐
+│           │   │                                             │           │
+│  Memory   │───┤                                         ┌──▶│ InfluxDB  │
+│           │   │                                         │   │           │
+└───────────┘   │   ┌─────────────┐     ┌─────────────┐   │   └───────────┘
+                │   │             │     │Aggregate    │   │
+┌───────────┐   │   │Process      │     │ - mean      │   │   ┌───────────┐
+│           │   │   │ - transform │     │ - quantiles │   │   │           │
+│   MySQL   │───┼──▶│ - decorate  │────▶│ - min/max   │───┼──▶│   File    │
+│           │   │   │ - filter    │     │ - count     │   │   │           │
+└───────────┘   │   │             │     │             │   │   └───────────┘
+                │   └─────────────┘     └─────────────┘   │
+┌───────────┐   │                                         │   ┌───────────┐
+│           │   │                                         │   │           │
+│   SNMP    │───┤                                         └──▶│   Kafka   │
+│           │   │                                             │           │
+└───────────┘   │                                             └───────────┘
+                │
+┌───────────┐   │
+│           │   │
+│  Docker   │───┘
+│           │
+└───────────┘
+```
+
+**Processor plugins** process metrics as they pass through and immediately emit
+results based on the values they process. For example, this could be printing
+all metrics or adding a tag to all metrics that pass through.
+
+**Aggregator plugins**, on the other hand, are a bit more complicated. Aggregators
+are typically used to emit new _aggregate_ metrics, such as a running mean,
+minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_
+plugins are configured with a `period`. The `period` is the size of the window
+of metrics that each _aggregate_ represents. In other words, the emitted
+_aggregate_ metric will be the aggregated value of the past `period` seconds.
+Since many users will only care about their aggregates and not every single metric
+gathered, there is also a `drop_original` argument, which tells Telegraf to only
+emit the aggregates and not the original metrics.
+
+**NOTE** Since aggregator plugins only aggregate metrics within their periods,
+historical data is not supported. In other words, if your metric timestamp is more
+than `now() - period` in the past, it will not be aggregated. If this is a feature
+that you need, please comment on this [GitHub issue](https://github.com/influxdata/telegraf/issues/1992).
diff --git a/content/telegraf/v1.15/concepts/glossary.md b/content/telegraf/v1.15/concepts/glossary.md
new file mode 100644
index 000000000..d63bb731f
--- /dev/null
+++ b/content/telegraf/v1.15/concepts/glossary.md
@@ -0,0 +1,103 @@
+---
+title: Telegraf glossary of terms
+description: This section includes definitions of important terms related to Telegraf, the plugin-driven server agent component of the InfluxData time series platform.
+menu:
+  telegraf_1_15:
+    name: Glossary of terms
+    weight: 30
+    parent: Concepts
+---
+
+## agent
+
+The agent is the core part of Telegraf. It gathers metrics from the declared input plugins and sends metrics to the declared output plugins, based on the plugins enabled in the given configuration.
+
+Related entries: [input plugin](/telegraf/v1.15/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin)
+
+## aggregator plugin
+
+Aggregator plugins receive raw metrics from input plugins and create aggregate metrics from them.
+The aggregate metrics are then passed to the configured output plugins.
+
+Related entries: [input plugin](/telegraf/v1.15/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin), [processor plugin](/telegraf/v1.15/concepts/glossary/#processor-plugin)
+
+## batch size
+
+The Telegraf agent sends metrics to output plugins in batches, not individually.
+The batch size controls the size of each write batch that Telegraf sends to the output plugins.
+
+Related entries: [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin)
+
+## collection interval
+
+The default global interval for collecting data from each input plugin.
+The collection interval can be overridden by each individual input plugin's configuration.
+
+Related entries: [input plugin](/telegraf/v1.15/concepts/glossary/#input-plugin)
+
+## collection jitter
+
+Collection jitter is used to prevent every input plugin from collecting metrics simultaneously, which can have a measurable effect on the system.
+Each collection interval, every input plugin sleeps for a random time between zero and the collection jitter before collecting the metrics.
+
+Related entries: [collection interval](/telegraf/v1.15/concepts/glossary/#collection-interval), [input plugin](/telegraf/v1.15/concepts/glossary/#input-plugin)
+
+## flush interval
+
+The global interval for flushing data from each output plugin to its destination.
+This value should not be set lower than the collection interval.
+
+Related entries: [collection interval](/telegraf/v1.15/concepts/glossary/#collection-interval), [flush jitter](/telegraf/v1.15/concepts/glossary/#flush-jitter), [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin)
+
+## flush jitter
+
+Flush jitter is used to prevent every output plugin from sending writes simultaneously, which can overwhelm some data sinks.
+Each flush interval, every output plugin sleeps for a random time between zero and the flush jitter before emitting metrics.
+This helps smooth out write spikes when running a large number of Telegraf instances.
+
+Related entries: [flush interval](/telegraf/v1.15/concepts/glossary/#flush-interval), [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin)
+
+## input plugin
+
+Input plugins actively gather metrics and deliver them to the core agent, where aggregator, processor, and output plugins can operate on the metrics.
+To activate an input plugin, enable and configure it in Telegraf's configuration file.
+
+Related entries: [aggregator plugin](/telegraf/v1.15/concepts/glossary/#aggregator-plugin), [collection interval](/telegraf/v1.15/concepts/glossary/#collection-interval), [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin), [processor plugin](/telegraf/v1.15/concepts/glossary/#processor-plugin)
+
+## metric buffer
+
+The metric buffer caches individual metrics when writes are failing for an output plugin.
+Telegraf will attempt to flush the buffer upon a successful write to the output.
+The oldest metrics are dropped first when this buffer fills.
+
+Related entries: [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin)
+
+## output plugin
+
+Output plugins deliver metrics to their configured destination. To activate an output plugin, enable and configure it in Telegraf's configuration file.
+
+Related entries: [aggregator plugin](/telegraf/v1.15/concepts/glossary/#aggregator-plugin), [flush interval](/telegraf/v1.15/concepts/glossary/#flush-interval), [input plugin](/telegraf/v1.15/concepts/glossary/#input-plugin), [processor plugin](/telegraf/v1.15/concepts/glossary/#processor-plugin)
+
+## precision
+
+The precision configuration setting determines how much timestamp precision is retained in the points received from input plugins. All incoming timestamps are truncated to the given precision.
+Telegraf then pads the truncated timestamps with zeros to create a nanosecond timestamp; output plugins will emit timestamps in nanoseconds.
+Valid precisions are `ns`, `us` or `µs`, `ms`, and `s`.
+
+For example, if the precision is set to `ms`, the nanosecond epoch timestamp `1480000000123456789` would be truncated to `1480000000123` in millisecond precision and then padded with zeros to make a new, less precise nanosecond timestamp of `1480000000123000000`.
+Output plugins do not alter the timestamp further. The precision setting is ignored for service input plugins.
+
+Related entries: [aggregator plugin](/telegraf/v1.15/concepts/glossary/#aggregator-plugin), [input plugin](/telegraf/v1.15/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin), [processor plugin](/telegraf/v1.15/concepts/glossary/#processor-plugin), [service input plugin](/telegraf/v1.15/concepts/glossary/#service-input-plugin)
+
+## processor plugin
+
+Processor plugins transform, decorate, and/or filter metrics collected by input plugins, passing the transformed metrics to the output plugins.
+
+Related entries: [aggregator plugin](/telegraf/v1.15/concepts/glossary/#aggregator-plugin), [input plugin](/telegraf/v1.15/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin)
+
+## service input plugin
+
+Service input plugins are input plugins that run in a passive collection mode while the Telegraf agent is running.
+They listen on a socket for known protocol inputs, or apply their own logic to ingested metrics before delivering them to the Telegraf agent.
+
+Related entries: [aggregator plugin](/telegraf/v1.15/concepts/glossary/#aggregator-plugin), [input plugin](/telegraf/v1.15/concepts/glossary/#input-plugin), [output plugin](/telegraf/v1.15/concepts/glossary/#output-plugin), [processor plugin](/telegraf/v1.15/concepts/glossary/#processor-plugin)
diff --git a/content/telegraf/v1.15/concepts/metrics.md b/content/telegraf/v1.15/concepts/metrics.md
new file mode 100644
index 000000000..c8bc62c1d
--- /dev/null
+++ b/content/telegraf/v1.15/concepts/metrics.md
@@ -0,0 +1,28 @@
+---
+title: Telegraf metrics
+description: Telegraf metrics are internal representations used to model data during processing and are based on InfluxDB's data model. Each metric component includes the measurement name, tags, fields, and timestamp.
+menu:
+  telegraf_1_15:
+    name: Metrics
+    weight: 10
+    parent: Concepts
+---
+
+Telegraf metrics are the internal representation used to model data during
+processing. These metrics are closely based on InfluxDB's data model and contain
+four main components:
+
+- **Measurement name**: Description and namespace for the metric.
+- **Tags**: Key/value string pairs, usually used to identify the
+  metric.
+- **Fields**: Key/value pairs that are typed and usually contain the
+  metric data.
+- **Timestamp**: Date and time associated with the fields.
+
+This metric type exists only in memory and must be converted to a concrete
+representation in order to be transmitted or viewed. Telegraf provides [output data formats][output data formats] (also known as *serializers*) for these conversions. Telegraf's default serializer converts to [InfluxDB Line
+Protocol][line protocol], which provides a high-performance, direct one-to-one
+mapping from Telegraf metrics.
+
+[output data formats]: /telegraf/v1.15/data_formats/output/
+[line protocol]: /telegraf/v1.15/data_formats/output/influx/
diff --git a/content/telegraf/v1.15/data_formats/_index.md b/content/telegraf/v1.15/data_formats/_index.md
new file mode 100644
index 000000000..500c3bb28
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/_index.md
@@ -0,0 +1,21 @@
+---
+title: Telegraf data formats
+description: Telegraf supports input data formats and output data formats for converting input and output data.
+menu:
+  telegraf_1_15:
+    name: Data formats
+    weight: 50
+---
+This section covers the input data formats and output data formats used in the Telegraf plugin-driven server agent component of the InfluxData time series platform.
+
+## [Telegraf input data formats](/telegraf/v1.15/data_formats/input/)
+
+[Telegraf input data formats](/telegraf/v1.15/data_formats/input/) support parsing input data in formats such as InfluxDB Line Protocol, JSON, Graphite, Value, Nagios, collectd, and Dropwizard into Telegraf metrics.
+
+## [Telegraf output data formats](/telegraf/v1.15/data_formats/output/)
+
+[Telegraf output data formats](/telegraf/v1.15/data_formats/output/) can serialize Telegraf metrics into output data formats such as InfluxDB Line Protocol, JSON, and Graphite.
+
+## [Telegraf template patterns](/telegraf/v1.15/data_formats/template-patterns/)
+
+[Telegraf template patterns](/telegraf/v1.15/data_formats/template-patterns/) are used to define templates for use with parsing and serializing data formats in Telegraf.
diff --git a/content/telegraf/v1.15/data_formats/input/_index.md b/content/telegraf/v1.15/data_formats/input/_index.md
new file mode 100644
index 000000000..c135a0070
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/_index.md
@@ -0,0 +1,46 @@
+---
+title: Telegraf input data formats
+description: Telegraf supports parsing input data in formats such as InfluxDB Line Protocol, CollectD, CSV, Dropwizard, Graphite, Grok, JSON, Logfmt, Nagios, Value, and Wavefront into Telegraf metrics.
+menu:
+  telegraf_1_15:
+    name: Input data formats
+    weight: 1
+    parent: Data formats
+---
+
+Telegraf contains many general-purpose plugins that support parsing input data
+into [metrics][] using a configurable parser. This allows, for example, the
+`kafka_consumer` input plugin to process messages in either InfluxDB Line
+Protocol or JSON format. Telegraf supports the following input data formats:
+
+- [InfluxDB Line Protocol](/telegraf/v1.15/data_formats/input/influx/)
+- [collectd](/telegraf/v1.15/data_formats/input/collectd/)
+- [CSV](/telegraf/v1.15/data_formats/input/csv/)
+- [Dropwizard](/telegraf/v1.15/data_formats/input/dropwizard/)
+- [Graphite](/telegraf/v1.15/data_formats/input/graphite/)
+- [Grok](/telegraf/v1.15/data_formats/input/grok/)
+- [JSON](/telegraf/v1.15/data_formats/input/json/)
+- [logfmt](/telegraf/v1.15/data_formats/input/logfmt/)
+- [Nagios](/telegraf/v1.15/data_formats/input/nagios/)
+- [Value](/telegraf/v1.15/data_formats/input/value/), e.g., `45` or `"booyah"`
+- [Wavefront](/telegraf/v1.15/data_formats/input/wavefront/)
+
+Any input plugin containing the `data_format` option can use it to select the
+desired parser:
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
+
+  ## measurement name suffix (for separating different commands)
+  name_suffix = "_mycollector"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "json"
+```
+
+[metrics]: /telegraf/v1.15/concepts/metrics/
diff --git a/content/telegraf/v1.15/data_formats/input/collectd.md b/content/telegraf/v1.15/data_formats/input/collectd.md
new file mode 100644
index 000000000..903d974ea
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/collectd.md
@@ -0,0 +1,48 @@
+---
+title: Collectd input data format
+description: Use the collectd input data format to parse the collectd network binary protocol to create tags for host, instance, type, and type instance.
+menu:
+  telegraf_1_15:
+    name: collectd
+    weight: 10
+    parent: Input data formats
+---
+
+The collectd input data format parses the collectd network binary protocol to create tags for host, instance, type, and type instance. All collectd values are added as float64 fields.
+
+For more information, see [binary protocol](https://collectd.org/wiki/index.php/Binary_protocol) in the collectd Wiki.
+
+You can control the cryptographic settings with parser options.
+Create an authentication file and set `collectd_auth_file` to the path of the file, then set the desired security level in `collectd_security_level` (a sample authentication file is sketched below).
+
+For more information, including client setup, see
+[Cryptographic setup](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup) in the collectd Wiki.
+
+You can also change the path to the types database (`types.db`) or add additional databases using
+`collectd_typesdb`.
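+
+For reference, the authentication file mentioned above is a plain-text list of `username: password` pairs, one pair per line, as described in the collectd networking documentation. A minimal sketch (the user name and password below are placeholders):
+
+```
+# /etc/collectd/auth_file
+# One "username: password" pair per line.
+telegraf: examplepassword
+```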
+
+## Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "collectd"
+
+  ## Authentication file for cryptographic security levels
+  collectd_auth_file = "/etc/collectd/auth_file"
+  ## One of none (default), sign, or encrypt
+  collectd_security_level = "encrypt"
+  ## Path to TypesDB specifications
+  collectd_typesdb = ["/usr/share/collectd/types.db"]
+
+  ## Multi-value plugins can be handled two ways.
+  ## "split" will parse and store the multi-value plugin data into separate measurements
+  ## "join" will parse and store the multi-value plugin as a single multi-value measurement.
+  ## "split" is the default behavior for backward compatibility with previous versions of InfluxDB.
+  collectd_parse_multivalue = "split"
+```
diff --git a/content/telegraf/v1.15/data_formats/input/csv.md b/content/telegraf/v1.15/data_formats/input/csv.md
new file mode 100644
index 000000000..4935dfbcc
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/csv.md
@@ -0,0 +1,111 @@
+---
+title: CSV input data format
+description: Use the "csv" input data format to parse a document containing comma-separated values into Telegraf metrics.
+menu:
+  telegraf_1_15:
+    name: CSV
+    weight: 20
+    parent: Input data formats
+---
+
+The CSV input data format parses documents containing comma-separated values into Telegraf metrics.
+
+## Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "csv"
+
+  ## Indicates how many rows to treat as a header. By default, the parser assumes
+  ## there is no header and will parse the first row as data. If set to anything more
+  ## than 1, column names will be concatenated with the name listed in the next header row.
+  ## If `csv_column_names` is specified, the column names in the header will be overridden.
+  csv_header_row_count = 0
+
+  ## For assigning custom names to columns
+  ## If this is specified, all columns should have a name
+  ## Unnamed columns will be ignored by the parser.
+  ## If `csv_header_row_count` is set to 0, this config must be used
+  csv_column_names = []
+
+  ## Indicates the number of rows to skip before looking for header information.
+  csv_skip_rows = 0
+
+  ## Indicates the number of columns to skip before looking for data to parse.
+  ## These columns will be skipped in the header as well.
+  csv_skip_columns = 0
+
+  ## The separator between csv fields
+  ## By default, the parser assumes a comma (",")
+  csv_delimiter = ","
+
+  ## The character reserved for marking a row as a comment row
+  ## Commented rows are skipped and not parsed
+  csv_comment = ""
+
+  ## If set to true, the parser will remove leading whitespace from fields
+  ## By default, this is false
+  csv_trim_space = false
+
+  ## Columns listed here will be added as tags. Any other columns
+  ## will be added as fields.
+  csv_tag_columns = []
+
+  ## The column to extract the name of the metric from
+  csv_measurement_column = ""
+
+  ## The column to extract time information for the metric
+  ## `csv_timestamp_format` must be specified if this is used
+  csv_timestamp_column = ""
+
+  ## The format of time data extracted from `csv_timestamp_column`
+  ## this must be specified if `csv_timestamp_column` is specified
+  csv_timestamp_format = ""
+```
+
+### csv_timestamp_column, csv_timestamp_format
+
+By default, the current time will be used for all created metrics. To set the
+time from the parsed CSV document itself, use the `csv_timestamp_column` and
+`csv_timestamp_format` options together to set the time to a value in the parsed
+document.
+
+The `csv_timestamp_column` option specifies the column name containing the
+time value and `csv_timestamp_format` must be set to a Go "reference time"
+which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`.
+
+Consult the Go [time][time parse] package for details and additional examples
+on how to set the time format.
+
+## Metrics
+
+One metric is created for each row with the columns added as fields. The type
+of the field is automatically determined based on the contents of the value.
+
+## Examples
+
+Config:
+```
+[[inputs.file]]
+  files = ["example"]
+  data_format = "csv"
+  csv_header_row_count = 1
+  csv_measurement_column = "measurement"
+  csv_timestamp_column = "time"
+  csv_timestamp_format = "2006-01-02T15:04:05Z07:00"
+```
+
+Input:
+```
+measurement,cpu,time_user,time_system,time_idle,time
+cpu,cpu0,42,42,42,2018-09-13T13:03:28Z
+```
+
+Output:
+```
+cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000
+```
+
+[time parse]: https://golang.org/pkg/time/#Parse
diff --git a/content/telegraf/v1.15/data_formats/input/dropwizard.md b/content/telegraf/v1.15/data_formats/input/dropwizard.md
new file mode 100644
index 000000000..3e7356d14
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/dropwizard.md
@@ -0,0 +1,179 @@
+---
+title: Dropwizard input data format
+description: Use the "dropwizard" input data format to parse Dropwizard JSON representations into Telegraf metrics.
+menu:
+  telegraf_1_15:
+    name: Dropwizard
+    weight: 30
+    parent: Input data formats
+---
+
+The `dropwizard` data format can parse a [Dropwizard JSON representation][dropwizard] of a single metrics registry. By default, tags are parsed from metric names as if they were actual InfluxDB Line Protocol keys (`measurement<,tag_set>`), which can be overridden using custom [template patterns][templates]. All field value types are supported, including `string`, `number`, and `boolean`.
+
+[templates]: /telegraf/v1.15/data_formats/template-patterns/
+[dropwizard]: http://metrics.dropwizard.io/3.1.0/manual/json/
+
+## Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "dropwizard"
+
+  ## Used by the templating engine to join matched values when cardinality is > 1
+  separator = "_"
+
+  ## Each template line requires a template pattern. It can have an optional
+  ## filter before the template and separated by spaces. It can also have optional extra
+  ## tags following the template. Multiple tags should be separated by commas and no spaces
+  ## similar to the line protocol format. There can be only one default template.
+  ## Templates support the following formats:
+  ## 1. filter + template
+  ## 2. filter + template + extra tag(s)
+  ## 3. filter + template with field key
+  ## 4. default template
+  ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>)
+  templates = []
+
+  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
+  ## to locate the metric registry within the JSON document
+  # dropwizard_metric_registry_path = "metrics"
+
+  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
+  ## to locate the default time of the measurements within the JSON document
+  # dropwizard_time_path = "time"
+  # dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
+
+  ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
+  ## to locate the tags map within the JSON document
+  # dropwizard_tags_path = "tags"
+
+  ## You may even use tag paths per tag
+  # [inputs.exec.dropwizard_tag_paths]
+  #   tag1 = "tags.tag1"
+  #   tag2 = "tags.tag2"
+```
+
+
+## Examples
+
+A typical JSON representation of a Dropwizard metric registry:
+
+```json
+{
+  "version": "3.0.0",
+  "counters" : {
+    "measurement,tag1=green" : {
+      "count" : 1
+    }
+  },
+  "meters" : {
+    "measurement" : {
+      "count" : 1,
+      "m15_rate" : 1.0,
+      "m1_rate" : 1.0,
+      "m5_rate" : 1.0,
+      "mean_rate" : 1.0,
+      "units" : "events/second"
+    }
+  },
+  "gauges" : {
+    "measurement" : {
+      "value" : 1
+    }
+  },
+  "histograms" : {
+    "measurement" : {
+      "count" : 1,
+      "max" : 1.0,
+      "mean" : 1.0,
+      "min" : 1.0,
+      "p50" : 1.0,
+      "p75" : 1.0,
+      "p95" : 1.0,
+      "p98" : 1.0,
+      "p99" : 1.0,
+      "p999" : 1.0,
+      "stddev" : 1.0
+    }
+  },
+  "timers" : {
+    "measurement" : {
+      "count" : 1,
+      "max" : 1.0,
+      "mean" : 1.0,
+      "min" : 1.0,
+      "p50" : 1.0,
+      "p75" : 1.0,
+      "p95" : 1.0,
+      "p98" : 1.0,
+      "p99" : 1.0,
+      "p999" : 1.0,
+      "stddev" : 1.0,
+      "m15_rate" : 1.0,
+      "m1_rate" : 1.0,
+      "m5_rate" : 1.0,
+      "mean_rate" : 1.0,
+      "duration_units" : "seconds",
+      "rate_units" : "calls/second"
+    }
+  }
+}
+```
+
+Would get translated into the following measurements:
+
+```
+measurement,metric_type=counter,tag1=green count=1
+measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
+measurement,metric_type=gauge value=1
+measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0
+measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
+```
+
+You may also parse a Dropwizard registry from any JSON document that contains a Dropwizard registry in some inner field.
+For example, to parse the following JSON document:
+
+```json
+{
+  "time" : "2017-02-22T14:33:03.662+02:00",
+  "tags" : {
+    "tag1" : "green",
+    "tag2" : "yellow"
+  },
+  "metrics" : {
+    "counters" : {
+      "measurement" : {
+        "count" : 1
+      }
+    },
+    "meters" : {},
+    "gauges" : {},
+    "histograms" : {},
+    "timers" : {}
+  }
+}
+```
+and translate it into:
+
+```
+measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000
+```
+
+you simply need to use the following additional configuration properties:
+
+```toml
+dropwizard_metric_registry_path = "metrics"
+dropwizard_time_path = "time"
+dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
+dropwizard_tags_path = "tags"
+## Tag paths per tag are supported too, e.g.
+#[inputs.yourinput.dropwizard_tag_paths]
+# tag1 = "tags.tag1"
+# tag2 = "tags.tag2"
+```
diff --git a/content/telegraf/v1.15/data_formats/input/graphite.md b/content/telegraf/v1.15/data_formats/input/graphite.md
new file mode 100644
index 000000000..20f8c1a8d
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/graphite.md
@@ -0,0 +1,55 @@
+---
+title: Graphite input data format
+description: Use the Graphite data format to translate Graphite dot buckets directly into Telegraf measurement names, with a single value field, and without any tags.
+menu:
+  telegraf_1_15:
+    name: Graphite
+    weight: 40
+    parent: Input data formats
+---
+
+The Graphite data format translates Graphite *dot* buckets directly into
+Telegraf measurement names, with a single value field, and without any tags.
+By default, the separator is left as `.`, but this can be changed using the
+`separator` argument. For more advanced options, Telegraf supports specifying
+[templates](#templates) to translate graphite buckets into Telegraf metrics.
+
+## Configuration
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
+
+  ## measurement name suffix (for separating different commands)
+  name_suffix = "_mycollector"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "graphite"
+
+  ## This string will be used to join the matched values.
+  separator = "_"
+
+  ## Each template line requires a template pattern. It can have an optional
+  ## filter before the template and separated by spaces. It can also have optional extra
+  ## tags following the template. Multiple tags should be separated by commas and no spaces
+  ## similar to the line protocol format. There can be only one default template.
+  ## Templates support the following formats:
+  ## 1. filter + template
+  ## 2. filter + template + extra tag(s)
+  ## 3. filter + template with field key
+  ## 4. default template
+  templates = [
+    "*.app env.service.resource.measurement",
+    "stats.* .host.measurement* region=eu-east,agent=sensu",
+    "stats2.* .host.measurement.field",
+    "measurement*"
+  ]
+```
+
+### templates
+
+For information on creating templates, see [Template patterns](/telegraf/v1.15/data_formats/template-patterns/).
diff --git a/content/telegraf/v1.15/data_formats/input/grok.md b/content/telegraf/v1.15/data_formats/input/grok.md
new file mode 100644
index 000000000..673407d0a
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/grok.md
@@ -0,0 +1,226 @@
+---
+title: Grok input data format
+description: Use the grok data format to parse line-delimited data using a regular expression-like language.
+menu:
+  telegraf_1_15:
+    name: Grok
+    weight: 40
+    parent: Input data formats
+---
+
+The grok data format parses line-delimited data using a regular expression-like
+language.
+
+If you need to become familiar with grok patterns, see [Grok Basics](https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html#_grok_basics)
+in the Logstash documentation. The grok parser uses a slightly modified version of logstash "grok"
+patterns, using the format:
+
+```
+%{<capture_syntax>[:<semantic_name>][:<modifier>]}
+```
+
+The `capture_syntax` defines the grok pattern that is used to parse the input
+line and the `semantic_name` is used to name the field or tag. The extension
+`modifier` controls the data type that the parsed item is converted to or
+other special handling.
+
+By default, all named captures are converted into string fields.
+Timestamp modifiers can be used to convert captures to the timestamp of the
+parsed metric. If no timestamp is parsed, the metric will be created using the
+current time.
+
+You must capture at least one field per line.
+
+- Available modifiers:
+  - string (default if nothing is specified)
+  - int
+  - float
+  - duration (ie, 5.23ms gets converted to int nanoseconds)
+  - tag (converts the field into a tag)
+  - drop (drops the field completely)
+  - measurement (use the matched text as the measurement name)
+- Timestamp modifiers:
+  - ts (This will auto-learn the timestamp format)
+  - ts-ansic ("Mon Jan _2 15:04:05 2006")
+  - ts-unix ("Mon Jan _2 15:04:05 MST 2006")
+  - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006")
+  - ts-rfc822 ("02 Jan 06 15:04 MST")
+  - ts-rfc822z ("02 Jan 06 15:04 -0700")
+  - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST")
+  - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST")
+  - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700")
+  - ts-rfc3339 ("2006-01-02T15:04:05Z07:00")
+  - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00")
+  - ts-httpd ("02/Jan/2006:15:04:05 -0700")
+  - ts-epoch (seconds since unix epoch, may contain decimal)
+  - ts-epochnano (nanoseconds since unix epoch)
+  - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year)
+  - ts-"CUSTOM"
+
+CUSTOM time layouts must be within quotes and be the representation of the
+"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`.
+To match a comma decimal point, you can use a period in the pattern string.
+For example, `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"`.
+See https://golang.org/pkg/time/#Parse for more details.
+
+Telegraf has many of its own [built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/parsers/grok/influx_patterns.go),
+as well as support for most of
+[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
+_Golang regular expressions do not support lookahead or lookbehind.
+logstash patterns that depend on these are not supported._
+
+If you need help building patterns to match your logs, the
+[Grok Debugger application](https://grokdebug.herokuapp.com) might be helpful.
+
+## Configuration
+
+```toml
+[[inputs.file]]
+  ## Files to parse each interval.
+  ## These accept standard unix glob matching rules, but with the addition of
+  ## ** as a "super asterisk". ie:
+  ##   /var/log/**.log     -> recursively find all .log files in /var/log
+  ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
+  ##   /var/log/apache.log -> only tail the apache log file
+  files = ["/var/log/apache/access.log"]
+
+  ## The dataformat to be read from files
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "grok"
+
+  ## This is a list of patterns to check the given log file(s) for.
+  ## Note that adding patterns here increases processing time. The most
+  ## efficient configuration is to have one pattern.
+  ## Other common built-in patterns are:
+  ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
+  ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
+
+  ## Full path(s) to custom pattern files.
+  grok_custom_pattern_files = []
+
+  ## Custom patterns can also be defined here. Put one pattern per line.
+  grok_custom_patterns = '''
+  '''
+
+  ## Timezone allows you to provide an override for timestamps that
+  ## don't already include an offset
+  ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+  ##
+  ## Default: "" which renders UTC
+  ## Options are as follows:
+  ##   1. Local             -- interpret based on machine localtime
+  ##   2. "Canada/Eastern"  -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+  ##   3. UTC               -- or blank/unspecified, will return timestamp in UTC
+  grok_timezone = "Canada/Eastern"
+```
+
+### Timestamp examples
+
+This example input and config parses a file using a custom timestamp conversion:
+
+```
+2017-02-21 13:10:34 value=42
+```
+
+```toml
+[[inputs.file]]
+  grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}']
+```
+
+This example input and config parses a file using a timestamp in unix time:
+
+```
+1466004605 value=42
+1466004605.123456789 value=42
+```
+
+```toml
+[[inputs.file]]
+  grok_patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}']
+```
+
+This example parses a file using a built-in conversion and a custom pattern:
+
+```
+Wed Apr 12 13:10:34 PST 2017 value=42
+```
+
+```toml
+[[inputs.file]]
+  grok_patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"]
+  grok_custom_patterns = '''
+    TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR}
+  '''
+```
+
+For cases where the timestamp itself is without offset, the `grok_timezone` config option is available
+to denote an offset. By default (with `grok_timezone` either omitted, blank, or set to `"UTC"`), the times
+are processed as if in the UTC timezone. If specified as `grok_timezone = "Local"`, the timestamp
+will be processed based on the current machine timezone configuration. Lastly, if using a
+timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones),
+grok will offset the timestamp accordingly.
+
+### TOML escaping
+
+When saving patterns to the configuration file, keep in mind the different TOML
+[string](https://github.com/toml-lang/toml#string) types and the escaping
+rules for each. These escaping rules must be applied in addition to the
+escaping required by the grok syntax. Using the multi-line literal string
+syntax with `'''` may be useful.
+
+The following config examples will parse this input file:
+
+```
+|42|\uD83D\uDC2F|'telegraf'|
+```
+
+Since `|` is a special character in the grok language, we must escape it to
+get a literal `|`. With a basic TOML string, special characters such as
+backslash must be escaped, requiring us to escape the backslash a second time.
+
+```toml
+[[inputs.file]]
+  grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+  grok_custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+"
+```
+
+We cannot use a literal TOML string for the pattern, because we cannot match a
+`'` within it. However, it works well for the custom pattern.
+```toml
+[[inputs.file]]
+  grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+A multi-line literal string allows us to encode the pattern:
+```toml
+[[inputs.file]]
+  grok_patterns = ['''
+    \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
+  ''']
+  grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+### Tips for creating patterns
+
+Writing complex patterns can be difficult; here is some advice for writing a
+new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com).
+
+Create a file output that writes to stdout, and disable other outputs while
+testing. This will allow you to see the captured metrics. Keep in mind that
+the file output will only print once per `flush_interval`.
+
+```toml
+[[outputs.file]]
+  files = ["stdout"]
+```
+
+- Start with a file containing only a single line of your input.
+- Remove all but the first token or piece of the line.
+- Add the section of your pattern to match this piece to your configuration file.
+- Verify that the metric is parsed successfully by running Telegraf.
+- If successful, add the next token, update the pattern, and retest.
+- Continue one token at a time until the entire line is successfully parsed.
diff --git a/content/telegraf/v1.15/data_formats/input/influx.md b/content/telegraf/v1.15/data_formats/input/influx.md
new file mode 100644
index 000000000..c895c7fc2
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/influx.md
@@ -0,0 +1,27 @@
+---
+title: InfluxDB Line Protocol input data format
+description: Use the InfluxDB Line Protocol input data format to parse InfluxDB metrics directly into Telegraf metrics.
+menu:
+  telegraf_1_15:
+    name: InfluxDB Line Protocol input
+    weight: 60
+    parent: Input data formats
+---
+
+There are no additional configuration options for InfluxDB [line protocol][]. The
+InfluxDB metrics are parsed directly into Telegraf metrics.
+
+[line protocol]: /influxdb/latest/write_protocols/line/
+
+### Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+```
diff --git a/content/telegraf/v1.15/data_formats/input/json.md b/content/telegraf/v1.15/data_formats/input/json.md
new file mode 100644
index 000000000..cfa8b7d59
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/json.md
@@ -0,0 +1,224 @@
+---
+title: JSON input data format
+description: Use the JSON input data format to parse [JSON][json] objects, or an array of objects, into Telegraf metric fields.
+menu:
+  telegraf_1_15:
+    name: JSON input
+    weight: 70
+    parent: Input data formats
+---
+
+
+The JSON input data format parses a [JSON][json] object or an array of objects
+into Telegraf metric fields.
+
+**NOTE:** All JSON numbers are converted to float fields. JSON strings are
+ignored unless specified in the `tag_keys` or `json_string_fields` options.
+
+## Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "json"
+
+  ## Query is a GJSON path that specifies a specific chunk of JSON to be
+  ## parsed, if not specified the whole document will be parsed.
+  ##
+  ## GJSON query paths are described here:
+  ##   https://github.com/tidwall/gjson#path-syntax
+  json_query = ""
+
+  ## Tag keys is an array of keys that should be added as tags.
+  tag_keys = [
+    "my_tag_1",
+    "my_tag_2"
+  ]
+
+  ## String fields is an array of keys that should be added as string fields.
+  json_string_fields = []
+
+  ## Name key is the key to use as the measurement name.
+  json_name_key = ""
+
+  ## Time key is the key containing the time that should be used to create the
+  ## metric.
+  json_time_key = ""
+
+  ## Time format is the time layout that should be used to interpret the
+  ## json_time_key. The time must be `unix`, `unix_ms`, or a time in the
+  ## "reference time".
+  ## ex: json_time_format = "Mon Jan 2 15:04:05 -0700 MST 2006"
+  ##     json_time_format = "2006-01-02T15:04:05Z07:00"
+  ##     json_time_format = "unix"
+  ##     json_time_format = "unix_ms"
+  json_time_format = ""
+```
+
+### `json_query`
+
+The `json_query` is a [GJSON][gjson] path that can be used to limit the
+portion of the overall JSON document that should be parsed. The result of the
+query should contain a JSON object or an array of objects.
+
+Consult the GJSON [path syntax][gjson syntax] for details and examples.
+
+### json_time_key, json_time_format
+
+By default, the current time will be used for all created metrics. To set the
+time using the JSON document, you can use the `json_time_key` and
+`json_time_format` options together to set the time to a value in the parsed
+document.
+
+The `json_time_key` option specifies the key containing the time value and
+`json_time_format` must be set to `unix`, `unix_ms`, or the Go "reference
+time" which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006`.
+
+Consult the Go [time][time parse] package for details and additional examples
+on how to set the time format.
+
+## Examples
+
+### Basic parsing
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  name_override = "myjsonmetric"
+  data_format = "json"
+```
+
+Input:
+```json
+{
+    "a": 5,
+    "b": {
+        "c": 6
+    },
+    "ignored": "I'm a string"
+}
+```
+
+Output:
+```
+myjsonmetric a=5,b_c=6
+```
+
+### Name, tags, and string fields
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  json_name_key = "name"
+  tag_keys = ["my_tag_1"]
+  json_string_fields = ["my_field"]
+  data_format = "json"
+```
+
+Input:
+```json
+{
+    "a": 5,
+    "b": {
+        "c": 6,
+        "my_field": "description"
+    },
+    "my_tag_1": "foo",
+    "name": "my_json"
+}
+```
+
+Output:
+```
+my_json,my_tag_1=foo a=5,b_c=6,my_field="description"
+```
+
+### Arrays
+
+If the JSON data is an array, then each object within the array is parsed with
+the configured settings.
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  data_format = "json"
+  json_time_key = "b_time"
+  json_time_format = "02 Jan 06 15:04 MST"
+```
+
+Input:
+```json
+[
+    {
+        "a": 5,
+        "b": {
+            "c": 6,
+            "time":"04 Jan 06 15:04 MST"
+        }
+    },
+    {
+        "a": 7,
+        "b": {
+            "c": 8,
+            "time":"11 Jan 07 15:04 MST"
+        }
+    }
+]
+```
+
+Output:
+```
+file a=5,b_c=6 1136387040000000000
+file a=7,b_c=8 1168527840000000000
+```
+
+### Query
+
+The `json_query` option can be used to parse a subset of the document.
+ +Config: +```toml +[[inputs.file]] + files = ["example"] + data_format = "json" + tag_keys = ["first"] + json_string_fields = ["last"] + json_query = "obj.friends" +``` + +Input: +```json +{ + "obj": { + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44}, + {"first": "Roger", "last": "Craig", "age": 68}, + {"first": "Jane", "last": "Murphy", "age": 47} + ] + } +} +``` + +Output: +``` +file,first=Dale last="Murphy",age=44 +file,first=Roger last="Craig",age=68 +file,first=Jane last="Murphy",age=47 +``` + +[gjson]: https://github.com/tidwall/gjson +[gjson syntax]: https://github.com/tidwall/gjson#path-syntax +[json]: https://www.json.org/ +[time parse]: https://golang.org/pkg/time/#Parse diff --git a/content/telegraf/v1.15/data_formats/input/logfmt.md b/content/telegraf/v1.15/data_formats/input/logfmt.md new file mode 100644 index 000000000..cb396473d --- /dev/null +++ b/content/telegraf/v1.15/data_formats/input/logfmt.md @@ -0,0 +1,42 @@ +--- +title: Logfmt input data format +description: Use the "logfmt" input data format to parse "logfmt" data into Telegraf metrics. +menu: + telegraf_1_15: + name: logfmt + weight: 80 + parent: Input data formats +--- + +The `logfmt` data format parses [logfmt] data into Telegraf metrics. + +[logfmt]: https://brandur.org/logfmt + +## Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "logfmt" + + ## Set the name of the created metric, if unset the name of the plugin will + ## be used. + metric_name = "logfmt" +``` + +## Metrics + +Each key/value pair in the line is added to a new metric as a field. The type +of the field is automatically determined based on the contents of the value. + +## Examples + +``` +- method=GET host=example.org ts=2018-07-24T19:43:40.275Z connect=4ms service=8ms status=200 bytes=1653 ++ logfmt method="GET",host="example.org",ts="2018-07-24T19:43:40.275Z",connect="4ms",service="8ms",status=200i,bytes=1653i +``` diff --git a/content/telegraf/v1.15/data_formats/input/nagios.md b/content/telegraf/v1.15/data_formats/input/nagios.md new file mode 100644 index 000000000..3a1af43a0 --- /dev/null +++ b/content/telegraf/v1.15/data_formats/input/nagios.md @@ -0,0 +1,29 @@ +--- +title: Nagios input data format +description: Use the Nagios input data format to parse the output of Nagios plugins into Telegraf metrics. +menu: + telegraf_1_15: + name: Nagios + weight: 90 + parent: Input data formats +--- + +# Nagios + +The Nagios input data format parses the output of +[Nagios plugins](https://www.nagios.org/downloads/nagios-plugins/) into +Telegraf metrics. + +## Configuration + +```toml +[[inputs.exec]] + ## Commands array + commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"] + + ## Data format to consume. 
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "nagios"
+```
diff --git a/content/telegraf/v1.15/data_formats/input/value.md b/content/telegraf/v1.15/data_formats/input/value.md
new file mode 100644
index 000000000..ea3c76e1d
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/value.md
@@ -0,0 +1,44 @@
+---
+title: Value input data format
+description: Use the "value" input data format to parse single values into Telegraf metrics.
+menu:
+  telegraf_1_15:
+    name: Value
+    weight: 100
+    parent: Input data formats
+---
+
+
+The "value" input data format translates single values into Telegraf metrics. This
+is done by assigning a measurement name and setting a single field ("value")
+as the parsed metric.
+
+## Configuration
+
+You **must** tell Telegraf what type of metric to collect by using the
+`data_type` configuration option. Available data type options are:
+
+1. integer
+2. float or long
+3. string
+4. boolean
+
+> **Note:** It is also recommended that you set `name_override` to a measurement
+name that makes sense for your metric; otherwise, it will just be set to the
+name of the plugin.
+
+```toml
+[[inputs.exec]]
+  ## Commands array
+  commands = ["cat /proc/sys/kernel/random/entropy_avail"]
+
+  ## override the default metric name of "exec"
+  name_override = "entropy_available"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "value"
+  data_type = "integer" # required
+```
diff --git a/content/telegraf/v1.15/data_formats/input/wavefront.md b/content/telegraf/v1.15/data_formats/input/wavefront.md
new file mode 100644
index 000000000..d099e999b
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/input/wavefront.md
@@ -0,0 +1,28 @@
+---
+title: Wavefront input data format
+description: Use the Wavefront input data format to parse Wavefront data into Telegraf metrics.
+menu:
+  telegraf_1_15:
+    name: Wavefront
+    weight: 110
+    parent: Input data formats
+---
+
+The Wavefront input data format parses Wavefront data into Telegraf metrics.
+For more information on the Wavefront native data format, see
+[Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html) in the Wavefront documentation.
+
+## Configuration
+
+There are no additional configuration options for the Wavefront data format.
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "wavefront"
+```
diff --git a/content/telegraf/v1.15/data_formats/output/_index.md b/content/telegraf/v1.15/data_formats/output/_index.md
new file mode 100644
index 000000000..e827b67a2
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/output/_index.md
@@ -0,0 +1,35 @@
+---
+title: Telegraf output data formats
+description: Telegraf serializes metrics into output data formats for InfluxDB Line Protocol, JSON, Graphite, and Splunk metrics.
+menu:
+  telegraf_1_15:
+    name: Output data formats
+    weight: 1
+    parent: Data formats
+---
+
+In addition to output-specific data formats, Telegraf supports the following set
+of common data formats that may be selected when configuring many of the Telegraf
+output plugins.
+
+* [Carbon2](/telegraf/v1.15/data_formats/output/carbon2)
+* [Graphite](/telegraf/v1.15/data_formats/output/graphite)
+* [InfluxDB Line Protocol](/telegraf/v1.15/data_formats/output/influx)
+* [JSON](/telegraf/v1.15/data_formats/output/json)
+* [ServiceNow Metrics](/telegraf/v1.15/data_formats/output/nowmetric)
+* [SplunkMetric](/telegraf/v1.15/data_formats/output/splunkmetric)
+
+You can identify the plugins that support output data formats by the presence of a
+`data_format` configuration option, for example, in the File (`file`) output plugin:
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+```
diff --git a/content/telegraf/v1.15/data_formats/output/carbon2.md b/content/telegraf/v1.15/data_formats/output/carbon2.md
new file mode 100644
index 000000000..012d2fe36
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/output/carbon2.md
@@ -0,0 +1,60 @@
+---
+title: Carbon2 output data format
+description: Use the Carbon2 output data format (serializer) to convert Telegraf metrics into the Carbon2 format.
+menu:
+  telegraf_1_15:
+    name: Carbon2
+    weight: 10
+    parent: Output data formats
+---
+
+The `carbon2` output data format (serializer) translates the Telegraf metric format to the [Carbon2 format](http://metrics20.org/implementations/).
+
+## Configuration
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "carbon2"
+```
+
+Standard form:
+
+```
+metric=name field=field_1 host=foo 30 1234567890
+metric=name field=field_2 host=foo 4 1234567890
+metric=name field=field_N host=foo 59 1234567890
+```
+
+## Metrics
+
+The serializer converts the metrics by creating `intrinsic_tags` using the combination of metric name and fields. So, if one Telegraf metric has 4 fields, the `carbon2` output will be 4 separate metrics. There will be a `metric` tag that represents the name of the metric and a `field` tag to represent the field.
+
+## Example
+
+If we take the following InfluxDB Line Protocol:
+
+```
+weather,location=us-midwest,season=summer temperature=82,wind=100 1234567890
+```
+
+After serializing in Carbon2, the result would be:
+
+```
+metric=weather field=temperature location=us-midwest season=summer 82 1234567890
+metric=weather field=wind location=us-midwest season=summer 100 1234567890
+```
+
+## Fields and tags with spaces
+
+When a field key or tag key/value has spaces, the spaces are replaced with `_`.
+
+## Tags with empty values
+
+When a tag's value is empty, it is replaced with `null`.
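+
+As a sketch of how these two rules combine (the metric below is hypothetical, with an escaped space in a field key and an empty `source` tag), the conversion would look like:
+
+```
+weather,location=us-midwest,source= temperature\ avg=82 1234567890
+=>
+metric=weather field=temperature_avg location=us-midwest source=null 82 1234567890
+```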
diff --git a/content/telegraf/v1.15/data_formats/output/graphite.md b/content/telegraf/v1.15/data_formats/output/graphite.md
new file mode 100644
index 000000000..8d5fcbe6b
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/output/graphite.md
@@ -0,0 +1,58 @@
+---
+title: Graphite output data format
+description: Use the "Graphite" output data format to serialize data from Telegraf metrics.
+menu:
+  telegraf_1_15:
+    name: Graphite output
+    weight: 20
+    parent: Output data formats
+---
+
+The Graphite data format is serialized from Telegraf metrics using either the
+template pattern or tag support method. You can select between the two
+methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support method is used;
+otherwise, the [template pattern](#templates) is used.
+
+## Configuration
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "graphite"
+
+  ## Prefix added to each graphite bucket
+  prefix = "telegraf"
+  ## Graphite template pattern
+  template = "host.tags.measurement.field"
+
+  ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later.
+  # graphite_tag_support = false
+```
+
+### graphite_tag_support
+
+When the `graphite_tag_support` option is enabled, the template pattern is not
+used. Instead, tags are encoded using
+[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html),
+added in Graphite 1.1. The `metric_path` is a combination of the optional
+`prefix` option, measurement name, and field name.
+
+The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`.
+
+**Example conversion**:
+```
+cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758
+=>
+cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690
+cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690
+```
+
+### templates
+
+For more information on templates and template patterns, see [Template patterns](/telegraf/v1.15/data_formats/template-patterns/).
diff --git a/content/telegraf/v1.15/data_formats/output/influx.md b/content/telegraf/v1.15/data_formats/output/influx.md
new file mode 100644
index 000000000..b56d945fe
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/output/influx.md
@@ -0,0 +1,41 @@
+---
+title: InfluxDB Line Protocol output data format
+description: The "influx" data format outputs metrics into the InfluxDB Line Protocol format.
+menu:
+  telegraf_1_15:
+    name: InfluxDB Line Protocol
+    weight: 30
+    parent: Output data formats
+---
+
+The `influx` output data format outputs metrics into [InfluxDB Line Protocol][line protocol]. InfluxData recommends this data format unless another format is required for interoperability.
+
+## Configuration
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+
+  ## Maximum line length in bytes. Useful only for debugging.
+ influx_max_line_bytes = 0 + + ## When true, fields will be output in ascending lexical order. Enabling + ## this option will result in decreased performance and is only recommended + ## when you need predictable ordering while debugging. + influx_sort_fields = false + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + influx_uint_support = false +``` + +[line protocol]: /influxdb/latest/write_protocols/line_protocol_tutorial/ diff --git a/content/telegraf/v1.15/data_formats/output/json.md b/content/telegraf/v1.15/data_formats/output/json.md new file mode 100644 index 000000000..17ddae837 --- /dev/null +++ b/content/telegraf/v1.15/data_formats/output/json.md @@ -0,0 +1,89 @@ +--- +title: JSON output data format +description: Telegraf's "json" output data format converts metrics into JSON documents. +menu: + telegraf_1_15: + name: JSON + weight: 40 + parent: Output data formats +--- + +The `json` output data format serializes Telegraf metrics into JSON documents. + +## Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "json" + + ## The resolution to use for the metric timestamp. Must be a duration string + ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to + ## the power of 10 less than the specified units. + json_timestamp_units = "1s" +``` + +## Examples + +### Standard format + +```json +{ + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 +} +``` + +### Batch format + +When an output plugin needs to emit multiple metrics at one time, it may use the +batch format. The use of batch format is determined by the plugin -- reference +the documentation for the specific plugin. + +```json +{ + "metrics": [ + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + }, + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + } + ] +} +``` diff --git a/content/telegraf/v1.15/data_formats/output/nowmetric.md b/content/telegraf/v1.15/data_formats/output/nowmetric.md new file mode 100644 index 000000000..e59b61af2 --- /dev/null +++ b/content/telegraf/v1.15/data_formats/output/nowmetric.md @@ -0,0 +1,90 @@ +--- +title: ServiceNow Metrics output data format +description: Use the ServiceNow Metrics output data format (serializer) to output metrics in the ServiceNow Operational Intelligence format. +menu: + telegraf_1_15: + name: ServiceNow Metrics + weight: 50 + parent: Output data formats +--- + +The ServiceNow Metrics output data format (serializer) outputs metrics in the [ServiceNow Operational Intelligence format](https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/reference/mid-POST-metrics.html). 
+
+It can be used to write to a file using the File output plugin, or to send metrics to a MID Server (with the REST endpoint enabled) using the standard Telegraf HTTP output.
+If you're using the HTTP output plugin, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric.
+
+An example event looks like:
+
+```javascript
+[{
+  "metric_type": "Disk C: % Free Space",
+  "resource": "C:\\",
+  "node": "lnux100",
+  "value": 50,
+  "timestamp": 1473183012000,
+  "ci2metric_id": {
+    "node": "lnux100"
+  },
+  "source": "Telegraf"
+}]
+```
+
+## Using with the HTTP output plugin
+
+To send this data to a ServiceNow MID Server with the Web Server extension activated, use the HTTP output plugin.
+You need to add some custom headers to manage the MID Web Server authorization.
+Here's a sample configuration for an HTTP output:
+
+```toml
+[[outputs.http]]
+  ## URL is the address to send metrics to
+  url = "http://:9082/api/mid/sa/metrics"
+
+  ## Timeout for HTTP message
+  # timeout = "5s"
+
+  ## HTTP method, one of: "POST" or "PUT"
+  method = "POST"
+
+  ## HTTP Basic Auth credentials
+  username = 'evt.integration'
+  password = 'P@$$w0rd!'
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "nowmetric"
+
+  ## Additional HTTP headers
+  [outputs.http.headers]
+  # Should be set manually to "application/json" for json data_format
+  Content-Type = "application/json"
+  Accept = "application/json"
+```
+
+Starting with the London release, you also need to explicitly create an event rule to allow binding of metric events to host CIs.
+For details, see the [ServiceNow documentation](https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/task/event-rule-bind-metrics-to-host.html).
+
+## Using with the File output plugin
+
+You can use the File output plugin to output the payload to a file.
+In this case, just add the following section to your Telegraf configuration file.
+
+```toml
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["C:/Telegraf/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "nowmetric"
+```
diff --git a/content/telegraf/v1.15/data_formats/output/splunkmetric.md b/content/telegraf/v1.15/data_formats/output/splunkmetric.md
new file mode 100644
index 000000000..ba1c83d04
--- /dev/null
+++ b/content/telegraf/v1.15/data_formats/output/splunkmetric.md
@@ -0,0 +1,147 @@
+---
+title: SplunkMetric output data format
+description: The SplunkMetric serializer formats and outputs data in a format that can be consumed by a Splunk metrics index.
+menu:
+  telegraf_1_15:
+    name: SplunkMetric
+    weight: 60
+    parent: Output data formats
+---
+
+The SplunkMetric serializer formats and outputs the metric data in a format that can be consumed by a Splunk metrics index.
+It can be used to write to a file using the file output, or for sending metrics to a HEC using the standard Telegraf HTTP output.
+
+If you're using the HTTP output, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric.
+
+The data is output in a format that conforms to the specified Splunk HEC JSON format, as described in
+[Send metrics in JSON format](http://dev.splunk.com/view/event-collector/SP-CAAAFDN).
+
+An example event looks like:
+```javascript
+{
+  "time": 1529708430,
+  "event": "metric",
+  "host": "patas-mbp",
+  "fields": {
+    "_value": 0.6,
+    "cpu": "cpu0",
+    "dc": "mobile",
+    "metric_name": "cpu.usage_user",
+    "user": "ronnocol"
+  }
+}
+```
+In the above snippet, the following keys are dimensions:
+* cpu
+* dc
+* user
+
+## Using with the HTTP output
+
+To send this data to a Splunk HEC, use the HTTP output.
+You need to add some custom headers to manage the HEC authorization.
+Here's a sample configuration for an HTTP output:
+
+```toml
+[[outputs.http]]
+  ## URL is the address to send metrics to
+  url = "https://localhost:8088/services/collector"
+
+  ## Timeout for HTTP message
+  # timeout = "5s"
+
+  ## HTTP method, one of: "POST" or "PUT"
+  # method = "POST"
+
+  ## HTTP Basic Auth credentials
+  # username = "username"
+  # password = "pa$$word"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "splunkmetric"
+  ## Provides time, index, source overrides for the HEC
+  splunkmetric_hec_routing = true
+
+  ## Additional HTTP headers
+  [outputs.http.headers]
+  # Should be set manually to "application/json" for json data_format
+  Content-Type = "application/json"
+  Authorization = "Splunk xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+  X-Splunk-Request-Channel = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+```
+
+## Overrides
+
+You can override the default values for the HEC token you are using by adding additional tags to the config file.
+
+The following aspects of the token can be overridden with tags:
+* index
+* source
+
+You can either use `[global_tags]` or use a more advanced configuration as documented [here](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md).
+
+For example, the following overrides the index just for the cpu metric:
+```toml
+[[inputs.cpu]]
+  percpu = false
+  totalcpu = true
+  [inputs.cpu.tags]
+    index = "cpu_metrics"
+```
+
+## Using with the File output
+
+You can use the File output when running Telegraf on a machine with a Splunk forwarder.
+
+A sample event when `hec_routing` is false (or unset) looks like:
+```javascript
+{
+  "_value": 0.6,
+  "cpu": "cpu0",
+  "dc": "mobile",
+  "metric_name": "cpu.usage_user",
+  "user": "ronnocol",
+  "time": 1529708430
+}
+```
+Data formatted in this manner can be ingested with a simple `props.conf` file that
+looks like this:
+
+```ini
+[telegraf]
+category = Metrics
+description = Telegraf Metrics
+pulldown_type = 1
+DATETIME_CONFIG =
+NO_BINARY_CHECK = true
+SHOULD_LINEMERGE = true
+disabled = false
+INDEXED_EXTRACTIONS = json
+KV_MODE = none
+TIMESTAMP_FIELDS = time
+TIME_FORMAT = %s.%3N
+```
+
+An example configuration of a file-based output is:
+
+```toml
+# Send telegraf metrics to file(s)
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+ files = ["/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "splunkmetric" + hec_routing = false +``` diff --git a/content/telegraf/v1.15/data_formats/template-patterns.md b/content/telegraf/v1.15/data_formats/template-patterns.md new file mode 100644 index 000000000..766b01501 --- /dev/null +++ b/content/telegraf/v1.15/data_formats/template-patterns.md @@ -0,0 +1,145 @@ +--- +title: Telegraf template patterns +description: Use template patterns to describe how dot-delimited strings should map to and from Telegraf metrics. +menu: + telegraf_1_15: + name: Template patterns + weight: 30 + parent: Data formats +--- + + + +Template patterns are a mini language that describes how a dot delimited +string should be mapped to and from [metrics][]. + +A template has the form: +``` +"host.mytag.mytag.measurement.measurement.field*" +``` + +Where the following keywords can be set: + +1. `measurement`: specifies that this section of the graphite bucket corresponds +to the measurement name. This can be specified multiple times. +2. `field`: specifies that this section of the graphite bucket corresponds +to the field name. This can be specified multiple times. +3. `measurement*`: specifies that all remaining elements of the graphite bucket +correspond to the measurement name. +4. `field*`: specifies that all remaining elements of the graphite bucket +correspond to the field name. + +Any part of the template that is not a keyword is treated as a tag key. This +can also be specified multiple times. + +**NOTE:** `field*` cannot be used in conjunction with `measurement*`. + +## Examples + +### Measurement and tag templates + +The most basic template is to specify a single transformation to apply to all +incoming metrics. So the following template: + +```toml +templates = [ + "region.region.measurement*" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +us.west.cpu.load 100 +=> cpu.load,region=us.west value=100 +``` + +Multiple templates can also be specified, but these should be differentiated +using _filters_ (see below for more details) + +```toml +templates = [ + "*.*.* region.region.measurement", # <- all 3-part measurements will match this one. + "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one. +] +``` + +### Field templates + +The field keyword tells Telegraf to give the metric that field name. +So the following template: + +```toml +separator = "_" +templates = [ + "measurement.measurement.field.field.region" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.idle.percent.eu-east 100 +=> cpu_usage,region=eu-east idle_percent=100 +``` + +The field key can also be derived from all remaining elements of the graphite +bucket by specifying `field*`: + +```toml +separator = "_" +templates = [ + "measurement.measurement.region.field*" +] +``` + +which would result in the following Graphite -> Telegraf transformation. 
+
+```
+cpu.usage.eu-east.idle.percentage 100
+=> cpu_usage,region=eu-east idle_percentage=100
+```
+
+### Filter templates
+
+Users can also filter the template(s) to use based on the name of the bucket,
+using glob matching, like so:
+
+```toml
+templates = [
+    "cpu.* measurement.measurement.region",
+    "mem.* measurement.measurement.host"
+]
+```
+
+which would result in the following transformation:
+
+```
+cpu.load.eu-east 100
+=> cpu_load,region=eu-east value=100
+
+mem.cached.localhost 256
+=> mem_cached,host=localhost value=256
+```
+
+### Adding tags
+
+Additional tags that don't exist on the received metric can be added by
+specifying them after the pattern.
+Tags have the same format as the line protocol.
+Multiple tags are separated by commas.
+
+```toml
+templates = [
+    "measurement.measurement.field.region datacenter=1a"
+]
+```
+
+would result in the following Graphite -> Telegraf transformation.
+
+```
+cpu.usage.idle.eu-east 100
+=> cpu_usage,region=eu-east,datacenter=1a idle=100
+```
+
+[metrics]: /telegraf/v1.15/concepts/metrics/
diff --git a/content/telegraf/v1.15/guides/_index.md b/content/telegraf/v1.15/guides/_index.md
new file mode 100644
index 000000000..14ba24e5b
--- /dev/null
+++ b/content/telegraf/v1.15/guides/_index.md
@@ -0,0 +1,12 @@
+---
+title: Telegraf Guides
+
+menu:
+  telegraf_1_15:
+    name: Guides
+    weight: 20
+---
+
+The following articles guide you through step-by-step Telegraf configuration examples.
+
+[Using the HTTP plugin with Citi Bike data](/telegraf/v1.15/guides/using_http)
diff --git a/content/telegraf/v1.15/guides/using_http.md b/content/telegraf/v1.15/guides/using_http.md
new file mode 100644
index 000000000..8699a009c
--- /dev/null
+++ b/content/telegraf/v1.15/guides/using_http.md
@@ -0,0 +1,116 @@
+---
+title: Using the HTTP input plugin with Citi Bike data
+description: Use the Telegraf HTTP input plugin to collect live metrics on Citi Bike stations in New York City.
+menu:
+  telegraf_1_15:
+    name: Using the HTTP plugin
+    weight: 30
+    parent: Guides
+---
+
+This example walks through using the Telegraf HTTP input plugin to collect live metrics on Citi Bike stations in New York City. Live station data is available in JSON format from [NYC OpenData](https://data.cityofnewyork.us/NYC-BigApps/Citi-Bike-Live-Station-Feed-JSON-/p94q-8hxh).
+
+For the following example to work, configure the [`influxdb` output plugin](/telegraf/v1.15/plugins/plugin-list/#influxdb). This plugin is what allows Telegraf to write the metrics to your InfluxDB instance.
+
+## Configure the HTTP Input plugin in your Telegraf configuration file
+
+To retrieve data from the Citi Bike URL endpoint, enable the `inputs.http` input plugin in your Telegraf configuration file.
+
+Specify the following options:
+
+### `urls`
+One or more URLs to read metrics from. For this example, use `https://feeds.citibikenyc.com/stations/stations.json`.
+
+### `data_format`
+The format of the data in the HTTP endpoints that Telegraf will ingest. For this example, use JSON.
+
+
+## Add parser information to your Telegraf configuration
+
+Specify the following JSON-specific options.
+
+### JSON
+
+#### `json_query`
+To parse only the relevant portion of JSON data, set the `json_query` option with a [GJSON](https://github.com/tidwall/gjson) path. The result of the query should contain a JSON object or an array of objects.
+In this case, we don't want to parse the `executionTime` value at the beginning of the data, so we'll limit parsing to the data in the `stationBeanList` array, as shown in the snippet below.
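+
+For reference, the option is just the GJSON path to that array (it also appears in the full example configuration later in this guide):
+
+```toml
+# Parse only the array of station objects from the feed
+json_query = "stationBeanList"
+```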
+
+#### `tag_keys`
+List of one or more JSON keys that should be added as tags. For this example, we'll use the tag keys `id`, `stationName`, `city`, and `postalCode`.
+
+#### `json_string_fields`
+List the keys of fields that are in string format so that they can be parsed as strings. Here, the string fields are `statusValue`, `stAddress1`, `stAddress2`, `location`, and `landMark`.
+
+#### `json_time_key`
+Key from the JSON file that creates the timestamp metric. In this case, we want to use the time that station data was last reported, or the `lastCommunicationTime`. If you don't specify a key, the time that Telegraf reads the data becomes the timestamp.
+
+#### `json_time_format`
+The format used to interpret the designated `json_time_key`. This example uses [Go reference time format](https://golang.org/pkg/time/#Time.Format). For example, `Mon Jan 2 15:04:05 MST 2006`.
+
+#### `json_timezone`
+The timezone used to interpret the timestamp. We'll set this to the Unix TZ value for the location of our bike data, `America/New_York`.
+
+
+#### Example configuration
+
+  ```toml
+  [[inputs.http]]
+  # URL for NYC's Citi Bike station data in JSON format
+  urls = ["https://feeds.citibikenyc.com/stations/stations.json"]
+
+  # Overwrite measurement name from default `http` to `citibikenyc`
+  name_override = "citibikenyc"
+
+  # Exclude url and host items from tags
+  tagexclude = ["url", "host"]
+
+  # Data from HTTP in JSON format
+  data_format = "json"
+
+  # Parse `stationBeanList` array only
+  json_query = "stationBeanList"
+
+  # Set station metadata as tags
+  tag_keys = ["id", "stationName", "city", "postalCode"]
+
+  # Do not include station landmark data as fields
+  fielddrop = ["landMark"]
+
+  # JSON values to set as string fields
+  json_string_fields = ["statusValue", "stAddress1", "stAddress2", "location", "landMark"]
+
+  # Latest station information reported at `lastCommunicationTime`
+  json_time_key = "lastCommunicationTime"
+
+  # Time is reported in Golang "reference time" format
+  json_time_format = "2006-01-02 03:04:05 PM"
+
+  # Time is reported in Eastern Standard Time (EST)
+  json_timezone = "America/New_York"
+  ```
+
+
+
+## Start Telegraf and verify data appears
+
+[Start the Telegraf service](/telegraf/v1.15/introduction/getting-started/#start-the-telegraf-service).
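+
+On a systemd-based Linux host, for example, that's simply (assuming Telegraf was installed from a package):
+
+```bash
+# Start the Telegraf service (systemd installations)
+sudo systemctl start telegraf
+```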
+ +To test that the data is being sent to InfluxDB, run the following (replacing `telegraf.conf` with the path to your configuration file): + +``` +telegraf -config ~/telegraf.conf -test +``` + +This command should return line protocol that looks similar to the following: + + +``` +citibikenyc,id=3443,stationName=W\ 52\ St\ &\ 6\ Ave statusKey=1,location="",totalDocks=41,availableDocks=32,latitude=40.76132983124814,longitude=-73.97982001304626,availableBikes=8,stAddress2="",stAddress1="W 52 St & 6 Ave",statusValue="In Service" 1581533519000000000 +citibikenyc,id=367,stationName=E\ 53\ St\ &\ Lexington\ Ave availableBikes=8,stAddress1="E 53 St & Lexington Ave",longitude=-73.97069431,latitude=40.75828065,stAddress2="",statusKey=1,location="",statusValue="In Service",totalDocks=34,availableDocks=24 1581533492000000000 +citibikenyc,id=359,stationName=E\ 47\ St\ &\ Park\ Ave totalDocks=64,availableBikes=15,statusValue="In Service",location="",latitude=40.75510267,availableDocks=49,stAddress1="E 47 St & Park Ave",longitude=-73.97498696,statusKey=1,stAddress2="" 1581533535000000000 +citibikenyc,id=304,stationName=Broadway\ &\ Battery\ Pl statusValue="In Service",availableDocks=11,stAddress1="Broadway & Battery Pl",statusKey=1,stAddress2="",location="",totalDocks=33,latitude=40.70463334,longitude=-74.01361706,availableBikes=22 1581533499000000000 +``` + +Now, you can explore and query the Citi Bike data in InfluxDB. The example below is an InfluxQL query and visualization showing the number of available bikes over the past 15 minutes at the Broadway and West 29th Street station. + +![Citi Bike visualization](/img/citibike_query.png) diff --git a/content/telegraf/v1.15/introduction/_index.md b/content/telegraf/v1.15/introduction/_index.md new file mode 100644 index 000000000..aa02375a1 --- /dev/null +++ b/content/telegraf/v1.15/introduction/_index.md @@ -0,0 +1,22 @@ +--- +title: Introducing Telegraf + +menu: + telegraf_1_15: + name: Introduction + weight: 20 +--- + +To get up and running with Telegraf, complete the following tasks: + +## [Download Telegraf](https://portal.influxdata.com/downloads) + +Find the latest release of Telegraf. + +## [Install Telegraf](/telegraf/v1.15/introduction/installation/) + +Learn how to install, start, and configure Telegraf. + +## [Get started with Telegraf](/telegraf/v1.15/introduction/getting-started/) + +Learn how to use Telegraf to get data into InfluxDB. diff --git a/content/telegraf/v1.15/introduction/downloading.md b/content/telegraf/v1.15/introduction/downloading.md new file mode 100644 index 000000000..5ecaec6d4 --- /dev/null +++ b/content/telegraf/v1.15/introduction/downloading.md @@ -0,0 +1,12 @@ +--- +title: Downloading Telegraf +menu: + telegraf_1_15: + name: Downloading + weight: 10 + parent: Introduction +--- + + + +Download the latest Telegraf release at the [InfluxData download page](https://portal.influxdata.com/downloads). diff --git a/content/telegraf/v1.15/introduction/getting-started.md b/content/telegraf/v1.15/introduction/getting-started.md new file mode 100644 index 000000000..17e470b1d --- /dev/null +++ b/content/telegraf/v1.15/introduction/getting-started.md @@ -0,0 +1,135 @@ +--- +title: Getting started with Telegraf +description: Downloading, installing, configuring and getting started with Telegraf, the plug-in driven server agent of the InfluxData time series platform. 
+aliases:
+  - /telegraf/v1.15/introduction/getting_started/
+menu:
+  telegraf_1_15:
+    name: Getting started
+    weight: 30
+    parent: Introduction
+---
+
+Use Telegraf to collect and write metrics into InfluxDB and other supported outputs.
+
+To get up and running, do the following:
+
+1. [Download and install Telegraf](#download-and-install-telegraf)
+2. [Configure Telegraf](#configure-telegraf)
+3. [Start Telegraf service](#start-telegraf-service)
+
+## Download and install Telegraf
+
+Follow the instructions in the Telegraf section on the [Downloads page](https://influxdata.com/downloads/).
+
+> **Note:** Telegraf will start automatically using the default configuration when installed from a deb package.
+
+## Configure Telegraf
+
+### Configuration file location by installation type
+
+* macOS [Homebrew](http://brew.sh/): `/usr/local/etc/telegraf.conf`
+* Linux Debian and RPM packages: `/etc/telegraf/telegraf.conf`
+* Standalone binary: see the next section for how to create a configuration file
+
+> **Note:** You can also specify a remote URL endpoint to pull a configuration file from. See [Configuration file locations](/telegraf/v1.15/administration/configuration/#configuration-file-locations).
+
+### Create and edit the configuration file
+
+Before starting the Telegraf server, create or edit the initial configuration to specify your [inputs](/telegraf/v1.15/plugins/inputs/) (where the metrics come from) and [outputs](/telegraf/v1.15/plugins/outputs/) (where the metrics go). You can do this [several ways](/telegraf/v1.15/administration/configuration/).
+
+The following example shows how to create a configuration file called `telegraf.conf` and specify two inputs (`cpu` and `mem`) with the `--input-filter` flag and specify InfluxDB as the output with the `--output-filter` flag.
+
+```bash
+telegraf -sample-config --input-filter cpu:mem --output-filter influxdb > telegraf.conf
+```
+
+The `cpu` and `mem` input plugins read metrics about the system's CPU and memory usage, and then output this data to InfluxDB.
+
+## Start Telegraf service
+
+Start the Telegraf service and direct it to the relevant configuration file, or give it a URL to pull a configuration file from a remote endpoint:
+
+### macOS [Homebrew](http://brew.sh/)
+```bash
+telegraf --config telegraf.conf
+```
+
+### Linux (sysvinit and upstart installations)
+```bash
+sudo service telegraf start
+```
+
+### Linux (systemd installations)
+```bash
+systemctl start telegraf
+```
+
+## Results
+
+Telegraf starts collecting and writing data to the specified output.
+
+Returning to our sample configuration, we show what the `cpu` and `mem` data looks like in InfluxDB below.
+Note that we used the default input and output configuration settings to get this data.
+
+* List all [measurements](/influxdb/v1.4/concepts/glossary/#measurement) in the `telegraf` [database](/influxdb/v1.4/concepts/glossary/#database):
+
+```
+> SHOW MEASUREMENTS
+name: measurements
+------------------
+name
+cpu
+mem
+```
+
+* List all [field keys](/influxdb/v1.4/concepts/glossary/#field-key) by measurement:
+
+```
+> SHOW FIELD KEYS
+name: cpu
+---------
+fieldKey           fieldType
+usage_guest        float
+usage_guest_nice   float
+usage_idle         float
+usage_iowait       float
+usage_irq          float
+usage_nice         float
+usage_softirq      float
+usage_steal        float
+usage_system       float
+usage_user         float
+
+name: mem
+---------
+fieldKey           fieldType
+active             integer
+available          integer
+available_percent  float
+buffered           integer
+cached             integer
+free               integer
+inactive           integer
+total              integer
+used               integer
+used_percent       float
+```
+
+* Select a sample of the data in the [field](/influxdb/v1.4/concepts/glossary/#field) `usage_idle` in the measurement `cpu`:
+
+```
+> SELECT usage_idle FROM cpu WHERE cpu = 'cpu-total' LIMIT 5
+name: cpu
+---------
+time                  usage_idle
+2016-01-16T00:03:00Z  97.56189047261816
+2016-01-16T00:03:10Z  97.76305923519121
+2016-01-16T00:03:20Z  97.32533433320835
+2016-01-16T00:03:30Z  95.68857785553611
+2016-01-16T00:03:40Z  98.63715928982245
+```
+
+Notice that the timestamps occur at rounded ten-second intervals (that is, `:00`, `:10`, `:20`, and so on). This is a configurable setting.
+
+That's it! You're ready to use Telegraf to collect metrics and write them to your output of choice.
diff --git a/content/telegraf/v1.15/introduction/installation.md b/content/telegraf/v1.15/introduction/installation.md
new file mode 100644
index 000000000..38d65f1ab
--- /dev/null
+++ b/content/telegraf/v1.15/introduction/installation.md
@@ -0,0 +1,443 @@
+---
+title: Installing Telegraf
+menu:
+  telegraf_1_15:
+    name: Installing
+    weight: 20
+    parent: Introduction
+---
+
+This page provides directions for installing, starting, and configuring Telegraf.
+
+## Requirements
+
+Installation of the Telegraf package may require `root` or administrator privileges in order to complete successfully.
+
+### Networking
+
+Telegraf offers multiple service [input plugins](/telegraf/v1.15/plugins/inputs/) that may
+require custom ports.
+Modify port mappings through the configuration file (`telegraf.conf`).
+
+For Linux distributions, this file is located at `/etc/telegraf` for default installations.
+
+For Windows distributions, the configuration file is located in the directory where you unzipped the Telegraf ZIP archive.
+The default location is `C:\InfluxData\telegraf`.
+
+### NTP
+
+Telegraf uses a host's local time in UTC to assign timestamps to data.
+Use the Network Time Protocol (NTP) to synchronize time between hosts; if hosts' clocks
+aren't synchronized with NTP, the timestamps on the data can be inaccurate.
+
+## Installation
+
+{{< tabs-wrapper >}}
+{{% tabs %}}
+  [Ubuntu & Debian](#)
+  [RedHat & CentOS](#)
+  [SLES & openSUSE](#)
+  [FreeBSD/PC-BSD](#)
+  [macOS](#)
+  [Windows](#)
+{{% /tabs %}}
+
+{{% tab-content %}}
+Debian and Ubuntu users can install the latest stable version of Telegraf using the `apt-get` package manager.
+
+**Ubuntu:** Add the InfluxData repository with the following commands:
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[wget](#)
+[curl](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```bash
+wget -qO- https://repos.influxdata.com/influxdb.key | sudo apt-key add -
+source /etc/lsb-release
+echo "deb https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```bash
+curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add -
+source /etc/lsb-release
+echo "deb https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+**Debian:** Add the InfluxData repository with the following commands:
+
+{{< code-tabs-wrapper >}}
+{{% code-tabs %}}
+[wget](#)
+[curl](#)
+{{% /code-tabs %}}
+
+{{% code-tab-content %}}
+```bash
+# Before adding Influx repository, run this so that apt will be able to read the repository.
+
+sudo apt-get update && sudo apt-get install apt-transport-https
+
+# Add the InfluxData key
+
+wget -qO- https://repos.influxdata.com/influxdb.key | sudo apt-key add -
+source /etc/os-release
+test $VERSION_ID = "7" && echo "deb https://repos.influxdata.com/debian wheezy stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+test $VERSION_ID = "8" && echo "deb https://repos.influxdata.com/debian jessie stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+test $VERSION_ID = "9" && echo "deb https://repos.influxdata.com/debian stretch stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+test $VERSION_ID = "10" && echo "deb https://repos.influxdata.com/debian buster stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+```
+{{% /code-tab-content %}}
+
+{{% code-tab-content %}}
+```bash
+# Before adding Influx repository, run this so that apt will be able to read the repository.
+
+sudo apt-get update && sudo apt-get install apt-transport-https
+
+# Add the InfluxData key
+
+curl -sL https://repos.influxdata.com/influxdb.key | sudo apt-key add -
+source /etc/os-release
+test $VERSION_ID = "7" && echo "deb https://repos.influxdata.com/debian wheezy stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+test $VERSION_ID = "8" && echo "deb https://repos.influxdata.com/debian jessie stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+test $VERSION_ID = "9" && echo "deb https://repos.influxdata.com/debian stretch stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+test $VERSION_ID = "10" && echo "deb https://repos.influxdata.com/debian buster stable" | sudo tee /etc/apt/sources.list.d/influxdb.list
+```
+{{% /code-tab-content %}}
+{{< /code-tabs-wrapper >}}
+
+Then, install and start the Telegraf service:
+
+```bash
+sudo apt-get update && sudo apt-get install telegraf
+sudo service telegraf start
+```
+
+Or if your operating system is using systemd (Ubuntu 15.04+, Debian 8+):
+```
+sudo apt-get update && sudo apt-get install telegraf
+sudo systemctl start telegraf
+```
+
+**Install from a `.deb` file**:
+
+To manually install the Debian package from a `.deb` file:
+
+1. Download the latest Telegraf `.deb` release
+   from the Telegraf section of the [downloads page](https://influxdata.com/downloads/).
+2. Run the following command (making sure to supply the correct version number for the downloaded file):
+
+   ```sh
+   sudo dpkg -i telegraf_1._amd64.deb
+   ```
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+
+```
+telegraf config > telegraf.conf
+```
+
+### Create a configuration file with specific inputs and outputs
+```
+telegraf --input-filter [:] --output-filter [:] config > telegraf.conf
+```
+
+For more advanced configuration details, see the
+[configuration documentation](/telegraf/v1.15/administration/configuration/).
+{{% /tab-content %}}
+
+{{% tab-content %}}
+For instructions on how to manually install the RPM package from a file, please see the [downloads page](https://influxdata.com/downloads/).
+
+**RedHat and CentOS:** Install the latest stable version of Telegraf using the `yum` package manager:
+
+```bash
+cat <<EOF | sudo tee /etc/yum.repos.d/influxdb.repo
+[influxdb]
+name = InfluxDB Repository - RHEL \$releasever
+baseurl = https://repos.influxdata.com/rhel/\$releasever/\$basearch/stable
+enabled = 1
+gpgcheck = 1
+gpgkey = https://repos.influxdata.com/influxdb.key
+EOF
+```
+
+Once the repository is added to the `yum` configuration, install and start the Telegraf service:
+
+```bash
+sudo yum install telegraf
+sudo service telegraf start
+```
+
+Or if your operating system is using systemd (CentOS 7+, RHEL 7+):
+```
+sudo yum install telegraf
+sudo systemctl start telegraf
+```
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+
+```
+telegraf config > telegraf.conf
+```
+
+### Create a configuration file with specific inputs and outputs
+```
+telegraf --input-filter [:] --output-filter [:] config > telegraf.conf
+```
+
+For more advanced configuration details, see the
+[configuration documentation](/telegraf/v1.15/administration/configuration/).
+{{% /tab-content %}}
+
+{{% tab-content %}}
+There are RPM packages provided by openSUSE Build Service for SUSE Linux users:
+
+```bash
+# add go repository
+zypper ar -f obs://devel:languages:go/ go
+# install latest telegraf
+zypper in telegraf
+```
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+
+```
+telegraf config > telegraf.conf
+```
+
+### Create a configuration file with specific inputs and outputs
+```
+telegraf --input-filter [:] --output-filter [:] config > telegraf.conf
+```
+
+For more advanced configuration details, see the
+[configuration documentation](/telegraf/v1.15/administration/configuration/).
+{{% /tab-content %}}
+
+{{% tab-content %}}
+Telegraf is part of the FreeBSD package system.
+It can be installed by running:
+
+```bash
+sudo pkg install telegraf
+```
+
+The configuration file is located at `/usr/local/etc/telegraf.conf` with examples in `/usr/local/etc/telegraf.conf.sample`.
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+
+```
+telegraf config > telegraf.conf
+```
+
+### Create a configuration file with specific inputs and outputs
+```
+telegraf --input-filter [:] --output-filter [:] config > telegraf.conf
+```
+
+For more advanced configuration details, see the
+[configuration documentation](/telegraf/v1.15/administration/configuration/).
+{{% /tab-content %}}
+
+{{% tab-content %}}
+Users of macOS 10.8 and higher can install Telegraf using the [Homebrew](http://brew.sh/) package manager.
+Once `brew` is installed, you can install Telegraf by running:
+
+```bash
+brew update
+brew install telegraf
+```
+
+To have launchd start telegraf at next login:
+```
+ln -sfv /usr/local/opt/telegraf/*.plist ~/Library/LaunchAgents
+```
+To load telegraf now:
+```
+launchctl load ~/Library/LaunchAgents/homebrew.mxcl.telegraf.plist
+```
+
+Or, if you don't want/need launchctl, you can just run:
+```
+telegraf -config /usr/local/etc/telegraf.conf
+```
+
+{{% telegraf/verify %}}
+
+## Configuration
+
+### Create a configuration file with default input and output plugins
+
+Every plugin will be in the file, but most will be commented out.
+ +``` +telegraf config > telegraf.conf +``` + +### Create a configuration file with specific inputs and outputs +``` +telegraf --input-filter [:] --output-filter [:] config > telegraf.conf +``` + +For more advanced configuration details, see the +[configuration documentation](/telegraf/v1.15/administration/configuration/). +{{% /tab-content %}} + +{{% tab-content %}} + +## Installation + +Download the Telegraf ZIP archive for Windows from the [InfluxData downloads page](https://portal.influxdata.com/downloads). + +Extract the contents of the ZIP archive to `C:\Program Files\InfluxData\Telegraf`. + +##### Verify the integrity of the downloaded Telegraf binary (optional) + +To obtain the SHA256 hash for the Windows Telegraf download, use the following PowerShell command: + +```powershell +CertUtil -hashfile /telegraf-1._windows_amd64.zip SHA256 +``` + +Compare the output from this command to the hash listed on the downloads page to ensure the integrity of the download. + +### Configure an input plugin + +The Telegraf ZIP archive contains a default configuration file (`telegraf.conf`). +In this file, the input plugin for capturing basic [Windows system metrics](/telegraf/v1.15/plugins/plugin-list/#win_perf_counters) is already activated. +With this plugin, Telegraf monitors the following defined Windows Operating System objects: + +- Processor +- LogicalDisk +- PhysicalDisk +- Network Interface +- System +- Memory +- Paging File + +Telegraf can capture metrics and log information from a wide variety of sources. +For more advanced configuration details, see the [configuration documentation](/telegraf/v1.15/administration/configuration/). + +### Configure an output plugin + +Before you start the Telegraf agent, configure an output plugin to send data to InfluxDB. +Choose the appropriate plugin based on the version of InfluxDB you are using. + +The `telegraf.conf` file included in the ZIP archive contains sections for configuring +both the [InfluxDB v1](/telegraf/v1.15/plugins/plugin-list/#influxdb) and +[InfluxDB v2](/telegraf/v1.15/plugins/plugin-list/#influxdb_v2) output plugins. + +#### Writing data to InfluxDB 1.x + +Open `telegraf.conf` in a text editor and fill in the `database` field under `[[outputs.influxdb]]`. + +#### Writing data to InfluxDB 2.0 + +Open `telegraf.conf` in a text editor and comment out the InfluxDB v1 plugin +by placing a `#` in front of `[[outputs.influxdb]]`. +Then remove the `#` in front of `[[outputs.influxdb_v2]]`. + +For detailed instructions on configuring Telegraf to write to InfluxDB 2.0, see +[Enable and configure the InfluxDB v2 output plugin](/v2.0/write-data/use-telegraf/manual-config/#enable-and-configure-the-influxdb-v2-output-plugin). + +### Start the agent + +Once configured, run the following commands in PowerShell to begin sending metrics with Telegraf: + +```powershell +> cd C:\Program Files\InfluxData\Telegraf # path to extracted Telegraf directory +> .\telegraf.exe -config +``` +## Install Telegraf as a Windows Service + +Telegraf natively supports running as a Windows service. 
+
+The following commands are available:
+
+| Command                            | Effect                        |
+|------------------------------------|-------------------------------|
+| `telegraf.exe --service install`   | Install telegraf as a service |
+| `telegraf.exe --service uninstall` | Remove the telegraf service   |
+| `telegraf.exe --service start`     | Start the telegraf service    |
+| `telegraf.exe --service stop`      | Stop the telegraf service     |
+
+Outlined below are the general steps to install Telegraf as a service.
+
+{{% note %}}
+Installing a Windows service requires administrative permissions.
+Be sure to [launch PowerShell as administrator](
+https://docs.microsoft.com/en-us/powershell/scripting/windows-powershell/starting-windows-powershell?view=powershell-7#with-administrative-privileges-run-as-administrator).
+{{% /note %}}

+1. Download the Telegraf binary and unzip its contents to `C:\Program Files\InfluxData\Telegraf`.
+2. In PowerShell, run the following as an administrator:
+   ```powershell
+   > cd "C:\Program Files\InfluxData\Telegraf"
+   > .\telegraf.exe --service install --config "C:\Program Files\InfluxData\Telegraf\telegraf.conf"
+   ```
+   When installing Telegraf as a Windows service, always double-check that you specify the full and correct path to the configuration file.
+   Otherwise, the Windows service may fail to start.
+3. To test that the installation works, run:
+
+   ```powershell
+   > C:\"Program Files"\InfluxData\Telegraf\telegraf.exe --config C:\"Program Files"\InfluxData\Telegraf\telegraf.conf --test
+   ```
+
+4. To start collecting data, run:
+
+   ```powershell
+   telegraf.exe --service start
+   ```
+
+
+
+{{% note %}}
+##### Logging and troubleshooting
+When Telegraf runs as a Windows service, Telegraf logs messages to Windows event logs.
+If the Telegraf service fails on start, view error logs by selecting **Event Viewer**→**Windows Logs**→**Application**.
+{{% /note %}}
+
+{{< /tab-content >}}
+{{< /tabs-wrapper >}}
diff --git a/content/telegraf/v1.15/plugins/_index.md b/content/telegraf/v1.15/plugins/_index.md
new file mode 100644
index 000000000..7b1d2b6a1
--- /dev/null
+++ b/content/telegraf/v1.15/plugins/_index.md
@@ -0,0 +1,25 @@
+---
+title: Telegraf plugins
+description: Telegraf plugins are agents used in the InfluxData time series platform for collecting, processing, aggregating, and writing metrics from time series data on the InfluxDB time series database and other popular databases and applications.
+menu:
+  telegraf_1_15:
+    name: Plugins
+    weight: 40
+---
+
+Telegraf is an agent, written in the Go programming language, for collecting, processing, aggregating, and writing metrics. Telegraf is plugin-driven and supports four categories of plugins: input, output, aggregator, and processor.
+
+## [Full Telegraf plugins list](/telegraf/v1.15/plugins/plugin-list/)
+View the full list of available Telegraf plugins.
+
+### Telegraf input plugins
+The [Telegraf input plugins](/telegraf/v1.15/plugins/inputs/) collect metrics from the system, services, or third party APIs.
+
+### Telegraf output plugins
+The [Telegraf output plugins](/telegraf/v1.15/plugins/outputs/) write metrics to various destinations.
+
+### Telegraf aggregator plugins
+The [Telegraf aggregator plugins](/telegraf/v1.15/plugins/aggregators/) create aggregate metrics (for example, mean, min, max, and quantiles).
+
+### Telegraf processor plugins
+The [Telegraf processor plugins](/telegraf/v1.15/plugins/processors/) transform, decorate, and filter metrics.
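+
+As a quick illustrative sketch (not a complete configuration; it assumes the `cpu`, `rename`, `minmax`, and `influxdb` plugins are enabled in your build), the four categories fit together in `telegraf.conf` like this:
+
+```toml
+[[inputs.cpu]]                # input: collect CPU metrics
+
+[[processors.rename]]         # processor: transform metrics in flight
+
+[[aggregators.minmax]]        # aggregator: emit min/max over each period
+  period = "30s"
+
+[[outputs.influxdb]]          # output: write metrics to InfluxDB
+  urls = ["http://localhost:8086"]
+```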
diff --git a/content/telegraf/v1.15/plugins/aggregators.md b/content/telegraf/v1.15/plugins/aggregators.md
new file mode 100644
index 000000000..0711f4768
--- /dev/null
+++ b/content/telegraf/v1.15/plugins/aggregators.md
@@ -0,0 +1,14 @@
+---
+title: Telegraf aggregator plugins
+description: Use the Telegraf aggregator plugins with the InfluxData time series platform to create aggregate metrics (for example, mean, min, max, and quantiles) from metrics collected by the input plugins. Aggregator plugins support basic statistics, histograms, and min/max values.
+menu:
+  telegraf_1_15:
+    name: Aggregator
+    weight: 30
+    parent: Plugins
+---
+
+Aggregators emit new aggregate metrics based on the metrics collected by the input plugins.
+
+## Supported Telegraf aggregator plugins
+_View the [Telegraf plugin list](/telegraf/v1.15/plugins/plugin-list/) to view all aggregator plugins._
diff --git a/content/telegraf/v1.15/plugins/inputs.md b/content/telegraf/v1.15/plugins/inputs.md
new file mode 100644
index 000000000..16394e01e
--- /dev/null
+++ b/content/telegraf/v1.15/plugins/inputs.md
@@ -0,0 +1,17 @@
+---
+title: Telegraf input plugins
+description: Telegraf input plugins are used with the InfluxData time series platform to collect metrics from the system, services, or third party APIs.
+menu:
+  telegraf_1_15:
+    name: Input
+    weight: 10
+    parent: Plugins
+---
+
+Telegraf input plugins are used with the InfluxData time series platform to collect metrics from the system, services, or third party APIs. All metrics are gathered from the inputs you [enable and configure in the configuration file](/telegraf/v1.15/administration/configuration/).
+
+## Usage instructions
+View usage instructions for each service input by running `telegraf --usage `.
+
+## Supported Telegraf input plugins
+_View the [Telegraf plugin list](/telegraf/v1.15/plugins/plugin-list/) to view all input plugins._
diff --git a/content/telegraf/v1.15/plugins/outputs.md b/content/telegraf/v1.15/plugins/outputs.md
new file mode 100644
index 000000000..d0341951f
--- /dev/null
+++ b/content/telegraf/v1.15/plugins/outputs.md
@@ -0,0 +1,14 @@
+---
+title: Telegraf output plugins
+description: Use Telegraf output plugins to write metrics to various destinations. Supported output plugins include Datadog, Elasticsearch, Graphite, InfluxDB, Kafka, MQTT, Prometheus Client, Riemann, and Wavefront.
+menu:
+  telegraf_1_15:
+    name: Output
+    weight: 20
+    parent: Plugins
+---
+
+Telegraf allows users to specify multiple output sinks in the configuration file.
+
+## Supported Telegraf output plugins
+_View the [Telegraf plugin list](/telegraf/v1.15/plugins/plugin-list/) to view all output plugins._
diff --git a/content/telegraf/v1.15/plugins/plugin-list.md b/content/telegraf/v1.15/plugins/plugin-list.md
new file mode 100644
index 000000000..9907a6f7f
--- /dev/null
+++ b/content/telegraf/v1.15/plugins/plugin-list.md
@@ -0,0 +1,45 @@
+---
+title: Telegraf plugins
+description: >
+  Telegraf is a plugin-driven agent that collects, processes, aggregates, and writes metrics.
+  It supports four categories of plugins: input, output, aggregator, and processor.
+  View and search all available Telegraf plugins.
+menu:
+  telegraf_1_15:
+    parent: Plugins
+weight: 6
+---
+
+Telegraf is a plugin-driven agent that collects, processes, aggregates, and writes metrics.
+It supports four categories of plugins: input, output, aggregator, and processor.
+
+{{< telegraf/filters >}}
+
+
+**Jump to:**
+
+- [Input plugins](#input-plugins)
+- [Output plugins](#output-plugins)
+- [Aggregator plugins](#aggregator-plugins)
+- [Processor plugins](#processor-plugins)
+
+## Input plugins
+Telegraf input plugins are used with the InfluxData time series platform to collect
+metrics from the system, services, or third party APIs.
+
+{{< telegraf/plugins type="input" >}}
+
+## Output plugins
+Telegraf output plugins write metrics to various destinations.
+
+{{< telegraf/plugins type="output" >}}
+
+## Aggregator plugins
+Telegraf aggregator plugins create aggregate metrics (for example, mean, min, max, and quantiles).
+
+{{< telegraf/plugins type="aggregator" >}}
+
+## Processor plugins
+Telegraf processor plugins transform, decorate, and filter metrics.
+
+{{< telegraf/plugins type="processor" >}}
diff --git a/content/telegraf/v1.15/plugins/processors.md b/content/telegraf/v1.15/plugins/processors.md
new file mode 100644
index 000000000..cf8e0365c
--- /dev/null
+++ b/content/telegraf/v1.15/plugins/processors.md
@@ -0,0 +1,15 @@
+---
+title: Telegraf processor plugins
+description: Use Telegraf processor plugins in the InfluxData time series platform to process metrics and emit results based on the values processed.
+menu:
+  telegraf_1_15:
+    name: Processor
+    identifier: processors
+    weight: 40
+    parent: Plugins
+---
+
+Processor plugins process metrics as they pass through and immediately emit results based on the values they process.
+
+## Supported Telegraf processor plugins
+_View the [Telegraf plugin list](/telegraf/v1.15/plugins/plugin-list/) to view all processor plugins._
diff --git a/layouts/partials/header.html b/layouts/partials/header.html
index 40f2edc01..0af368446 100644
--- a/layouts/partials/header.html
+++ b/layouts/partials/header.html
@@ -15,7 +15,7 @@
   {{ else if eq (len $productPathData) 2 }}
   {{ (index .Site.Data.products $product).name }} {{ $currentVersion }} Documentation
   {{ else }}
-  {{ if .Params.seotitle }} {{ print .Params.seotitle " | " }} {{ else if .Title }} {{ print .Title " | " }} {{ end }}InfluxDB {{ if $currentVersion }}{{print $currentVersion " " }}{{ end }}Documentation
+  {{ if .Params.seotitle }} {{ print .Params.seotitle " | " }} {{ else if .Title }} {{ print .Title " | " }} {{ end }}{{ (index .Site.Data.products $product).name }} {{ if $currentVersion }}{{print $currentVersion " " }}{{ end }}Documentation
   {{ end }}
diff --git a/layouts/shortcodes/telegraf/verify.md b/layouts/shortcodes/telegraf/verify.md
new file mode 100644
index 000000000..c3fe6e562
--- /dev/null
+++ b/layouts/shortcodes/telegraf/verify.md
@@ -0,0 +1,32 @@
+### Verify the authenticity of downloaded binary (optional)
+
+InfluxData cryptographically signs each Telegraf binary release.
+For added security, follow these steps to verify the signature of your download with `gpg`.
+
+(Most operating systems include the `gpg` command by default.
+If `gpg` is not available, see the [GnuPG homepage](https://gnupg.org/download/) for installation instructions.)
+
+1. Download and import InfluxData's public key:
+
+   ```
+   curl -sL https://repos.influxdata.com/influxdb.key | gpg --import
+   ```
+
+2. Download the signature file for the release by adding `.asc` to the download URL.
+   For example:
+
+   ```
+   wget https://dl.influxdata.com/telegraf/releases/telegraf-1.14.1_linux_amd64.tar.gz.asc
+   ```
+
+3. 
Verify the signature with `gpg --verify`:
+
+   ```
+   gpg --verify telegraf-1.14.1_linux_amd64.tar.gz.asc telegraf-1.14.1_linux_amd64.tar.gz
+   ```
+
+   The output from this command should include the following:
+
+   ```
+   gpg: Good signature from "InfluxDB Packaging Service <support@influxdb.com>" [unknown]
+   ```